file_id (string, lengths 5-9) | content (string, lengths 100-5.25M) | local_path (string, lengths 66-70) | kaggle_dataset_name (string, lengths 3-50, ⌀) | kaggle_dataset_owner (string, lengths 3-20, ⌀) | kversion (string, lengths 497-763, ⌀) | kversion_datasetsources (string, lengths 71-5.46k, ⌀) | dataset_versions (string, lengths 338-235k, ⌀) | datasets (string, lengths 334-371, ⌀) | users (string, lengths 111-264, ⌀) | script (string, lengths 100-5.25M) | df_info (string, lengths 0-4.87M) | has_data_info (bool, 2 classes) | nb_filenames (int64, 0-370) | retreived_data_description (string, lengths 0-4.44M) | script_nb_tokens (int64, 25-663k) | upvotes (int64, 0-1.65k) | tokens_description (int64, 25-663k) | tokens_script (int64, 25-663k) |
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
129273054
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
train_df = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv")
test_df = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/test.csv")
greeks_df = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/greeks.csv")
sample_submission = pd.read_csv(
"/kaggle/input/icr-identify-age-related-conditions/sample_submission.csv"
)
# Column EJ takes value A or B. Need to make it numerical, so that we can feed it into a neural network.
train_df["EJ"] = train_df["EJ"].replace({"A": 0, "B": 1})
test_df["EJ"] = test_df["EJ"].replace({"A": 0, "B": 1})
train_X = train_df.drop(["Id", "Class"], axis=1)
training_Y = train_df[["Class"]]
test_X = test_df.drop(["Id"], axis=1)
scaler = StandardScaler()
train_X = scaler.fit_transform(train_X)
test_X = scaler.transform(test_X)  # reuse the scaler fitted on the training data; do not refit on the test set
X_train, X_val, y_train, y_val = train_test_split(
train_X, training_Y, test_size=0.2, random_state=1
) # Want a validation data set
early_stopping = tf.keras.callbacks.EarlyStopping(patience=30, monitor="val_loss")
reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(
monitor="val_loss", min_lr=0.01, patience=30, mode="min"
)
def create_model():
model = tf.keras.Sequential(
[
tf.keras.layers.Dense(64, activation="relu"),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(1, activation="sigmoid"),
]
)
model.compile(loss="binary_crossentropy", optimizer="RMSprop", metrics=["accuracy"])
return model
model = create_model()
history = model.fit(
X_train,
y_train,
epochs=500,
batch_size=16,
validation_data=(X_val, y_val),
callbacks=[early_stopping, reduce_lr],
)
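# --- Hypothetical continuation (not in the original notebook): a minimal sketch of how the
# trained model could fill the loaded but otherwise unused sample_submission. The column
# names class_0/class_1 are assumed to match the competition's sample_submission.csv.
test_probs = model.predict(test_X).ravel()  # predicted probability of Class == 1
sample_submission["class_0"] = 1 - test_probs
sample_submission["class_1"] = test_probs
sample_submission.to_csv("submission.csv", index=False)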
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/273/129273054.ipynb
| null | null |
[{"Id": 129273054, "ScriptId": 38434570, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6523460, "CreationDate": "05/12/2023 10:59:17", "VersionNumber": 1.0, "Title": "Baseline with Tensorflow", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 46.0, "LinesInsertedFromPrevious": 46.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 627 | 0 | 627 | 627 |
||
129390322
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from graphviz import Digraph
class TreeNode:
def __init__(
self, node_indices, entropy, feature, left_child=None, right_child=None
):
self.node_indices = node_indices
self.left_child = left_child
self.right_child = right_child
self.entropy = entropy
self.feature = feature
def traverse_and_output(self, node): # runs from root and shows the entire Tree
pass
class DecisionTree: # Simplified version for only 2 classes
def __init__(self, X, y, max_depth=5, feature_names=None):
# X - numpy matrix (one hot encoded, only classes)
# y - numpy array(1 or 0)
# setting feature names for visualization
if feature_names:
self.feature_names = feature_names
else:
self.feature_names = ["Feature {}".format(i) for i in range(X.shape[1])]
self.X = X
self.y = y
self.max_depth = max_depth
self.n_feature = self.X.shape[1]
self.initial_indices = np.array([i for i in range(len(X))])
self.ROOT_NODE = (
None # Format "Depth # {n}" : {"feature" : ..., "depth" : ..., "entropy"}
)
def entropy(self, sample_of_classes):
# fraction
p = np.sum(sample_of_classes) / len(sample_of_classes)
if p == 0 or p == 1:
return 0
return -p * np.log2(p) - (1 - p) * np.log2(1 - p)
    def get_sample_of_data(self, X_indices):  # input: an array or list of indices to fetch
X_data = np.zeros((len(X_indices), self.n_feature)) # prepare matrix
y_data = []
for i in range(len(X_indices)):
for j in range(self.n_feature):
ind = X_indices[i]
X_data[i, j] = self.X[ind, j]
y_data.append(self.y[ind])
y_data = np.array(y_data)
return X_data, y_data
def split_dataset(self, X_node_indeces, feature_ind):
left_indices = [] # feature = 1
right_indices = [] # feature = 0
for i in X_node_indeces:
if self.X[i, feature_ind] == 1:
left_indices.append(i)
else: # if 0
right_indices.append(i)
return right_indices, left_indices
def info_gain(self, node_indices, feature_ind):
# extract data
right_ind, left_ind = self.split_dataset(node_indices, feature_ind)
X_parent, y_parent = self.get_sample_of_data(node_indices)
X_left, y_left = self.get_sample_of_data(left_ind)
X_right, y_right = self.get_sample_of_data(right_ind)
# compute fractions
n_parent = np.sum(y_parent)
n_left = np.sum(y_left)
n_right = np.sum(y_right)
p_parent = n_parent / len(y_parent)
p_left = n_left / len(y_left)
p_right = n_right / len(y_right)
# compute weights
left_weight = len(y_left) / len(y_parent)
right_weight = len(y_right) / len(y_parent)
info_gain = self.entropy(y_parent) - (
left_weight * self.entropy(y_left) + right_weight * self.entropy(y_right)
)
return info_gain
def get_best_split(self, X_node_indices):
# def info_gain(self,X_parent, y_parent, X_left,y_left, X_right, y_right):
num_features = self.X.shape[1]
# init vals to return
best_feature = -1
best_info_gain = -0.1
# end
X_parent, y_parent = self.get_sample_of_data(X_node_indices)
for i in range(num_features):
# extract needed data
info_gain = self.info_gain(X_node_indices, i)
if info_gain > best_info_gain:
best_feature = i
best_info_gain = info_gain
if best_info_gain <= 0:
return -1
return best_feature
def build_tree(self, node_indices, current_depth, side="root"):
if current_depth >= self.max_depth:
return
# gain best feature for now
best_feature_to_split = self.get_best_split(X_node_indices=node_indices)
        # check entropy and the chosen split: the node may be pure, or no feature may give positive information gain
        _, y_data = self.get_sample_of_data(node_indices)
        curr_entropy = self.entropy(y_data)
        if curr_entropy == 0 or best_feature_to_split == -1:
            node = TreeNode(node_indices, curr_entropy, "NOTHING TO SPLIT")
            return node
# def __init__(self, node_indices,entropy, feature,left_child=None, right_child=None):
# split data into 2 branches by this feature
left_indices, right_indices = self.split_dataset(
node_indices, best_feature_to_split
)
# OUTPUT INFO
print(
"Curr_depth = {} , best_feature = {}".format(
current_depth, best_feature_to_split
)
)
# record node in dict
node = TreeNode(
node_indices,
curr_entropy,
self.feature_names[best_feature_to_split],
)
# recursively do the same for left child and right child
node.left_child = self.build_tree(
left_indices, current_depth + 1, side="left"
) # increment curr_depth
node.right_child = self.build_tree(
right_indices, current_depth + 1, side="right"
)
return node
    def visualize_tree(self):
        # To be completed later; the tree does its job, but without a visualization so far.
        pass
    def fit(self, max_depth=None):
        # allow overriding the max_depth passed to the constructor
        if max_depth is not None:
            self.max_depth = max_depth
        self.ROOT_NODE = self.build_tree(self.initial_indices, current_depth=0)
# ## How to do better
# It is a good idea to store each node in the TreeNode class with the attributes:
# 1. entropy
# 2. left child
# 3. right child
# 4. depth
# 5. indices
# Moreover, we will need to implement a predict method (a sketch is given at the end of this notebook).
# Ideally, a simple visualization as well.
# ## Testing my implementation
#
# ARTIFICIAL DATASET FOR TESTING
# FEATURES
# "Temp", "Cough","Happy","Tired"
# target = sick or not
X = np.array(
[
[1, 0, 1, 0],
[0, 1, 0, 1],
[1, 1, 0, 0],
[0, 0, 1, 1],
[1, 0, 0, 1],
[0, 1, 1, 0],
[1, 1, 1, 0],
[0, 0, 0, 1],
[1, 1, 0, 1],
[0, 1, 1, 1],
[1, 0, 1, 1],
[0, 1, 0, 0],
[1, 1, 1, 1],
[0, 0, 1, 0],
[1, 0, 0, 0],
[0, 1, 1, 1],
[1, 1, 0, 1],
[0, 0, 0, 0],
[1, 0, 1, 0],
[0, 1, 0, 1],
]
)
# Create a simple target variable (y) based on some rule
y = np.array([1 if row[0] == row[2] else 0 for row in X])
dec_tree = DecisionTree(X, y, 4, ["Temp", "Cough", "Happy", "Tired"])
dec_tree.fit(max_depth=3)
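# --- Hypothetical addition (not part of the original notebook): a minimal sketch of the
# predict method mentioned in "How to do better". It walks the fitted tree from ROOT_NODE,
# follows the split feature at each node, and returns the majority class of the leaf it reaches.
def predict_one(tree, x):
    node = tree.ROOT_NODE
    while node is not None and node.feature in tree.feature_names:
        feat_idx = tree.feature_names.index(node.feature)
        child = node.left_child if x[feat_idx] == 1 else node.right_child
        if child is None:
            break
        node = child
    _, y_leaf = tree.get_sample_of_data(node.node_indices)
    return int(np.sum(y_leaf) >= len(y_leaf) / 2)
print([predict_one(dec_tree, row) for row in X])  # compare against y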
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/390/129390322.ipynb
| null | null |
[{"Id": 129390322, "ScriptId": 38470584, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6066769, "CreationDate": "05/13/2023 11:36:28", "VersionNumber": 2.0, "Title": "Decision Tree from scartch", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 337.0, "LinesInsertedFromPrevious": 55.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 282.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 2,074 | 0 | 2,074 | 2,074 |
||
129390134
|
import pandas as pd
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import TensorDataset
from torch.utils.data import DataLoader
from torch.utils.data import random_split
train = pd.read_csv("/kaggle/input/playground-series-s3e14/train.csv")
test = pd.read_csv("/kaggle/input/playground-series-s3e14/test.csv")
submission = pd.read_csv("/kaggle/input/playground-series-s3e14/sample_submission.csv")
train = train.iloc[:, 1:]
train.head()
train.duplicated().value_counts()
train = train.drop_duplicates()
train.duplicated().value_counts()
input_cols = list(train.columns)[:-1]
target_col = list(train.columns)[-1]
input_cols, target_col
scale_features = [
"clonesize",
"honeybee",
"MaxOfUpperTRange",
"MinOfUpperTRange",
"AverageOfUpperTRange",
"MaxOfLowerTRange",
"MinOfLowerTRange",
"AverageOfLowerTRange",
"seeds",
]
from sklearn.preprocessing import MinMaxScaler
scaler = MinMaxScaler().fit(train[scale_features])
train[scale_features] = scaler.transform(train[scale_features])
inputs = train[input_cols].values.reshape(-1, 2, 2, 4)  # -1 infers the row count remaining after drop_duplicates
targets = train[target_col].values.reshape(-1, 1)
inputs = inputs.astype("float32")
targets = targets.astype("float32")
inputs = torch.from_numpy(inputs)
targets = torch.from_numpy(targets)
dataset = TensorDataset(inputs, targets)
random_seed = 42
torch.manual_seed(random_seed)
val_size = 2000
train_size = len(dataset) - val_size
train_ds, val_ds = random_split(dataset, [train_size, val_size])
len(train_ds), len(val_ds)
batch_size = 150
train_dl = DataLoader(train_ds, batch_size, shuffle=True)
val_dl = DataLoader(val_ds, batch_size * 2)
class blueberrybase(nn.Module):
def training_step(self, batch):
in_fts, targets = batch
out = self(in_fts)
loss = loss_fn(out, targets)
return loss
def validation_step(self, batch):
in_fts, targets = batch
out = self(in_fts)
loss = loss_fn(out, targets)
return {"val_loss": loss.detach()}
def validation_epoch_end(self, outputs):
batch_losses = [x["val_loss"] for x in outputs]
epoch_loss = torch.stack(batch_losses).mean()
return {"val_loss": epoch_loss.item()}
def epoch_end(self, epoch, result):
print("Epoch [{}], val_loss:{:.4f}".format(epoch, result["val_loss"]))
class blueberryconvmodel(blueberrybase):
def __init__(self):
super().__init__()
self.network = nn.Sequential(
nn.Conv2d(2, 8, kernel_size=2, padding=1, stride=1),
nn.ReLU(),
nn.Conv2d(8, 16, kernel_size=2, padding=1, stride=1),
nn.ReLU(),
nn.MaxPool2d(2, 2),
nn.Conv2d(16, 32, kernel_size=2, padding=1, stride=1),
nn.ReLU(),
nn.MaxPool2d(2, 2),
nn.Flatten(),
nn.Linear(32 * 2 * 1, 16),
nn.ReLU(),
nn.Linear(16, 8),
nn.ReLU(),
nn.Linear(8, 1),
)
def forward(self, xb):
return self.network(xb)
model = blueberryconvmodel()
model
@torch.no_grad()
def evaluate(model, val_loader):
model.eval()
outputs = [model.validation_step(batch) for batch in val_loader]
return model.validation_epoch_end(outputs)
def fit(epochs, lr, model, train_loader, val_loader, opt_func=torch.optim.SGD):
history = []
optimizer = opt_func(model.parameters(), lr)
for epoch in range(epochs):
# Training Phase
model.train()
train_losses = []
for batch in train_loader:
loss = model.training_step(batch)
train_losses.append(loss)
loss.backward()
optimizer.step()
optimizer.zero_grad()
# Validation phase
result = evaluate(model, val_loader)
result["train_loss"] = torch.stack(train_losses).mean().item()
model.epoch_end(epoch, result)
history.append(result)
return history
loss_fn = F.l1_loss
evaluate(model, val_dl)
num_epochs = 10
opt_func = torch.optim.Adam
lr = 0.05
history = fit(num_epochs, lr, model, train_dl, val_dl, opt_func)
lr = 0.005
history += fit(num_epochs, lr, model, train_dl, val_dl, opt_func)
lr = 0.001
history += fit(num_epochs, lr, model, train_dl, val_dl, opt_func)
lr = 0.0001
history += fit(num_epochs, lr, model, train_dl, val_dl, opt_func)
lr = 1e-5
history += fit(num_epochs, lr, model, train_dl, val_dl, opt_func)
history
import matplotlib.pyplot as plt
def plot_losses(history):  # the history tracks train/val losses, not accuracies
    val_loss = [x["val_loss"] for x in history]
    train_loss = [x["train_loss"] for x in history]
    plt.plot(val_loss, "-x")
    plt.plot(train_loss, "-o")
    plt.xlabel("epoch")
    plt.ylabel("loss")
    plt.title("Loss vs. No. of epochs")
plot_losses(history)
test = test.set_index("id")
test.head()
test[scale_features] = scaler.transform(test[scale_features])
test.head()
test_tensor = torch.from_numpy(test.values.reshape(-1, 2, 2, 4).astype("float32"))  # -1 infers the number of test rows
test_result = model(test_tensor)
test_result
test_result = test_result.detach().numpy()
submission["yield"] = test_result
submission.head()
submission.to_csv("sample_submission.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/390/129390134.ipynb
| null | null |
[{"Id": 129390134, "ScriptId": 38471741, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8400283, "CreationDate": "05/13/2023 11:34:14", "VersionNumber": 1.0, "Title": "Simple Convolution Neural Network Attempt", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 187.0, "LinesInsertedFromPrevious": 187.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
| null | null | null | null |
| false | 0 | 1,687 | 1 | 1,687 | 1,687 |
||
129390060
|
<jupyter_start><jupyter_text>Car Price Prediction Linear Regression
Kaggle dataset identifier: car-price-prediction-linear-regression
<jupyter_script># # Import Libraries
# #### Pandas
# #### Numpy
# #### Matplotlib
# #### Seaborn
#
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# ## Read The Dataset
data = pd.read_csv(
"/kaggle/input/car-price-prediction-linear-regression/CarPrice_Assignment.csv",
index_col="car_ID",
)
data.head()
data.info()
data.describe()
data.describe().transpose()
data.columns
# ## Data Cleaning
data.isnull().sum()
# # Data Visualization
sns.pairplot(data)
sns.countplot(data=data, x="fueltype")
plt.title("Countplot of Fueltype")
sns.countplot(data=data, x="aspiration")
plt.title("Countplot of Aspiration")
sns.countplot(data=data, x="drivewheel")
plt.title("Countplot of DrivenWheel")
sns.countplot(data=data, x="doornumber")
plt.title("Countplot of Doors")
sns.countplot(data=data, x="enginelocation")
plt.title("Countplot of Engine Location")
sns.countplot(data=data, x="carbody")
plt.title("Countplot of Car Body")
sns.countplot(data=data, x="enginetype")
plt.title("Countplot of Enginetype")
sns.countplot(data=data, x="fuelsystem")
plt.title("Countplot of FuelSystem")
plt.figure(figsize=(8, 10))
sns.countplot(data=data, y="enginesize")
plt.title("Countplot of Engine Size")
plt.figure(figsize=(8, 10))
sns.countplot(data=data, y="horsepower")
plt.title("Countplot of Horse Power")
plt.figure(figsize=(12, 6))
sns.countplot(data=data, x="highwaympg", hue="enginetype")
plt.figure(figsize=(12, 6))
sns.barplot(data=data, y="highwaympg", x="enginetype", hue="fueltype")
sns.heatmap(data.corr())
# # Data Preprocessing
# In this step, we remove all the columns that we are not going to use in the model.
# ##### NewData
newdata = data.drop(
[
"symboling",
"carbody",
"drivewheel",
"enginetype",
"cylindernumber",
"fuelsystem",
"CarName",
],
axis=1,
)
newdata.head()
#
# Now we obtain dummy variables for the remaining categorical columns and use them to assist the model.
# ###### Getting Dummy Columns
gas = pd.get_dummies(newdata["fueltype"], drop_first=True)
aspiration = pd.get_dummies(newdata["aspiration"], drop_first=True)
doornumber = pd.get_dummies(newdata["doornumber"], drop_first=True)
enginelocation = pd.get_dummies(newdata["enginelocation"], drop_first=True)
# ###### Remove Original Columns
newdata.drop(["fueltype"], axis=1, inplace=True)
newdata.drop(["aspiration"], axis=1, inplace=True)
newdata.drop(["doornumber"], axis=1, inplace=True)
newdata.drop(["enginelocation"], axis=1, inplace=True)
# ###### Add Dummy Columns to the NewData
newdata = pd.concat([newdata, gas, aspiration, doornumber, enginelocation], axis=1)
newdata.head()
# ## Training a Linear Regression Model
# Let's now begin to train our regression model! We first need to split up our data into an X array that contains the features to train on, and a y array with the target variable, in this case the price column. The remaining text columns were already dropped above, because a linear regression model can't use raw text.
# ### X and y arrays
X = newdata.drop("price", axis=1)
y = data["price"]
# ## Train Test Split
#
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.30, random_state=101
)
# ## Model Evaluation
#
from sklearn.linear_model import LinearRegression
lnrg = LinearRegression()
lnrg.fit(X_train, y_train)
coeff_df = pd.DataFrame(lnrg.coef_, X.columns, columns=["Coefficient"])
coeff_df
# ## Predictions from our Model
#
pred = lnrg.predict(X_test)
plt.scatter(y_test, pred)
# **Residual Histogram**
sns.displot(y_test - pred, bins=50, kde=True)
# ## Regression Evaluation Metrics
#
from sklearn import metrics
print("MAE:", metrics.mean_absolute_error(y_test, pred))
print("MSE:", metrics.mean_squared_error(y_test, pred))
print("RMSE:", np.sqrt(metrics.mean_squared_error(y_test, pred)))
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/390/129390060.ipynb
|
car-price-prediction-linear-regression
|
furkandurmus0
|
[{"Id": 129390060, "ScriptId": 38472216, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13879337, "CreationDate": "05/13/2023 11:33:18", "VersionNumber": 1.0, "Title": "Car Price Prediction Linear Regression (EDA)", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 169.0, "LinesInsertedFromPrevious": 169.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
|
[{"Id": 185392510, "KernelVersionId": 129390060, "SourceDatasetVersionId": 5641415}]
|
[{"Id": 5641415, "DatasetId": 3242661, "DatasourceVersionId": 5716730, "CreatorUserId": 6993084, "LicenseName": "Unknown", "CreationDate": "05/09/2023 08:46:22", "VersionNumber": 1.0, "Title": "Car Price Prediction Linear Regression", "Slug": "car-price-prediction-linear-regression", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3242661, "CreatorUserId": 6993084, "OwnerUserId": 6993084.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5641415.0, "CurrentDatasourceVersionId": 5716730.0, "ForumId": 3307930, "Type": 2, "CreationDate": "05/09/2023 08:46:22", "LastActivityDate": "05/09/2023", "TotalViews": 139, "TotalDownloads": 25, "TotalVotes": 0, "TotalKernels": 1}]
|
[{"Id": 6993084, "UserName": "furkandurmus0", "DisplayName": "Furkan Durmus", "RegisterDate": "03/21/2021", "PerformanceTier": 1}]
|
| false | 1 | 1,342 | 3 | 1,369 | 1,342 |
||
129204767
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# import libraries for visualization
import matplotlib.pyplot as plt
import seaborn as sns
# Load the handy tool for splitting the dataset:
from sklearn.model_selection import train_test_split
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# always fix RANDOM_SEED so that your experiments are reproducible!
RANDOM_SEED = 42
# pin package versions so that the experiments are reproducible:
# Load our data from the competition
DATA_DIR = "/kaggle/input/sf-booking/"
df_train = pd.read_csv(DATA_DIR + "/hotels_train.csv")  # training dataset
df_test = pd.read_csv(DATA_DIR + "hotels_test.csv")  # dataset for prediction
sample_submission = pd.read_csv(DATA_DIR + "/submission.csv")  # submission
df_train.info()
df_train.head(2)
df_test.info()
df_test.head(2)
sample_submission.head(2)
sample_submission.info()
# IMPORTANT! To process the features correctly, we combine train and test into a single dataset
df_train["sample"] = 1  # mark the train part
df_test["sample"] = 0  # mark the test part
df_test[
    "reviewer_score"
] = 0  # the test set has no reviewer_score (it is what we must predict), so fill it with zeros for now
data = pd.concat([df_test, df_train], sort=False).reset_index(drop=True)  # combine
data.info()
# Before processing the data, let's make a copy of the dataset to preserve it in case something goes wrong :)
data_copy = data.copy()
# Let's start data processing with the "hotel address" feature. This feature is represented as an object type and contains a string. We can convert the string to a list using the split method. Let's extract the country where the hotel is located from the address. To do this, we will write a function called "country". Since our dataset includes the country "United Kingdom," which consists of two words, we need to take this into account when writing the function.
def country(address):
lst = address.split(" ")
if lst[-1] == "Kingdom":
return f"{lst[-2]} {lst[-1]}"
else:
return lst[-1]
# We are creating a new feature: country
data_copy["country"] = data_copy["hotel_address"].apply(country)
# To extract the city of the hotel from the address, we also need to take into account the country "United Kingdom." Let's check how many addresses contain the city of London and compare it with the number of occurrences of the value "United Kingdom" in the "country" feature. If the numbers match, we can fill in the "city" feature with the single value "London" for the country "United Kingdom".
count = 0
for address in data_copy["hotel_address"]:
if "London" in address:
count += 1
result = abs(data_copy[data_copy["country"] == "United Kingdom"].shape[0] - count)
if result == 0:
    print("All hotels in the United Kingdom are situated in London")
else:
    print(f"There are {result} hotels situated in other cities")
# Now we can create a new feature called "city".
def city(address):
lst = address.split(" ")
if lst[-1] == "Kingdom":
return "London"
else:
return lst[-2]
# We are creating a new feature: city
data_copy["city"] = data_copy["hotel_address"].apply(city)
data.nunique(dropna=False)
plt.rcParams["figure.figsize"] = (15, 10)
sns.heatmap(data.drop(["sample"], axis=1).corr(), annot=True)
# drop the features that we have not processed yet;
# the model will not train on features with dtype "object", so simply select and drop them
object_columns = [s for s in data.columns if data[s].dtypes == "object"]
data.drop(object_columns, axis=1, inplace=True)
data.info()
# Now split off the test part
train_data = data.query("sample == 1").drop(["sample"], axis=1)
test_data = data.query("sample == 0").drop(["sample"], axis=1)
y = train_data.reviewer_score.values  # our target
X = train_data.drop(["reviewer_score"], axis=1)
# Use the train_test_split helper function to split the data
# reserve 20% of the data for validation (the test_size parameter)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=RANDOM_SEED
)
# check
test_data.shape, train_data.shape, X.shape, X_train.shape, X_test.shape
# Import the required libraries:
from sklearn.ensemble import (
    RandomForestRegressor,
)  # tool for building and training the model
from sklearn import metrics  # tools for evaluating model accuracy
# Create the model (DO NOT CHANGE THE SETTINGS)
model = RandomForestRegressor(
n_estimators=100, verbose=1, n_jobs=-1, random_state=RANDOM_SEED
)
# Train the model on the training data
model.fit(X_train, y_train)
# Use the trained model to predict reviewer scores on the hold-out sample.
# Store the predicted values in the variable y_pred
y_pred = model.predict(X_test)
# Compare the predicted values (y_pred) with the actual ones (y_test) and see how much they differ on average.
# The metric is called Mean Absolute Error (MAE) and shows the mean deviation of the predictions from the actual values.
print("MAE:", metrics.mean_absolute_error(y_test, y_pred))
# RandomForestRegressor can report the most important features for the model
plt.rcParams["figure.figsize"] = (10, 10)
feat_importances = pd.Series(model.feature_importances_, index=X.columns)
feat_importances.nlargest(15).plot(kind="barh")
test_data.sample(10)
test_data = test_data.drop(["reviewer_score"], axis=1)
sample_submission
predict_submission = model.predict(test_data)
predict_submission
list(sample_submission)
sample_submission["reviewer_score"] = predict_submission
sample_submission.to_csv("submission.csv", index=False)
sample_submission.head(10)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/204/129204767.ipynb
| null | null |
[{"Id": 129204767, "ScriptId": 38313921, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14821900, "CreationDate": "05/11/2023 19:55:51", "VersionNumber": 5.0, "Title": "banzarkhanova_booking", "EvaluationDate": "05/11/2023", "IsChange": false, "TotalLines": 182.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 182.0, "LinesInsertedFromFork": 51.0, "LinesDeletedFromFork": 2.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 131.0, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 2,487 | 0 | 2,487 | 2,487 |
||
129204842
|
<jupyter_start><jupyter_text>Cats-vs-Dogs
### Context
**This data set contains two classes of images, Cats and Dogs.
It is best utilized in a binary classification problem in computer vision.**
👍
Kaggle dataset identifier: microsoft-catsvsdogs-dataset
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import torch
import torchvision
import torch.nn as nn # All neural network modules, nn.Linear, nn.Conv2d, BatchNorm, Loss functions
import torchvision.datasets as datasets # Has standard datasets we can import in a nice way
import torchvision.transforms as transforms # Transformations we can perform on our dataset
import torch.nn.functional as F # All functions that don't have any parameters
from torch.utils.data import (
DataLoader,
Dataset,
)  # Gives easier dataset management and creates mini batches
from torchvision.datasets import ImageFolder
import torch.optim as optim # For all Optimization algorithms, SGD, Adam, etc.
from PIL import Image
import random
import shutil
from math import ceil
from torchvision.transforms.functional import to_tensor
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
break
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # use gpu or cpu
img_size = 224
orig_data_path = "/kaggle/input/microsoft-catsvsdogs-dataset/PetImages/"
train_path = "tmp/train/"
val_path = "tmp/val/"
test_path = "tmp/test"
batch_size = 64
classes = ["Dog", "Cat"]
train_ratio = 0.7
val_ratio = 0.15
test_ratio = 0.15
# # Split train-val-test
random.seed(42)
if os.path.isdir("tmp"):
shutil.rmtree("tmp")
else:
print("ok")
for target_path in [train_path, val_path, test_path]:
if not os.path.exists(target_path):
os.makedirs(target_path)
for _class in classes:
print("class: ", _class)
os.makedirs(os.path.join(target_path, _class))
def is_valid_image_file(filename):
valid_extensions = (
".jpg",
".jpeg",
".png",
".ppm",
".bmp",
".pgm",
".tif",
".tiff",
".webp",
)
return any(filename.endswith(ext) for ext in valid_extensions)
# split the dataset into train, test
for _class in classes:
class_path = os.path.join(orig_data_path, _class)
images = [img for img in os.listdir(class_path) if is_valid_image_file(img)]
# images = os.listdir(class_path)
random.shuffle(images)
train_images = images[: ceil(len(images) * train_ratio)]
val_images = images[
ceil(len(images) * train_ratio) : ceil(len(images) * (train_ratio + val_ratio))
]
test_images = images[ceil(len(images) * (train_ratio + val_ratio)) :]
print("Copying files...")
for img in train_images:
shutil.copy(
os.path.join(class_path, img), os.path.join(train_path, _class, img)
)
for img in val_images:
shutil.copy(os.path.join(class_path, img), os.path.join(val_path, _class, img))
for img in test_images:
shutil.copy(os.path.join(class_path, img), os.path.join(test_path, _class, img))
print("Finish")
print(
f"Number of train {_class} = {len(os.listdir(os.path.join(train_path, _class)))}"
)
print(f"Number of val {_class} = {len(os.listdir(os.path.join(val_path, _class)))}")
print(
f"Number of test {_class} = {len(os.listdir(os.path.join(test_path, _class)))}"
)
print("Done")
# # Data Loaders
# initial checks for images
# check image not corrupted
class ImageFolderWithFilter(torchvision.datasets.ImageFolder):
def __init__(self, root, transform=None, target_transform=None):
super(ImageFolderWithFilter, self).__init__(
root, transform=transform, target_transform=target_transform
)
self.samples = self._filter_invalid_images()
def _filter_invalid_images(self):
valid_samples = []
for sample in self.samples:
try:
Image.open(sample[0]).convert("RGB")
valid_samples.append(sample)
except (IOError, SyntaxError) as e:
print(f"Invalid image: {sample[0]} - {e}")
return valid_samples
# check image not B&W or RGBA
class ToTensorEnsure3D:
def __call__(self, pic):
# Convert the image to a 3-channel image if it is grayscale or has an alpha channel
if pic.mode == "L" or pic.mode == "RGBA":
pic = pic.convert("RGB")
return to_tensor(pic)
simple_transforms = transforms.Compose(
[
transforms.Resize((img_size, img_size)),
ToTensorEnsure3D(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
# -----------------------------------------------------------------
# Load the datasets with ImageFolderWithFilter and apply the simple_transforms
train_dataset = ImageFolderWithFilter(train_path, transform=simple_transforms)
val_dataset = ImageFolderWithFilter(val_path, transform=simple_transforms)
test_dataset = ImageFolderWithFilter(test_path, transform=simple_transforms)
# Create the data loaders
train_loader = DataLoader(
train_dataset, batch_size=batch_size, shuffle=True, num_workers=4
)
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
test_loader = DataLoader(
test_dataset, batch_size=batch_size, shuffle=False, num_workers=4
)
print("Done")
# # Simple NN fully connected
class SimpleNet(nn.Module):
def __init__(self):
super(SimpleNet, self).__init__()
self.fc1 = nn.Linear(img_size * img_size * 3, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, 2)
def forward(self, x):
x = x.view(x.size(0), -1) # Flatten the input
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return F.log_softmax(x, dim=1)
# Create the network
net = SimpleNet()
net.to(device)
# # inspect the network
from torchsummary import summary
# Print the model summary
summary(net, input_size=(3, img_size, img_size))
# rough check: parameters in fc1 = (img_size * img_size * 3) inputs x 512 units + 512 biases
img_size * img_size * 512 * 3 + 512
# # Training loop
import time
def train(model, nb_epochs):
losses = []
accuracies = []
epoches = 8
start = time.time()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
for epoch in range(nb_epochs):
epoch_loss = 0
epoch_accuracy = 0
for X, y in train_loader:
X = X.to(device)
y = y.to(device)
preds = model(X)
loss = criterion(preds, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
accuracy = (preds.argmax(dim=1) == y).float().mean()
epoch_accuracy += accuracy
epoch_loss += loss
print(".", end="", flush=True)
epoch_accuracy = epoch_accuracy / len(train_loader)
accuracies.append(epoch_accuracy)
epoch_loss = epoch_loss / len(train_loader)
losses.append(epoch_loss)
print(
"\n --- Epoch: {}, train loss: {:.4f}, train acc: {:.4f}, time: {}".format(
epoch, epoch_loss, epoch_accuracy, time.time() - start
)
)
# save model
torch.save(
{
"model_state_dict": model.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
},
"checpoint_epoch_" + str(epoch) + ".pt",
)
# test set accuracy
with torch.no_grad():
test_epoch_loss = 0
test_epoch_accuracy = 0
for test_X, test_y in val_loader:
test_X = test_X.to(device)
test_y = test_y.to(device)
test_preds = model(test_X)
test_loss = criterion(test_preds, test_y)
test_epoch_loss += test_loss
test_accuracy = (test_preds.argmax(dim=1) == test_y).float().mean()
test_epoch_accuracy += test_accuracy
test_epoch_accuracy = test_epoch_accuracy / len(val_loader)
test_epoch_loss = test_epoch_loss / len(val_loader)
print(
"Epoch: {}, test loss: {:.4f}, test acc: {:.4f}, time: {}\n".format(
epoch, test_epoch_loss, test_epoch_accuracy, time.time() - start
)
)
# # Testing loop
def test(model):
criterion = nn.CrossEntropyLoss()
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for x, y in test_loader:
x = x.to(device)
y = y.to(device)
output = model(x)
_, predictions = torch.max(output, 1)
correct += (predictions == y).sum().item()
            test_loss += criterion(output, y).item() * y.size(0)  # accumulate the summed loss over this batch
test_loss /= len(test_loader.dataset)
print(
"Average Loss: ",
test_loss,
" Accuracy: ",
correct,
" / ",
len(test_loader.dataset),
" ",
int(correct / len(test_loader.dataset) * 100),
"%",
)
train(net, 10)
test(net)
# # Change to a CNN model
class CatDogCNN(nn.Module):
def __init__(self, img_size):
super(CatDogCNN, self).__init__()
self.conv1 = nn.Conv2d(3, 32, 3, padding=1)
self.conv2 = nn.Conv2d(32, 64, 3, padding=1)
self.conv3 = nn.Conv2d(64, 128, 3, padding=1)
self.pool = nn.MaxPool2d(2, 2)
reduced_size = (
img_size // 2**3
) # Calculate the reduced size after three pooling layers
self.fc1 = nn.Linear(128 * reduced_size * reduced_size, 512)
self.fc2 = nn.Linear(512, 2)
self.dropout = nn.Dropout(0.5)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = self.pool(F.relu(self.conv3(x)))
x = x.view(x.size(0), -1) # Flatten the tensor
x = self.dropout(x)
x = F.relu(self.fc1(x))
x = self.dropout(x)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
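# shape check (added note): with img_size = 224, three 2x2 max-poolings leave feature maps of
# size 224 // 2**3 = 28, so fc1 receives 128 * 28 * 28 = 100352 flattened inputs.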
net = CatDogCNN(img_size)
net.to(device)
train(net, 10)
test(net)
from tqdm import tqdm
from torchvision import models
model = models.resnet50(pretrained=True)
for param in model.parameters():
param.requires_grad = False
num_ftrs = model.fc.in_features
model.fc = nn.Linear(num_ftrs, 2)
model.to(device)
train(model, 10)
test(model)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/204/129204842.ipynb
|
microsoft-catsvsdogs-dataset
|
shaunthesheep
|
[{"Id": 129204842, "ScriptId": 38392118, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 622287, "CreationDate": "05/11/2023 19:57:04", "VersionNumber": 1.0, "Title": "2_cats_vs_dogs", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 307.0, "LinesInsertedFromPrevious": 307.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185042288, "KernelVersionId": 129204842, "SourceDatasetVersionId": 1003830}]
|
[{"Id": 1003830, "DatasetId": 550917, "DatasourceVersionId": 1032543, "CreatorUserId": 1673856, "LicenseName": "Other (specified in description)", "CreationDate": "03/12/2020 05:34:30", "VersionNumber": 1.0, "Title": "Cats-vs-Dogs", "Slug": "microsoft-catsvsdogs-dataset", "Subtitle": "image dataset for binary classification.", "Description": "### Context\n\n**This data set contains two class of images Cats and Dogs.\nThis can be best utilized in a binary classification problem set in computer vision.**\n\ud83d\udc4d", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 550917, "CreatorUserId": 1673856, "OwnerUserId": 1673856.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1003830.0, "CurrentDatasourceVersionId": 1032543.0, "ForumId": 564512, "Type": 2, "CreationDate": "03/12/2020 05:34:30", "LastActivityDate": "03/12/2020", "TotalViews": 137853, "TotalDownloads": 28311, "TotalVotes": 351, "TotalKernels": 101}]
|
[{"Id": 1673856, "UserName": "shaunthesheep", "DisplayName": "Sachin", "RegisterDate": "02/27/2018", "PerformanceTier": 1}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import torch
import torchvision
import torch.nn as nn # All neural network modules, nn.Linear, nn.Conv2d, BatchNorm, Loss functions
import torchvision.datasets as datasets # Has standard datasets we can import in a nice way
import torchvision.transforms as transforms # Transformations we can perform on our dataset
import torch.nn.functional as F # All functions that don't have any parameters
from torch.utils.data import (
DataLoader,
Dataset,
) # Gives easier dataset managment and creates mini batches
from torchvision.datasets import ImageFolder
import torch.optim as optim # For all Optimization algorithms, SGD, Adam, etc.
from PIL import Image
import random
import shutil
from math import ceil
from torchvision.transforms.functional import to_tensor
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
break
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # use gpu or cpu
img_size = 224
orig_data_path = "/kaggle/input/microsoft-catsvsdogs-dataset/PetImages/"
train_path = "tmp/train/"
val_path = "tmp/val/"
test_path = "tmp/test"
batch_size = 64
classes = ["Dog", "Cat"]
train_ratio = 0.7
val_ratio = 0.15
test_ratio = 0.15
# # Split train-val-test
random.seed(42)
if os.path.isdir("tmp"):
shutil.rmtree("tmp")
else:
print("ok")
for target_path in [train_path, val_path, test_path]:
if not os.path.exists(target_path):
os.makedirs(target_path)
for _class in classes:
print("class: ", _class)
os.makedirs(os.path.join(target_path, _class))
def is_valid_image_file(filename):
valid_extensions = (
".jpg",
".jpeg",
".png",
".ppm",
".bmp",
".pgm",
".tif",
".tiff",
".webp",
)
return any(filename.endswith(ext) for ext in valid_extensions)
# split the dataset into train, test
for _class in classes:
class_path = os.path.join(orig_data_path, _class)
images = [img for img in os.listdir(class_path) if is_valid_image_file(img)]
# images = os.listdir(class_path)
random.shuffle(images)
train_images = images[: ceil(len(images) * train_ratio)]
val_images = images[
ceil(len(images) * train_ratio) : ceil(len(images) * (train_ratio + val_ratio))
]
test_images = images[ceil(len(images) * (train_ratio + val_ratio)) :]
print("Copying files...")
for img in train_images:
shutil.copy(
os.path.join(class_path, img), os.path.join(train_path, _class, img)
)
for img in val_images:
shutil.copy(os.path.join(class_path, img), os.path.join(val_path, _class, img))
for img in test_images:
shutil.copy(os.path.join(class_path, img), os.path.join(test_path, _class, img))
print("Finish")
print(
f"Number of train {_class} = {len(os.listdir(os.path.join(train_path, _class)))}"
)
print(f"Number of val {_class} = {len(os.listdir(os.path.join(val_path, _class)))}")
print(
f"Number of test {_class} = {len(os.listdir(os.path.join(test_path, _class)))}"
)
print("Done")
# # Data Loaders
# initial checks for images
# check image not corrupted
class ImageFolderWithFilter(torchvision.datasets.ImageFolder):
def __init__(self, root, transform=None, target_transform=None):
super(ImageFolderWithFilter, self).__init__(
root, transform=transform, target_transform=target_transform
)
self.samples = self._filter_invalid_images()
def _filter_invalid_images(self):
valid_samples = []
for sample in self.samples:
try:
Image.open(sample[0]).convert("RGB")
valid_samples.append(sample)
except (IOError, SyntaxError) as e:
print(f"Invalid image: {sample[0]} - {e}")
return valid_samples
# check image not B&W or RGBA
class ToTensorEnsure3D:
def __call__(self, pic):
# Convert the image to a 3-channel image if it is grayscale or has an alpha channel
if pic.mode == "L" or pic.mode == "RGBA":
pic = pic.convert("RGB")
return to_tensor(pic)
simple_transforms = transforms.Compose(
[
transforms.Resize((img_size, img_size)),
ToTensorEnsure3D(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
]
)
# -----------------------------------------------------------------
# Load the datasets with ImageFolderWithFilter and apply the simple_transforms
train_dataset = ImageFolderWithFilter(train_path, transform=simple_transforms)
val_dataset = ImageFolderWithFilter(val_path, transform=simple_transforms)
test_dataset = ImageFolderWithFilter(test_path, transform=simple_transforms)
# Create the data loaders
train_loader = DataLoader(
train_dataset, batch_size=batch_size, shuffle=True, num_workers=4
)
val_loader = DataLoader(val_dataset, batch_size=batch_size, shuffle=True, num_workers=4)
test_loader = DataLoader(
test_dataset, batch_size=batch_size, shuffle=False, num_workers=4
)
print("Done")
# # Simple NN fully connected
class SimpleNet(nn.Module):
def __init__(self):
super(SimpleNet, self).__init__()
self.fc1 = nn.Linear(img_size * img_size * 3, 512)
self.fc2 = nn.Linear(512, 256)
self.fc3 = nn.Linear(256, 2)
def forward(self, x):
x = x.view(x.size(0), -1) # Flatten the input
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return F.log_softmax(x, dim=1)
# Create the network
net = SimpleNet()
net.to(device)
# # inspect the network
from torchsummary import summary
# Print the model summary
summary(net, input_size=(3, img_size, img_size))
# rough check: parameters in fc1 = (img_size * img_size * 3) inputs x 512 units + 512 biases
img_size * img_size * 512 * 3 + 512
# # Training loop
import time
def train(model, nb_epochs):
losses = []
accuracies = []
epoches = 8
start = time.time()
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
for epoch in range(nb_epochs):
epoch_loss = 0
epoch_accuracy = 0
for X, y in train_loader:
X = X.to(device)
y = y.to(device)
preds = model(X)
loss = criterion(preds, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
accuracy = (preds.argmax(dim=1) == y).float().mean()
epoch_accuracy += accuracy
epoch_loss += loss
print(".", end="", flush=True)
epoch_accuracy = epoch_accuracy / len(train_loader)
accuracies.append(epoch_accuracy)
epoch_loss = epoch_loss / len(train_loader)
losses.append(epoch_loss)
print(
"\n --- Epoch: {}, train loss: {:.4f}, train acc: {:.4f}, time: {}".format(
epoch, epoch_loss, epoch_accuracy, time.time() - start
)
)
# save model
torch.save(
{
"model_state_dict": model.state_dict(),
"optimizer_state_dict": optimizer.state_dict(),
},
"checpoint_epoch_" + str(epoch) + ".pt",
)
# test set accuracy
with torch.no_grad():
test_epoch_loss = 0
test_epoch_accuracy = 0
for test_X, test_y in val_loader:
test_X = test_X.to(device)
test_y = test_y.to(device)
test_preds = model(test_X)
test_loss = criterion(test_preds, test_y)
test_epoch_loss += test_loss
test_accuracy = (test_preds.argmax(dim=1) == test_y).float().mean()
test_epoch_accuracy += test_accuracy
test_epoch_accuracy = test_epoch_accuracy / len(val_loader)
test_epoch_loss = test_epoch_loss / len(val_loader)
print(
"Epoch: {}, test loss: {:.4f}, test acc: {:.4f}, time: {}\n".format(
epoch, test_epoch_loss, test_epoch_accuracy, time.time() - start
)
)
# # Testing loop
def test(model):
criterion = nn.CrossEntropyLoss()
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for x, y in test_loader:
x = x.to(device)
y = y.to(device)
output = model(x)
_, predictions = torch.max(output, 1)
correct += (predictions == y).sum().item()
            test_loss += criterion(output, y).item() * y.size(0)  # accumulate the summed loss over this batch
test_loss /= len(test_loader.dataset)
print(
"Average Loss: ",
test_loss,
" Accuracy: ",
correct,
" / ",
len(test_loader.dataset),
" ",
int(correct / len(test_loader.dataset) * 100),
"%",
)
train(net, 10)
test(net)
# # Change to a CNN model
class CatDogCNN(nn.Module):
def __init__(self, img_size):
super(CatDogCNN, self).__init__()
self.conv1 = nn.Conv2d(3, 32, 3, padding=1)
self.conv2 = nn.Conv2d(32, 64, 3, padding=1)
self.conv3 = nn.Conv2d(64, 128, 3, padding=1)
self.pool = nn.MaxPool2d(2, 2)
reduced_size = (
img_size // 2**3
) # Calculate the reduced size after three pooling layers
self.fc1 = nn.Linear(128 * reduced_size * reduced_size, 512)
self.fc2 = nn.Linear(512, 2)
self.dropout = nn.Dropout(0.5)
def forward(self, x):
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = self.pool(F.relu(self.conv3(x)))
x = x.view(x.size(0), -1) # Flatten the tensor
x = self.dropout(x)
x = F.relu(self.fc1(x))
x = self.dropout(x)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
net = CatDogCNN(img_size)
net.to(device)
train(net, 10)
test(net)
from tqdm import tqdm
from torchvision import models
model = models.resnet50(pretrained=True)
for param in model.parameters():
param.requires_grad = False
num_ftrs = model.fc.in_features
model.fc = nn.Linear(num_ftrs, 2)
model.to(device)
train(model, 10)
test(model)
| false | 0 | 3,162 | 0 | 3,233 | 3,162 |
||
129204370
|
# for importing libraries and packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from scipy import stats
# for loading and reading file
epa_data = pd.read_csv("/kaggle/input/epa-air-quality/epa_air_quality.csv", index_col=0)
# for taking a look at the first 10 rows of data
epa_data.head(10)
# for generating a table of descriptive statistics
epa_data.describe(include="all")
# for creating the population mean
population_mean = epa_data["aqi"].mean()
# for sampling data with replacement
sampled_data = epa_data.sample(n=50, replace=True, random_state=42)
# for observing the head of the sampled data
sampled_data.head(10)
# for computing the sample mean
sample_mean = sampled_data["aqi"].mean()
# for applying the Central Limit Theorem
# Summary for reference: Imagine repeating the earlier sample with replacement 10,000 times
# and obtaining 10,000 point estimates of the mean.
# In other words, imagine taking 10,000 random samples of 50 AQI values and computing the mean for each sample.
# According to the central limit theorem, the mean of a sampling distribution should be roughly equal to the population mean.
# The following steps will compute the mean of the sampling distribution with 10,000 samples.
# creates an empty list
estimate_list = []
# the list will iterate for 10,000 times
# the estimate list will take a random sample of 50 AQI values from the total population
for i in range(10000):
estimate_list.append(epa_data["aqi"].sample(n=50, replace=True).mean())
# for creating a new dataframe from the list of 10,000 samples
estimate_df = pd.DataFrame(data={"estimate": estimate_list})
estimate_df
# for calculating the mean of the sampling distribution
mean_sample_means = estimate_df["estimate"].mean()
mean_sample_means
# for making a histogram of the distribution
estimate_df["estimate"].hist()
# for calculating standard error
standard_error = estimate_df["estimate"].std()
standard_error
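# added sanity check: the central limit theorem predicts a standard error close to the
# population standard deviation divided by sqrt(n) for samples of size n = 50
theoretical_se = epa_data["aqi"].std() / np.sqrt(50)
print("theoretical standard error:", theoretical_se, "| empirical:", standard_error)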
# for plotting the sampling distribution together with the normal curve implied by the central limit theorem
plt.hist(
estimate_df["estimate"],
bins=25,
density=True,
alpha=0.4,
label="histogram of sample means of 10000 random samples",
)
xmin, xmax = plt.xlim()
x = np.linspace(xmin, xmax, 100) # generate a grid of 100 values from xmin to xmax.
p = stats.norm.pdf(x, population_mean, standard_error)
plt.plot(x, p, "k", linewidth=2, label="normal curve from central limit theorem")
plt.axvline(x=population_mean, color="g", linestyle="solid", label="population mean")
plt.axvline(
x=sample_mean,
color="r",
linestyle="--",
label="sample mean of the first random sample",
)
plt.axvline(
x=mean_sample_means,
color="b",
linestyle=":",
label="mean of sample means of 10000 random samples",
)
plt.title("Sampling distribution of sample mean")
plt.xlabel("sample mean")
plt.ylabel("density")
plt.legend(bbox_to_anchor=(1.04, 1))
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/204/129204370.ipynb
| null | null |
[{"Id": 129204370, "ScriptId": 38411712, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9485427, "CreationDate": "05/11/2023 19:50:25", "VersionNumber": 1.0, "Title": "Statistical Sampling in Python", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 77.0, "LinesInsertedFromPrevious": 77.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# for importing libraries and packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm
from scipy import stats
# for loading and reading file
epa_data = pd.read_csv("/kaggle/input/epa-air-quality/epa_air_quality.csv", index_col=0)
# for taking a look at the first 10 rows of data
epa_data.head(10)
# for generating a table of descriptive statistics
epa_data.describe(include="all")
# for creating the population mean
population_mean = epa_data["aqi"].mean()
# for sampling data with replacement
sampled_data = epa_data.sample(n=50, replace=True, random_state=42)
# for observing the head of the sampled data
sampled_data.head(10)
# for computing the sample mean
sample_mean = sampled_data["aqi"].mean()
# for applying the Central Limit Theorem
# Summary for reference: Imagine repeating the earlier sample with replacement 10,000 times
# and obtaining 10,000 point estimates of the mean.
# In other words, imagine taking 10,000 random samples of 50 AQI values and computing the mean for each sample.
# According to the central limit theorem, the mean of a sampling distribution should be roughly equal to the population mean.
# The following steps will compute the mean of the sampling distribution with 10,000 samples.
# creates an empty list
estimate_list = []
# the list will iterate for 10,000 times
# the estimate list will take a random sample of 50 AQI values from the total population
for i in range(10000):
estimate_list.append(epa_data["aqi"].sample(n=50, replace=True).mean())
# for creating a new dataframe from the list of 10,000 samples
estimate_df = pd.DataFrame(data={"estimate": estimate_list})
estimate_df
# for calculating the mean of the sampling distribution
mean_sample_means = estimate_df["estimate"].mean()
mean_sample_means
# for making a histogram of the distribution
estimate_df["estimate"].hist()
# for calculating standard error
standard_error = estimate_df["estimate"].std()
standard_error
# for plotting the sampling distribution together with the normal curve implied by the central limit theorem
plt.hist(
estimate_df["estimate"],
bins=25,
density=True,
alpha=0.4,
label="histogram of sample means of 10000 random samples",
)
xmin, xmax = plt.xlim()
x = np.linspace(xmin, xmax, 100) # generate a grid of 100 values from xmin to xmax.
p = stats.norm.pdf(x, population_mean, standard_error)
plt.plot(x, p, "k", linewidth=2, label="normal curve from central limit theorem")
plt.axvline(x=population_mean, color="g", linestyle="solid", label="population mean")
plt.axvline(
x=sample_mean,
color="r",
linestyle="--",
label="sample mean of the first random sample",
)
plt.axvline(
x=mean_sample_means,
color="b",
linestyle=":",
label="mean of sample means of 10000 random samples",
)
plt.title("Sampling distribution of sample mean")
plt.xlabel("sample mean")
plt.ylabel("density")
plt.legend(bbox_to_anchor=(1.04, 1))
| false | 0 | 873 | 0 | 873 | 873 |
||
129204994
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
import torch
import warnings
warnings.filterwarnings("ignore")
seed = 42
torch.manual_seed(seed)
np.random.seed(seed)
sns.set_style("darkgrid")
pd.set_option("mode.chained_assignment", None)
def get_datasets(path):
return pd.read_csv(path)
train_path = "/kaggle/input/icr-identify-age-related-conditions/train.csv"
test_path = "/kaggle/input/icr-identify-age-related-conditions/test.csv"
greeks_path = "/kaggle/input/icr-identify-age-related-conditions/greeks.csv"
get_datasets(train_path).head()
get_datasets(greeks_path).head()
get_datasets(test_path).head()
def summary(text, df):
print(f"{text} shape: {df.shape}")
summ = pd.DataFrame(df.dtypes, columns=["dtypes"])
summ["null"] = df.isnull().sum()
summ["unique"] = df.nunique()
summ["min"] = df.min()
summ["median"] = df.median()
summ["max"] = df.max()
summ["mean"] = df.mean()
summ["std"] = df.std()
summ["duplicate"] = df.duplicated().sum()
return summ
# ## Summary of train dataset
# * There are no duplicates.
# * BQ,CB,CC,DU,EL,FC,FL,FS and GL are nulls
# * All the features are in float
summary("train", get_datasets(train_path))
summary("test", get_datasets(test_path))
def replace_null_with_mean(df, column):
    # fill the missing values of a single column with that column's mean
    df[column] = df[column].fillna(df[column].mean())
    return df
train = get_datasets(train_path)
column = ["BQ", "CB", "CC", "DU", "EL", "FC", "FL", "FS", "GL"]
for col in column:
    replace_null_with_mean(train, col)
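# quick check (added): the imputed columns should no longer contain any nulls
print(train[column].isnull().sum())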
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/204/129204994.ipynb
| null | null |
[{"Id": 129204994, "ScriptId": 38412054, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3402809, "CreationDate": "05/11/2023 19:59:03", "VersionNumber": 2.0, "Title": "PyTorch-ICR", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 63.0, "LinesInsertedFromPrevious": 56.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 7.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
import torch
import warnings
warnings.filterwarnings("ignore")
seed = 42
torch.manual_seed(seed)
np.random.seed(seed)
sns.set_style("darkgrid")
pd.set_option("mode.chained_assignment", None)
def get_datasets(path):
return pd.read_csv(path)
train_path = "/kaggle/input/icr-identify-age-related-conditions/train.csv"
test_path = "/kaggle/input/icr-identify-age-related-conditions/test.csv"
greeks_path = "/kaggle/input/icr-identify-age-related-conditions/greeks.csv"
get_datasets(train_path).head()
get_datasets(greeks_path).head()
get_datasets(test_path).head()
def summary(text, df):
print(f"{text} shape: {df.shape}")
summ = pd.DataFrame(df.dtypes, columns=["dtypes"])
summ["null"] = df.isnull().sum()
summ["unique"] = df.nunique()
summ["min"] = df.min()
summ["median"] = df.median()
summ["max"] = df.max()
summ["mean"] = df.mean()
summ["std"] = df.std()
summ["duplicate"] = df.duplicated().sum()
return summ
# ## Summary of train dataset
# * There are no duplicates.
# * BQ,CB,CC,DU,EL,FC,FL,FS and GL are nulls
# * All the features are in float
summary("train", get_datasets(train_path))
summary("test", get_datasets(test_path))
def replace_null_with_mean(df, column):
    # fill the missing values of a single column with that column's mean
    df[column] = df[column].fillna(df[column].mean())
    return df
train = get_datasets(train_path)
column = ["BQ", "CB", "CC", "DU", "EL", "FC", "FL", "FS", "GL"]
for col in column:
    replace_null_with_mean(train, col)
| false | 0 | 582 | 0 | 582 | 582 |
||
129817281
|
# ### Table of contents
# - [Section 1](#1)
# - [Section 2](#2)
# - [Section 3](#3)
# Header 1
#
from IPython.display import HTML, display
def set_background(color):
script = (
"var cell = this.closest('.code_cell');"
"var editor = cell.querySelector('.input_area');"
"editor.style.background='{}';"
"this.parentNode.removeChild(this)"
).format(color)
display(HTML('<img src onerror="{}">'.format(script)))
set_background("#E9FDFF")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/817/129817281.ipynb
| null | null |
[{"Id": 129817281, "ScriptId": 38608782, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11109722, "CreationDate": "05/16/2023 17:16:59", "VersionNumber": 1.0, "Title": "HeatFlux", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 29.0, "LinesInsertedFromPrevious": 29.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# ### Table of contents
# - [Section 1](#1)
# - [Section 2](#2)
# - [Section 3](#3)
# Header 1
#
from IPython.display import HTML, display
def set_background(color):
script = (
"var cell = this.closest('.code_cell');"
"var editor = cell.querySelector('.input_area');"
"editor.style.background='{}';"
"this.parentNode.removeChild(this)"
).format(color)
display(HTML('<img src onerror="{}">'.format(script)))
set_background("#E9FDFF")
| false | 0 | 148 | 0 | 148 | 148 |
||
129817667
|
<jupyter_start><jupyter_text>Preprocessed FOG Dataset
Kaggle dataset identifier: fog-dataset
<jupyter_script># Imports
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import average_precision_score
# Read data
import pandas as pd
data = pd.read_csv("/kaggle/input/fog-dataset/fog_dataset.csv")
# Shuffle
data = data.sample(frac=1).reset_index()
data.drop("index", axis=1, inplace=True)
# Get X and y
X = data[["AccV", "AccML", "AccAP"]]
y = data[["StartHesitation", "Turn", "Walking"]]
# Train/Test
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# batch generator
def batch(x, y, batchsize=20000):
l = len(x)
for ndx in range(0, l, batchsize):
yield x[ndx : min(ndx + batchsize, l)], y[ndx : min(ndx + batchsize, l)]
# Learn model
clf_StartHes = SGDClassifier(loss="log_loss", n_jobs=-1, shuffle=True)
batch_generator = batch(X_train, y_train["StartHesitation"])
for index, (batch_X, batch_y) in enumerate(batch_generator):
clf_StartHes.partial_fit(batch_X, batch_y, classes=[0, 1])
y_predicted = clf_StartHes.predict_proba(X_test)[:, 1]
print(
"StartHesitation:", average_precision_score(y_test["StartHesitation"], y_predicted)
)
clf_Turn = SGDClassifier(loss="log_loss", n_jobs=-1, shuffle=True)
batch_generator = batch(X_train, y_train["Turn"])
for index, (batch_X, batch_y) in enumerate(batch_generator):
clf_Turn.partial_fit(batch_X, batch_y, classes=[0, 1])
y_predicted = clf_Turn.predict_proba(X_test)[:, 1]
print("Turn:", average_precision_score(y_test["Turn"], y_predicted))
clf_Walk = SGDClassifier(loss="log_loss", n_jobs=-1, shuffle=True)
batch_generator = batch(X_train, y_train["Walking"])
for index, (batch_X, batch_y) in enumerate(batch_generator):
clf_Walk.partial_fit(batch_X, batch_y, classes=[0, 1])
y_predicted = clf_Walk.predict_proba(X_test)[:, 1]
print("Walking:", average_precision_score(y_test["Walking"], y_predicted))
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/817/129817667.ipynb
|
fog-dataset
|
aerikg
|
[{"Id": 129817667, "ScriptId": 38519248, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6171471, "CreationDate": "05/16/2023 17:20:49", "VersionNumber": 5.0, "Title": "notebook1127797ef2", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 59.0, "LinesInsertedFromPrevious": 16.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 43.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 186193399, "KernelVersionId": 129817667, "SourceDatasetVersionId": 5573463}]
|
[{"Id": 5573463, "DatasetId": 3168620, "DatasourceVersionId": 5648287, "CreatorUserId": 12406707, "LicenseName": "Unknown", "CreationDate": "05/01/2023 11:15:51", "VersionNumber": 4.0, "Title": "Preprocessed FOG Dataset", "Slug": "fog-dataset", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Data Update 2023-05-01", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3168620, "CreatorUserId": 12406707, "OwnerUserId": 12406707.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5573463.0, "CurrentDatasourceVersionId": 5648287.0, "ForumId": 3232837, "Type": 2, "CreationDate": "04/22/2023 19:25:46", "LastActivityDate": "04/22/2023", "TotalViews": 176, "TotalDownloads": 19, "TotalVotes": 0, "TotalKernels": 4}]
|
[{"Id": 12406707, "UserName": "aerikg", "DisplayName": "\u042d\u0440\u0438\u043a \u0410\u0431\u0434\u0443\u0440\u0430\u0445\u043c\u0430\u043d\u043e\u0432", "RegisterDate": "11/14/2022", "PerformanceTier": 0}]
|
# Imports
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import average_precision_score
# Read data
import pandas as pd
data = pd.read_csv("/kaggle/input/fog-dataset/fog_dataset.csv")
# Shuffle
data = data.sample(frac=1).reset_index()
data.drop("index", axis=1, inplace=True)
# Get X and y
X = data[["AccV", "AccML", "AccAP"]]
y = data[["StartHesitation", "Turn", "Walking"]]
# Train/Test
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# batch generator
def batch(x, y, batchsize=20000):
l = len(x)
for ndx in range(0, l, batchsize):
yield x[ndx : min(ndx + batchsize, l)], y[ndx : min(ndx + batchsize, l)]
# Learn model
clf_StartHes = SGDClassifier(loss="log_loss", n_jobs=-1, shuffle=True)
batch_generator = batch(X_train, y_train["StartHesitation"])
for index, (batch_X, batch_y) in enumerate(batch_generator):
clf_StartHes.partial_fit(batch_X, batch_y, classes=[0, 1])
y_predicted = clf_StartHes.predict_proba(X_test)[:, 1]
print(
"StartHesitation:", average_precision_score(y_test["StartHesitation"], y_predicted)
)
clf_Turn = SGDClassifier(loss="log_loss", n_jobs=-1, shuffle=True)
batch_generator = batch(X_train, y_train["Turn"])
for index, (batch_X, batch_y) in enumerate(batch_generator):
clf_Turn.partial_fit(batch_X, batch_y, classes=[0, 1])
y_predicted = clf_Turn.predict_proba(X_test)[:, 1]
print("Turn:", average_precision_score(y_test["Turn"], y_predicted))
clf_Walk = SGDClassifier(loss="log_loss", n_jobs=-1, shuffle=True)
batch_generator = batch(X_train, y_train["Walking"])
for index, (batch_X, batch_y) in enumerate(batch_generator):
clf_Walk.partial_fit(batch_X, batch_y, classes=[0, 1])
y_predicted = clf_Walk.predict_proba(X_test)[:, 1]
print("Walking:", average_precision_score(y_test["Walking"], y_predicted))
| false | 1 | 693 | 0 | 714 | 693 |
||
129983907
|
import os
import cv2
import numpy as np
import pandas as pd
import seaborn as sb
import matplotlib.pyplot as plt
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Flatten, Conv2D, MaxPool2D, Dropout
from keras.optimizers import SGD, Adam
from keras.callbacks import ReduceLROnPlateau, EarlyStopping
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.utils import shuffle
# load the dataset
x_train = pd.read_csv(
"/kaggle/input/csv-arabic-handwritten/Arabic Handwritten Characters Dataset CSV/csvTrainImages 13440x1024.csv",
header=None,
)
y_train = pd.read_csv(
"/kaggle/input/csv-arabic-handwritten/Arabic Handwritten Characters Dataset CSV/csvTrainLabel 13440x1.csv",
header=None,
)
x_test = pd.read_csv(
"/kaggle/input/csv-arabic-handwritten/Arabic Handwritten Characters Dataset CSV/csvTestImages 3360x1024.csv",
header=None,
)
y_test = pd.read_csv(
"/kaggle/input/csv-arabic-handwritten/Arabic Handwritten Characters Dataset CSV/csvTestLabel 3360x1.csv",
header=None,
)
Raw_of_y = y_train
raw_y_test = y_test
# show its head
x_train.head()
print(
"x_train.shape =",
x_train.shape,
"\ny_train.shape =",
y_train.shape,
"\nx_test.shape =",
x_test.shape,
"\ny_test.shape =",
y_test.shape,
)
# # convert the data frame into array
x_train = x_train.iloc[:, :].values
x_test = x_test.iloc[:, :].values
y_train = y_train.iloc[:, :].values
y_test = y_test.iloc[:, :].values
Raw_of_y = Raw_of_y.iloc[:, :].values
raw_y_test = raw_y_test.iloc[:, :].values
# ### Reshape and normalize Data
x_train = x_train.reshape(-1, 32, 32, 1)
x_test = x_test.reshape(-1, 32, 32, 1)
x_train = x_train / 255.0
x_test = x_test / 255.0
print(x_train.shape, x_test.shape)
print(y_train.shape, y_test.shape)
# ## function to convert number to arabic letter
def number_to_arabic_letter(number):
if number < 1 or number > 28:
return None
# Arabic letters corresponding to numbers 1 to 28
arabic_letters = [
"ا",
"ب",
"ت",
"ث",
"ج",
"ح",
"خ",
"د",
"ذ",
"ر",
"ز",
"س",
"ش",
"ص",
"ض",
"ط",
"ظ",
"ع",
"غ",
"ف",
"ق",
"ك",
"ل",
"م",
"ن",
"ه",
"و",
"ي",
]
return arabic_letters[number - 1]
number_to_arabic_letter(1)
# ### Convert Labels to Categorical
total_classes = len(np.unique(y_train)) + 1
y_train = to_categorical(y_train, total_classes)
y_test = to_categorical(y_test, total_classes)
print(y_train.shape, y_test.shape)
# ## Show some Images from Training data
import matplotlib.pyplot as plt
plt.figure(figsize=(20, 20))
for n, i in enumerate(list(np.random.randint(0, len(x_train), 36))):
plt.subplot(6, 6, n + 1)
plt.imshow(np.rot90(x_train[i][:, ::-1]))
plt.axis("off")
plt.title(number_to_arabic_letter(Raw_of_y[i][0]), fontsize=16)
# ### Build the Model
model = Sequential()
model.add(
    # the images were reshaped to 32x32 above, so the declared input shape must match
    Conv2D(filters=32, kernel_size=(3, 3), activation="relu", input_shape=(32, 32, 1))
)
model.add(MaxPool2D(pool_size=(2, 2), strides=2))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation="relu", padding="same"))
model.add(MaxPool2D(pool_size=(2, 2), strides=2))
model.add(Conv2D(filters=128, kernel_size=(3, 3), activation="relu", padding="valid"))
model.add(MaxPool2D(pool_size=(2, 2), strides=2))
model.add(Flatten())
model.add(Dense(64, activation="relu"))
model.add(Dense(128, activation="relu"))
# output layer
model.add(Dense(29, activation="softmax"))
# compile
model.compile(
optimizer=Adam(learning_rate=0.001),
loss="categorical_crossentropy",
metrics=["accuracy"],
)
# model summary
model.summary()
import time
start = time.time()
history = model.fit(
x_train,
y_train,
epochs=10,
batch_size=128,
verbose=2,
validation_data=(x_test, y_test),
)
end = time.time()
print("\n")
print(f"Execution Time :{round((end-start)/60,3)} minutes")
## show loss and accuracy scores
scores = model.evaluate(x_test, y_test, verbose=0)
print("Validation Loss : {:.2f}".format(scores[0]))
print("Validation Accuracy: {:.2f}".format(scores[1]))
# Plot training loss vs validation loss
plt.figure()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(19, 7))
ax1.plot(history.history["loss"])
ax1.plot(history.history["val_loss"])
ax1.legend(["training", "validation"])
ax1.set_title("Loss")
ax1.set_xlabel("epochs")
## plot training accuracy vs validation accuracy
ax2.plot(history.history["accuracy"])
ax2.plot(history.history["val_accuracy"])
ax2.legend(["training", "validation"])
ax2.set_title("Acurracy")
ax2.set_xlabel("epochs")
# ### Predicted Images vs Real Images Visualization
# Plot the predictions
preds = model.predict(x_test)
fig, axis = plt.subplots(6, 6, figsize=(20, 20))
for i, ax in enumerate(axis.flat):
ax.imshow(np.rot90(x_test[i][:, ::-1]))
ax.axis("off")
ax.set(
title=f"Actual letter is :{number_to_arabic_letter(raw_y_test[i][0])}\nPredicted Alphabet is:{number_to_arabic_letter(preds[i].argmax())} "
)
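# added check: overall test accuracy computed directly from the predicted probabilities
pred_labels = preds.argmax(axis=1)
true_labels = raw_y_test.flatten()
print("Test accuracy:", (pred_labels == true_labels).mean())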
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/983/129983907.ipynb
| null | null |
[{"Id": 129983907, "ScriptId": 38662793, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11036160, "CreationDate": "05/17/2023 23:33:49", "VersionNumber": 1.0, "Title": "Arabic Hand Written Alphabets Recognizer", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 149.0, "LinesInsertedFromPrevious": 63.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 86.0, "LinesInsertedFromFork": 63.0, "LinesDeletedFromFork": 92.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 86.0, "TotalVotes": 1}]
| null | null | null | null |
import os
import cv2
import numpy as np
import pandas as pd
import seaborn as sb
import matplotlib.pyplot as plt
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Flatten, Conv2D, MaxPool2D, Dropout
from keras.optimizers import SGD, Adam
from keras.callbacks import ReduceLROnPlateau, EarlyStopping
from keras.utils import to_categorical
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.utils import shuffle
# load the dataset
x_train = pd.read_csv(
"/kaggle/input/csv-arabic-handwritten/Arabic Handwritten Characters Dataset CSV/csvTrainImages 13440x1024.csv",
header=None,
)
y_train = pd.read_csv(
"/kaggle/input/csv-arabic-handwritten/Arabic Handwritten Characters Dataset CSV/csvTrainLabel 13440x1.csv",
header=None,
)
x_test = pd.read_csv(
"/kaggle/input/csv-arabic-handwritten/Arabic Handwritten Characters Dataset CSV/csvTestImages 3360x1024.csv",
header=None,
)
y_test = pd.read_csv(
"/kaggle/input/csv-arabic-handwritten/Arabic Handwritten Characters Dataset CSV/csvTestLabel 3360x1.csv",
header=None,
)
Raw_of_y = y_train
raw_y_test = y_test
# show its head
x_train.head()
print(
"x_train.shape =",
x_train.shape,
"\ny_train.shape =",
y_train.shape,
"\nx_test.shape =",
x_test.shape,
"\ny_test.shape =",
y_test.shape,
)
# # convert the data frame into array
x_train = x_train.iloc[:, :].values
x_test = x_test.iloc[:, :].values
y_train = y_train.iloc[:, :].values
y_test = y_test.iloc[:, :].values
Raw_of_y = Raw_of_y.iloc[:, :].values
raw_y_test = raw_y_test.iloc[:, :].values
# ### Reshape and normalize Data
x_train = x_train.reshape(-1, 32, 32, 1)
x_test = x_test.reshape(-1, 32, 32, 1)
x_train = x_train / 255.0
x_test = x_test / 255.0
print(x_train.shape, x_test.shape)
print(y_train.shape, y_test.shape)
# ## function to convert number to arabic letter
def number_to_arabic_letter(number):
if number < 1 or number > 28:
return None
# Arabic letters corresponding to numbers 1 to 28
arabic_letters = [
"ا",
"ب",
"ت",
"ث",
"ج",
"ح",
"خ",
"د",
"ذ",
"ر",
"ز",
"س",
"ش",
"ص",
"ض",
"ط",
"ظ",
"ع",
"غ",
"ف",
"ق",
"ك",
"ل",
"م",
"ن",
"ه",
"و",
"ي",
]
return arabic_letters[number - 1]
number_to_arabic_letter(1)
# ### Convert Labels to Categorical
total_classes = len(np.unique(y_train)) + 1
y_train = to_categorical(y_train, total_classes)
y_test = to_categorical(y_test, total_classes)
print(y_train.shape, y_test.shape)
# ## Show some Images from Training data
import matplotlib.pyplot as plt
plt.figure(figsize=(20, 20))
for n, i in enumerate(list(np.random.randint(0, len(x_train), 36))):
plt.subplot(6, 6, n + 1)
plt.imshow(np.rot90(x_train[i][:, ::-1]))
plt.axis("off")
plt.title(number_to_arabic_letter(Raw_of_y[i][0]), fontsize=16)
# ### Build the Model
model = Sequential()
model.add(
    # the images were reshaped to 32x32 above, so the declared input shape must match
    Conv2D(filters=32, kernel_size=(3, 3), activation="relu", input_shape=(32, 32, 1))
)
model.add(MaxPool2D(pool_size=(2, 2), strides=2))
model.add(Conv2D(filters=64, kernel_size=(3, 3), activation="relu", padding="same"))
model.add(MaxPool2D(pool_size=(2, 2), strides=2))
model.add(Conv2D(filters=128, kernel_size=(3, 3), activation="relu", padding="valid"))
model.add(MaxPool2D(pool_size=(2, 2), strides=2))
model.add(Flatten())
model.add(Dense(64, activation="relu"))
model.add(Dense(128, activation="relu"))
# output layer
model.add(Dense(29, activation="softmax"))
# compile
model.compile(
optimizer=Adam(learning_rate=0.001),
loss="categorical_crossentropy",
metrics=["accuracy"],
)
# model summary
model.summary()
import time
start = time.time()
history = model.fit(
x_train,
y_train,
epochs=10,
batch_size=128,
verbose=2,
validation_data=(x_test, y_test),
)
end = time.time()
print("\n")
print(f"Execution Time :{round((end-start)/60,3)} minutes")
## show loss and accuracy scores
scores = model.evaluate(x_test, y_test, verbose=0)
print("Validation Loss : {:.2f}".format(scores[0]))
print("Validation Accuracy: {:.2f}".format(scores[1]))
# Plot training loss vs validation loss
plt.figure()
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(19, 7))
ax1.plot(history.history["loss"])
ax1.plot(history.history["val_loss"])
ax1.legend(["training", "validation"])
ax1.set_title("Loss")
ax1.set_xlabel("epochs")
## plot training accuracy vs validation accuracy
ax2.plot(history.history["accuracy"])
ax2.plot(history.history["val_accuracy"])
ax2.legend(["training", "validation"])
ax2.set_title("Acurracy")
ax2.set_xlabel("epochs")
# ### Predicted Images vs Real Images Visualization
# Plot the predictions
preds = model.predict(x_test)
fig, axis = plt.subplots(6, 6, figsize=(20, 20))
for i, ax in enumerate(axis.flat):
ax.imshow(np.rot90(x_test[i][:, ::-1]))
ax.axis("off")
ax.set(
title=f"Actual letter is :{number_to_arabic_letter(raw_y_test[i][0])}\nPredicted Alphabet is:{number_to_arabic_letter(preds[i].argmax())} "
)
| false | 0 | 1,859 | 1 | 1,859 | 1,859 |
||
129983146
|
# # Predicting Hotel Cancellations
# ## 🏨 Background
# This project is supporting a hotel aimed to increase revenue from their room bookings. They believe that they can use data science to help them reduce the number of cancellations. We are tasked with using appropriate methodology to identify what contributes to whether a booking will be fulfilled or cancelled. The hotel intends to use the results of this project to reduce the chance someone cancels their booking.
# There are two questions to this problem:
# 1. What is causing people to cancel their bookings?
# 2. Can we build a model to predict which bookings will be cancelled?
# Since the second question uses prediction modelling which is a subject I am not familiar with yet, I will use basic EDA (exploratory data analysis) to answer some statistical questions about the data and give some suggestions based on that. This will give us an idea of what might be causing cancellations. In the near future, I aim to come back to this problem and solve the second question.
# import all the libraries you'll need
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
hotels = pd.read_csv("/kaggle/input/hotel-bookings/hotel_bookings.csv")
hotels.head()
# ## The Data
# They have provided us with their bookings data in a file called `hotel_bookings.csv`, which contains the following:
# | Column | Description |
# |------------|--------------------------|
# | `Booking_ID` | Unique identifier of the booking. |
# | `no_of_adults` | The number of adults. |
# | `no_of_children` | The number of children. |
# | `no_of_weekend_nights` | Number of weekend nights (Saturday or Sunday). |
# | `no_of_week_nights` | Number of week nights (Monday to Friday). |
# | `type_of_meal_plan` | Type of meal plan included in the booking. |
# | `required_car_parking_space` | Whether a car parking space is required. |
# | `room_type_reserved` | The type of room reserved. |
# | `lead_time` | Number of days before the arrival date the booking was made. |
# | `arrival_year` | Year of arrival. |
# | `arrival_month` | Month of arrival. |
# | `arrival_date` | Date of the month for arrival. |
# | `market_segment_type` | How the booking was made. |
# | `repeated_guest` | Whether the guest has previously stayed at the hotel. |
# | `no_of_previous_cancellations` | Number of previous cancellations. |
# | `no_of_previous_bookings_not_canceled` | Number of previous bookings that were not canceled. |
# | `avg_price_per_room` | Average price per day of the booking. |
# | `no_of_special_requests` | Count of special requests made as part of the booking. |
# | `booking_status` | Whether the booking was cancelled or not. |
# Source (data has been modified): https://www.kaggle.com/datasets/ahsan81/hotel-reservations-classification-dataset
# count the number of null values in each column and find the percentage compared to the whole column
print(
pd.DataFrame(
{
"Number of null values in column:": hotels.isna().sum(),
"Precentage of null values:": round(hotels.isna().mean() * 100, 2),
}
)
)
# determine if there are any bookings where the number of adults and the number of children columns are equal to zero at the same time
people = hotels[(hotels["no_of_adults"] == 0) & (hotels["no_of_children"] == 0)]
people
# # Data Cleaning
# To address the null values in the dataset, we look at each column individually to see if removing or changing the null values is the best course of action. Using the information we have, we can impute probable estimated values into the dataset instead of just removing unknown values. This will help preserve more data points since we have over 12,000 null values.
# 1. The number of adults & children columns cannot both be zero at the same time. It also does not make sense to impute a mean number of guests, and since less than 2% of these columns have null values, those rows are simply dropped.
# 2. For the number of week and weekend nights booked, the median values (week nights = 2, weekend nights = 1) are used to replace any null values.
# 3. The null values in columns with object type data (room type, meal plan & market segment type) are changed to 'Unknown', which creates another category for the data to fall in.
# 4. Required parking space & special request null values are changed to zero, assuming that customers might have overlooked this option because they do not have a preference (meaning their input would probably have been 0).
# 5. Lead time null values are replaced by imputing the mean and rounding it to a whole number.
# 6. Average room price null values are replaced by imputing the mean and rounding it to 2 decimals.
# 7. For all other numeric values we drop the null values since we cannot make assumptions based on the information we have.
# calculate the mean or median of a column to use when imputing null values
round(hotels["avg_price_per_room"].mean(), 2)
hotels["no_of_week_nights"].median()
hotels["no_of_weekend_nights"].median()
round(hotels["lead_time"].mean())
# create a new catagory 'Unknown' and replace null values in catagorical data columns
hotels["room_type_reserved"] = hotels["room_type_reserved"].fillna("Unknown")
hotels["type_of_meal_plan"] = hotels["type_of_meal_plan"].fillna("Unknown")
hotels["market_segment_type"] = hotels["market_segment_type"].fillna("Unknown")
# change null values into zero where it would have probably been zero
hotels["required_car_parking_space"] = hotels["required_car_parking_space"].fillna(0)
hotels["no_of_special_requests"] = hotels["no_of_special_requests"].fillna(0)
# change null values into the mean and median
hotels["no_of_week_nights"] = hotels["no_of_week_nights"].fillna(
hotels["no_of_week_nights"].median()
)
hotels["no_of_weekend_nights"] = hotels["no_of_weekend_nights"].fillna(
hotels["no_of_weekend_nights"].median()
)
hotels["lead_time"] = hotels["lead_time"].fillna(round(hotels["lead_time"].mean()))
hotels["avg_price_per_room"] = hotels["avg_price_per_room"].fillna(
round(hotels["avg_price_per_room"].mean(), 2)
)
# drop the rest of the null values
hotels_clean = hotels.dropna()
# create histograms to show the basic distribution of categorical data
plt.figure(figsize=(20, 25))
plt.subplot(4, 2, 1)
sns.countplot(data=hotels_clean, x="type_of_meal_plan", hue="booking_status")
plt.title("Number of bookings per meal types (cancelled vs not-canceled)")
plt.subplot(4, 2, 2)
sns.countplot(data=hotels_clean, x="required_car_parking_space", hue="booking_status")
plt.title("Number of bookings with required parking space (cancelled vs not-canceled)")
plt.subplot(4, 2, 3)
sns.countplot(data=hotels_clean, x="room_type_reserved", hue="booking_status")
plt.title("Number of bookings per room type (cancelled vs not-canceled)")
plt.subplot(4, 2, 4)
sns.countplot(data=hotels_clean, x="market_segment_type", hue="booking_status")
plt.title("Number of bookings per market segment type (cancelled vs not-canceled)")
plt.subplot(4, 2, 5)
sns.countplot(data=hotels_clean, x="no_of_special_requests", hue="booking_status")
plt.title("Number of bookings with special requests (cancelled vs not-canceled)")
# Using a couple of histograms for canceled vs not-canceled bookings, we get a quick idea of what customers are interested in when they make a booking. We can see that Meal Plan 1 and Room Type 1 are the most popular options. Customers tend to cancel bookings less when they have at least one special request. Most people book online and required car parking does not seem to have a major effect on booking cancellations.
# # Data Analysis
# In order to create a clearer picture of what a successful booking looks like, we can split the data using the Booking Status column and create two new dataframes. This will also give a more in depth understanding of what influences customers to cancel.
# - the Canceled dataframe has 10,737 entries
# - the Not-canceled dataframe has 21,901 entries
# This suggests that currently, around 33% of all bookings get canceled.
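# added check: overall share of cancelled bookings in the cleaned data
print(hotels_clean["booking_status"].value_counts(normalize=True).round(2))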
# split the data by booking status to create new dataframes
canceled = hotels_clean[hotels_clean["booking_status"] == "Canceled"]
not_canceled = hotels_clean[hotels_clean["booking_status"] == "Not_Canceled"]
# create a histogram to show the distribution of monthly bookings excluding cancelled bookings
plot1 = sns.histplot(
data=not_canceled,
x="arrival_month",
hue="arrival_year",
multiple="dodge",
discrete=True,
palette="viridis",
)
plot1.set_xticklabels(
[
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December",
],
rotation=90,
)
plot1.set_title("Number of bookings per month (excludes canceled bookings)")
plot1.set_xlim(1, 12)
plot1.set_xticks(range(1, 13))
# The histogram above shows that the hotel is more popular during the Spring, Summer and Fall - especially during April, June & October. However, the histogram below suggests that the time of year of the booking has no clear effect on a cancelled booking.
# create a histogram to see the distribution of monthly bookings for cancelled vs not-cancelled bookings
plot2 = sns.histplot(
data=hotels_clean,
x="arrival_month",
hue="booking_status",
multiple="dodge",
discrete=True,
palette="viridis",
)
plot2.set_xticklabels(
[
"January",
"February",
"March",
"April",
"May",
"June",
"July",
"August",
"September",
"October",
"November",
"December",
],
rotation=90,
)
plot2.set_title("Number of bookings per month (cancelled vs not-cancelled)")
plot2.set_xlim(1, 12)
plot2.set_xticks(range(1, 13))
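# added check: cancellation rate per arrival month, to quantify the visual impression above
monthly_cancel_rate = hotels_clean.groupby("arrival_month")["booking_status"].apply(
    lambda s: (s == "Canceled").mean()
)
print(monthly_cancel_rate.round(2))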
# look at the difference between cancelled bookings for repeat guests vs not-cancelled bookings
count_repeated_c = canceled["repeated_guest"].value_counts()
count_repeated_nc = not_canceled["repeated_guest"].value_counts()
print(f"There were {count_repeated_c.iloc[1]} repeated guests who canceled bookings")
print(
f"There were {count_repeated_nc.iloc[1]} repeated guests who did not cancel bookings"
)
# Even though repeated guests make up a small percentage of overall bookings, they tend to honour their booking and cancel less than non-repeat guests.
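# added check: cancellation rate for repeat guests vs first-time guests
print(
    hotels_clean.groupby("repeated_guest")["booking_status"]
    .apply(lambda s: (s == "Canceled").mean())
    .round(2)
)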
plot3 = sns.barplot(
data=hotels_clean, x="booking_status", y="lead_time", palette="twilight"
)
plot3.set(title="Number of cancelled vs honoured bookings based on lead time")
# It's important to notice that lead time plays a major role in whether or not a booking is cancelled. Bookings with longer lead times tend to be cancelled more often than those with shorter lead times. For average price, the story is a bit different. When we look at the average room price column as a whole there is clearly a large number of outliers.
# When we remove the outliers in our average price per room column, it gives a clearer picture of the spread of the average price range. With the image below, we can see that a cancelled booking tend to have a higher average room price while an honoured booking tends to have a lower average room price.
# calculate the first quartile, third quartile and IQR
Q1 = hotels_clean["avg_price_per_room"].quantile(0.25)
Q3 = hotels_clean["avg_price_per_room"].quantile(0.75)
IQR = Q3 - Q1
avg_price_range_less = hotels_clean.loc[
    (hotels_clean["avg_price_per_room"] >= Q1 - 1.5 * IQR)
    & (hotels_clean["avg_price_per_room"] <= Q3 + 1.5 * IQR)
]
# create a boxplot to show the distribution of average room price without outliers
plot5 = sns.boxplot(
data=avg_price_range_less,
x="avg_price_per_room",
y="booking_status",
palette="viridis",
)
plot5.set_title(
"Average price per room for cancelled vs honoured bookings (outliers removed)"
)
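# added check: mean average room price for cancelled vs honoured bookings
print(hotels_clean.groupby("booking_status")["avg_price_per_room"].mean().round(2))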
# There does not seem to be a clear correlation between the average room price and how many days a customer chooses to stay. The graph below does suggest that a large majority of bookings tend to be less than 5 days, and the majority of stays longer than 10 days are cancelled.
# create a scatterplot to see if there is a relationship between number of nights booked and average room price
plot6 = sns.scatterplot(
data=hotels_clean,
x="no_of_week_nights",
y="avg_price_per_room",
hue="booking_status",
palette="magma",
)
plot7 = sns.scatterplot(
data=hotels_clean,
x="no_of_weekend_nights",
y="avg_price_per_room",
hue="booking_status",
palette="magma",
legend=False,
)
plot6.set_title("Relationship between average room price and no of nights booked")
plot6.set_xlim(-1, 18)
plot6.set_xticks(range(0, 18))
# create histograms to see what the distribution of categories for repeated guests are
repeat_guests = hotels_clean[hotels_clean["repeated_guest"] == 1]
plt.figure(figsize=(20, 25))
plt.subplot(4, 2, 1)
sns.countplot(data=repeat_guests, x="type_of_meal_plan", hue="booking_status")
plt.title("Number of bookings per meal types (cancelled vs not-canceled)")
plt.subplot(4, 2, 2)
sns.countplot(data=repeat_guests, x="required_car_parking_space", hue="booking_status")
plt.title("Number of bookings with required parking space (cancelled vs not-canceled)")
plt.subplot(4, 2, 3)
sns.countplot(data=repeat_guests, x="room_type_reserved", hue="booking_status")
plt.title("Number of bookings per room type (cancelled vs not-canceled)")
plt.subplot(4, 2, 4)
sns.countplot(data=repeat_guests, x="market_segment_type", hue="booking_status")
plt.title("Number of bookings per market segment type (cancelled vs not-canceled)")
plt.subplot(4, 2, 5)
sns.countplot(data=repeat_guests, x="no_of_special_requests", hue="booking_status")
plt.title("Number of bookings with special requests (cancelled vs not-canceled)")
<jupyter_start><jupyter_text>Loan Application Data
## About data set
Among all industries, the banking domain makes the heaviest use of analytics and data science methods. This data set gives a taste of working with data from insurance companies and banks, the challenges faced, the strategies used, and so on. This is a classification problem: the data has 615 rows and 14 features to predict whether a loan is approved or not.
The company wants to automate the loan eligibility process (in real time) based on customer details provided while filling in the online application form. These details are Gender, Marital Status, Education, Number of Dependents, Income, Loan Amount, Credit History and others. To automate this process, the task is to identify the customer segments that are eligible for a loan amount so that these customers can be targeted specifically. Only a partial data set is provided here.
Thanks to the UCI data science community for the inspiration.
Kaggle dataset identifier: loan-application-data
<jupyter_script># Predicting Loan Approval using logistic regression and a pytorch neural network with some exploratory data analysis and visualisation.
# installing the correct version of numpy
# Importing the relevant libraries
import torch
import torch.nn as nn
import torch.optim as optim
import pandas as pd
import numpy as np
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.impute import SimpleImputer
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import chi2_contingency
# Read the csv into the dataframe
df = pd.read_csv("/kaggle/input/loan-application-data/df1_loan.csv")
# To get an idea for the data
print(df.head())
# From here, we will preprocess the data, by encoding the category columns
# Encode category columns
categories = [
"Gender",
"Married",
"Education",
"Self_Employed",
"Dependents",
"Property_Area",
"Loan_Status",
]
encoder = OneHotEncoder()
cat_df = pd.DataFrame(
encoder.fit_transform(df[categories]).toarray(),
columns=encoder.get_feature_names_out(categories),
)
df = pd.concat([df.drop(columns=categories), cat_df], axis=1)
# The Total_Income column is stored as text with a leading dollar sign; strip the
# literal "$" (regex=False, since "$" is a regex anchor) and convert to float
df["Total_Income"] = df["Total_Income"].str.replace("$", "", regex=False).astype(float)
# Scale numerical columns
numbers = [
"ApplicantIncome",
"CoapplicantIncome",
"LoanAmount",
"Loan_Amount_Term",
"Total_Income",
]
scalar = StandardScaler()
df[numbers] = scalar.fit_transform(df[numbers])
# Get an idea of the preprocessed data
print(df.head())
# From here, we can do some basic data visualisation
plt.hist(df["ApplicantIncome"])
plt.title("Distribution of Applicant Income")
plt.xlabel("Applicant Income")
plt.ylabel("Frequency")
plt.show()
numeric_columns = df.select_dtypes(include=np.number)
corr = numeric_columns.corr()
sns.heatmap(corr, cmap="coolwarm", annot=False)
plt.title("Correlation Matrix")
plt.show()
# We see some correlation between certain property areas, credit history, and loan approval. Let's perform a chi-squared test to be sure
contingency_table = pd.crosstab(df["Property_Area_Semiurban"], df["Loan_Status_Y"])
chi2, pval, dof, exp_freq = chi2_contingency(contingency_table)
print("Chi-squared test of independence results:")
print("Chi-squared statistic: {:.2f}".format(chi2))
print("p-value: {:.4f}".format(pval))
contingency_table = pd.crosstab(df["Property_Area_Rural"], df["Loan_Status_Y"])
chi2, pval, dof, exp_freq = chi2_contingency(contingency_table)
print("Chi-squared test of independence results:")
print("Chi-squared statistic: {:.2f}".format(chi2))
print("p-value: {:.4f}".format(pval))
contingency_table = pd.crosstab(df["Credit_History"], df["Loan_Status_Y"])
chi2, pval, dof, exp_freq = chi2_contingency(contingency_table)
print("Chi-squared test of independence results:")
print("Chi-squared statistic: {:.2f}".format(chi2))
print("p-value: {:.4f}".format(pval))
# We can see a clear correlation between these variables and loan approval. From here, we can perform a logistic regression using these variables.
# Select input variables
X = df[["Credit_History", "Property_Area_Rural", "Property_Area_Semiurban"]]
# Select target variable
y = df["Loan_Status_Y"]
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=33
)
imputer = SimpleImputer(strategy="mean")
# Use the imputer on the training data and transform the sets
X_train_imputed = imputer.fit_transform(X_train)
X_test_imputed = imputer.transform(X_test)
# Create a logistic regression classifier
lr = LogisticRegression()
lr.fit(X_train_imputed, y_train)
# Predict the test set and calculate the accuracy
y_pred = lr.predict(X_test_imputed)
accuracy = (y_pred == y_test).mean()
print("Accuracy:", accuracy)
# We see a strong accuracy score, implying that we can use this logistic regression in the future.
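# Loan approvals are typically imbalanced, so raw accuracy can look strong even for a
# trivial model. Optional sanity check (a sketch, not part of the analysis above):
# compare against the majority-class baseline and inspect the confusion matrix.
from sklearn.metrics import confusion_matrix

baseline_accuracy = y_test.value_counts(normalize=True).max()
print("Majority-class baseline accuracy:", baseline_accuracy)
print("Confusion matrix:\n", confusion_matrix(y_test, y_pred))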
# Now, for insight, we will compare this to a neural network, using the property area location as input variables.
X = df[["Property_Area_Rural", "Property_Area_Semiurban", "Property_Area_Urban"]].values
y = df["Loan_Status_Y"].values.astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# Define a model
class LoanStatusPredictor(nn.Module):
def __init__(self):
super().__init__()
self.fc1 = nn.Linear(3, 10)
self.fc2 = nn.Linear(10, 1)
    def forward(self, x):
        x = nn.functional.relu(self.fc1(x))
        # torch.sigmoid avoids the deprecation warning raised by nn.functional.sigmoid
        x = torch.sigmoid(self.fc2(x))
        return x
# Define the loss function and optimiser
model = LoanStatusPredictor()
criterion = nn.BCELoss()
optimizer = optim.Adam(model.parameters(), lr=0.01)
# Train said model
epochs = 100
for epoch in range(epochs):
running_loss = 0.0
optimizer.zero_grad()
outputs = model(torch.tensor(X_train, dtype=torch.float32))
loss = criterion(outputs, torch.tensor(y_train, dtype=torch.float32).view(-1, 1))
loss.backward()
optimizer.step()
running_loss += loss.item()
print(f"Epoch {epoch+1}, loss: {running_loss:.4f}")
# Check the model on the test set and print the accuracy
with torch.no_grad():
outputs = model(torch.tensor(X_test, dtype=torch.float32))
predicted = (outputs > 0.5).float()
accuracy = (predicted == torch.tensor(y_test, dtype=torch.float32)).float().mean()
print(f"Test accuracy: {accuracy:.4f}")
<jupyter_start><jupyter_script>import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import random
# ### K-Means implementation description
# This is a simple implementation of the K-Means clustering algorithm in Python. The K-Means algorithm is an unsupervised learning algorithm that groups data points into k clusters based on their features.
# Class K_Means
# __init__(self, data, k=2)
# Initializes the K-Means clustering object.
# Parameters:
# data: A numpy array (matrix) representing the dataset.
# k: The number of clusters. Default is 2.
# _sqr_distance(array1, array2)
# Computes the squared Euclidean distance between two arrays.
# Parameters:
# array1: A numpy array representing a data instance.
# array2: A numpy array representing another data instance.
# Returns: A float representing the squared Euclidean distance between the two arrays.
# _match_data_to_centroid(self, ind)
# Finds the index of the closest centroid for a given data instance.
# Parameters:
# ind: An integer representing the index of the data instance in the dataset.
# Returns: An integer representing the index of the closest centroid.
# _assign_centroids(self)
# Assigns each data instance to the closest centroid.
# get_avg_point(self, centroid_num)
# Computes the average point for a cluster.
# Parameters:
# centroid_num: An integer representing the cluster number.
# Returns: A numpy array representing the average point of the cluster.
# _move_centroids(self)
# Updates the centroids by computing the average point of each cluster.
# compute_cost(self)
# Computes the average squared distance between data instances and their assigned centroids.
# Returns: A float representing the average squared distance.
# fit(self, epochs=5)
# Fits the K-Means clustering algorithm to the dataset and updates centroids for a given number of epochs.
# Parameters:
# epochs: The number of iterations to update the centroids. Default is 5.
class K_Means:
    def __init__(self, data, k=2):
        # data: 2-D numpy array, one row per instance (numeric features only)
        self.data = data
        self.m = self.data.shape[0]  # number of instances
        self.K = k  # number of clusters
        # initialise centroids by sampling K distinct data points
        self.centroids = random.sample(list(self.data), self.K)
        # cluster_assignment[i] holds the cluster index assigned to data row i
        self.cluster_assignment = [None for i in range(self.m)]
@staticmethod
def _sqr_distance(array1, array2):
        # array1, array2: 1-D feature vectors, each representing one data instance
        # (numerical data is assumed; categorical features must be encoded first)
        # squared Euclidean distance
d = np.sum((array1 - array2) ** 2)
return d
def _match_data_to_centroid(self, ind): # returns index of centroid
sq_d = float("INF")
closest_c = 0
for k in range(self.K):
new_d = self._sqr_distance(self.centroids[k], self.data[ind])
if new_d < sq_d:
sq_d = new_d
closest_c = k
return closest_c
def _assign_centroids(self):
for i in range(self.m):
self.cluster_assignment[i] = self._match_data_to_centroid(i)
    def get_avg_point(self, centroid_num):
        # collect all points currently assigned to this cluster
        centroids_data = []
        count = 0
        for i in range(self.m):
            if self.cluster_assignment[i] == centroid_num:
                centroids_data.append(self.data[i])
                count += 1
        if count == 0:
            # empty cluster: keep its current centroid to avoid a divide-by-zero
            return self.centroids[centroid_num]
        centroids_data = np.array(centroids_data)
        return np.sum(centroids_data, axis=0) / count  # mean point of the cluster
def _move_centroids(self):
for k in range(self.K):
new_centroid_position = self.get_avg_point(k)
self.centroids[k] = new_centroid_position
def compute_cost(self):
avg_squared_dist = 0
for i in range(self.m):
instance = self.data[i]
nearst_cluster_ind = self.cluster_assignment[i]
nearst_cluster = self.centroids[nearst_cluster_ind]
distance = self._sqr_distance(nearst_cluster, instance)
avg_squared_dist += distance
return avg_squared_dist / self.m
    def fit(self, epochs=5):  # run assignment/update steps; results are stored in self.cluster_assignment and self.centroids
for i in range(epochs):
self._assign_centroids()
self._move_centroids()
print("Cost = ", self.compute_cost())
# ### Testing code with artificial simple dataset
#
import numpy as np
import random
# Cluster 1
mean1 = [0, 0]
cov1 = [[1, 0], [0, 1]]
cluster1 = np.random.multivariate_normal(mean1, cov1, size=30)
# Cluster 2
mean2 = [6, 6]
cov2 = [[1, 0], [0, 1]]
cluster2 = np.random.multivariate_normal(mean2, cov2, size=20)
# Cluster 3
mean3 = [0, 6]
cov3 = [[1, 0], [0, 1]]
cluster3 = np.random.multivariate_normal(mean3, cov3, size=23)
# Combine clusters
data = np.concatenate((cluster1, cluster2, cluster3), axis=0)
# Shuffle data points
np.random.shuffle(data)
kmeans = K_Means(data, k=3)
kmeans.fit(5)
# VISUALIZING THE RESULT
plt.figure(figsize=(10, 7))
sns.set_style("whitegrid")
sns.set_palette("husl")
# Prepare the data for plotting
plot_data = np.column_stack((data, kmeans.cluster_assignment))
plot_data = pd.DataFrame(plot_data, columns=["Feature 1", "Feature 2", "Cluster"])
# Create a scatter plot of the clustered data
sns.scatterplot(
x="Feature 1", y="Feature 2", hue="Cluster", data=plot_data, palette="Set1", s=200
)
# Plot the centroids
for k in range(kmeans.K):
plt.scatter(
kmeans.centroids[k][0],
kmeans.centroids[k][1],
color="black",
marker="x",
s=250,
linewidths=2,
)
plt.title("K-Means Clustering (k=3)")
plt.show()
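# Optional usage example (a sketch, not part of the notebook above): compute_cost can
# drive a simple "elbow" plot to help choose k. This refits the model once per candidate
# k on the same synthetic data; the variable names here are illustrative.
costs = []
k_values = range(1, 7)
for k in k_values:
    km = K_Means(data, k=k)
    km.fit(5)
    costs.append(km.compute_cost())

plt.figure(figsize=(6, 4))
plt.plot(list(k_values), costs, marker="o")
plt.xlabel("k")
plt.ylabel("Average squared distance to nearest centroid")
plt.title("Elbow plot for choosing k")
plt.show()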
<jupyter_start><jupyter_text>iterative-stratification
Kaggle dataset identifier: iterativestratification
<jupyter_script># Credits to: https://www.kaggle.com/code/datafan07/icr-simple-eda-baseline
import sys
sys.path.append("../input/iterativestratification")
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from iterstrat.ml_stratifiers import MultilabelStratifiedKFold
Train = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv")
greeks = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/greeks.csv")
greeks = pd.merge(Train[["Id", "Class"]], greeks, on="Id")
greeks.head(3)
Train["kfold"] = -1
kf = MultilabelStratifiedKFold(n_splits=5, shuffle=True, random_state=42)
for fold, (train_indicies, valid_indicies) in enumerate(
kf.split(X=Train, y=greeks.iloc[:, 1:-1])
):
Train.loc[valid_indicies, "kfold"] = fold
Train.to_csv("train_folds.csv", index=False)
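# Optional check (a sketch, not part of the notebook above): confirm that the Class
# label is reasonably balanced across the generated folds.
print(pd.crosstab(Train["kfold"], Train["Class"], normalize="index"))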
<jupyter_start><jupyter_text>Historic sales of electric vehicles
The Global EV Outlook is an annual publication that identifies and discusses recent developments in electric mobility across the globe. It is developed with the support of the members of the Electric Vehicles Initiative (EVI).
Combining historical analysis with projections to 2030, the report examines key areas of interest such as electric vehicle and charging infrastructure deployment, energy use, CO2 emissions, battery demand and related policy developments. The report includes policy recommendations that incorporate lessons learned from leading markets to inform policy makers and stakeholders with regard to policy frameworks and market systems for electric vehicle adoption.
Kaggle dataset identifier: historic-sales-of-electric-vehicles
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# The dataset contains the following columns:
# - region: country
# - category: historical
# - parameter: electric vehicle sales
# - mode: cars
# - powertrain: BEV, PHEV
# - year: 2011 - 2022
# - unit: vehicles
# - value: number of vehicles sold
df = pd.read_csv(
"/kaggle/input/historic-sales-of-electric-vehicles/IEA-EV-dataEV salesCarsHistorical.csv"
)
df.head()
# EDA
df.info()
df.columns
df.nunique()
df.region.unique()
# drop the aggregate "World" rows, which are not needed for a per-country analysis
df = df[df["region"] != "World"]
df.shape
import matplotlib.pyplot as plt
import seaborn as sn
sn.heatmap(df.isnull())
df.describe()
plt.figure(figsize=(20, 20))
plt.subplot(3, 2, 1)
sn.barplot(x=df.powertrain, y=df.value)
plt.title("Average sales by powertrain type")
plt.subplot(3, 2, 2)
sn.countplot(x=df.powertrain)
plt.title("Number of records by powertrain type")
plt.subplot(3, 2, 3)
powertrain = df["powertrain"].value_counts()
plt.pie(
    data=powertrain,
    x=powertrain.values,
    labels=powertrain.index,
    shadow=True,
    explode=(0.1, 0),
)
plt.title("Share of records by powertrain type")
plt.subplot(3, 2, 4)
plt.plot(df.year, df.value, "r")
plt.xlabel("Year")
plt.ylabel("Sales (vehicles)")
plt.title("Sales by year")
plt.subplot(3, 2, 5)
sn.countplot(x=df.region)
plt.xticks(rotation=90)
plt.title("Number of records by region")
plt.subplot(3, 2, 6)
sn.barplot(x=df.region, y=df.value)
plt.xticks(rotation=90)
plt.title("Average sales by region")
plt.show()
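# The "Sales by year" panel above plots individual records, which is hard to read.
# Optional sketch (not part of the analysis above): aggregate total reported sales per
# year across all countries in the cleaned data.
yearly_sales = df.groupby("year")["value"].sum()
plt.figure(figsize=(8, 4))
plt.plot(yearly_sales.index, yearly_sales.values, marker="o")
plt.xlabel("Year")
plt.ylabel("Total sales (vehicles)")
plt.title("Total EV sales per year (World rows excluded)")
plt.show()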
# Build a Model
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split as tts
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import r2_score
# split dataset into x and y
x = df.loc[:, ("region", "powertrain", "year")].values
y = df.iloc[:, -1].values
# use label Encoder
lab = LabelEncoder()
x[:, 0] = lab.fit_transform(x[:, 0])
x[:, 1] = lab.fit_transform(x[:, 1])
x[:, 2] = lab.fit_transform(x[:, 2])
# split data into train and test dataset
x_train, x_test, y_train, y_test = tts(x, y, test_size=0.1)
# create model
model = LinearRegression()
model.fit(x_train, y_train)
# evaluate the model on the held-out test set using the R² score
y_pred = model.predict(x_test)
r2_score(y_test, y_pred)
# an R² close to 1 on the held-out test set indicates a good fit; judge the model from the score printed above
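# Label-encoding nominal features such as region imposes an arbitrary numeric ordering,
# which a linear model treats as meaningful. A common alternative (sketched here under
# the same 90/10 split; not part of the analysis above, and the variable names are
# illustrative) is one-hot encoding via a ColumnTransformer.
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder
from sklearn.pipeline import make_pipeline

features = df[["region", "powertrain", "year"]]
target = df["value"]
f_train, f_test, t_train, t_test = tts(features, target, test_size=0.1)

ohe_model = make_pipeline(
    ColumnTransformer(
        [("cats", OneHotEncoder(handle_unknown="ignore"), ["region", "powertrain"])],
        remainder="passthrough",  # keep 'year' as a plain numeric column
    ),
    LinearRegression(),
)
ohe_model.fit(f_train, t_train)
print("R2 with one-hot encoding:", r2_score(t_test, ohe_model.predict(f_test)))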
<jupyter_start><jupyter_script>import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import RandomizedSearchCV
from skopt import BayesSearchCV
from skopt.space import Real, Integer, Categorical
from tqdm.notebook import tqdm
import plotly.express as px
plt.style.use("seaborn-colorblind")
df = pd.read_csv("/kaggle/input/asl-signs/train.csv")
sub_df = df[df["sign"].isin(["cat", "bug"])]
sub_df
# # **EDA : Exploratory Data Analysis**
sub_df.to_csv("sub_df.csv", index=False)
# number of unique signs
sub_df["sign"].value_counts()
sub_df["sign"].value_counts().head(30).sort_values().plot(
kind="barh", figsize=(8, 6), title="Top 30 signs of train data"
)
plt.xlabel("NO. of training samples")
plt.ylabel("Signs")
# This code snippet generates a horizontal bar plot that displays the frequency count of the top 30 sign classes in the dataset. The plot is created using the plot() method from Pandas, with the argument kind="barh" to specify the plot type as a horizontal bar plot. The plot size is specified using figsize=(8, 6). The plot title is set using title="Top 30 signs of train data". The x and y-axis labels are set using plt.xlabel("NO. of training samples") and plt.ylabel("Signs"), respectively. The value_counts() method is used to count the frequency of each unique sign class, head(30) is used to select the top 30 sign classes by frequency, and sort_values() is used to sort the sign classes in ascending order by frequency.
sub_df["sign"].value_counts().tail(30).sort_values().plot(
kind="barh", figsize=(8, 6), title="bottom 30 signs of train data"
)
plt.xlabel("NO. of training samples")
plt.ylabel("Signs")
# The second code snippet generates a similar plot, but this time for the bottom 30 sign classes in the dataset. The tail(30) method is used to select the bottom 30 sign classes by frequency.
# Since "bug" has fewer training samples than "cat", we could use **Data Augmentation** to generate more data; a hedged sketch of one simple option follows below.
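# A minimal augmentation sketch (assumptions: it expects a per-file landmark DataFrame
# with numeric x/y/z columns like the parquet files loaded later; the function name and
# the sigma value are illustrative): add small Gaussian jitter to the coordinates to
# create extra synthetic samples for the under-represented sign.
def jitter_landmarks(landmark_df, sigma=0.01, seed=0):
    # Return a copy of the landmark frame with Gaussian noise added to x, y and z.
    rng = np.random.default_rng(seed)
    augmented = landmark_df.copy()
    for col in ["x", "y", "z"]:
        augmented[col] = augmented[col] + rng.normal(0.0, sigma, size=len(augmented))
    return augmented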
# # Analysing Single Parquet file
# **for sign = "cat"**
p1 = sub_df.query("sign == 'cat'")["path"].iloc[0]
p1
root_dir = "/kaggle/input/asl-signs/"
p1_file = pd.read_parquet(root_dir + p1)
frames = p1_file["frame"]
types = p1_file["type"]
print("frame:\n", frames.value_counts())
print(f"this file has {frames.nunique()} unique frames \n")
print("type:\n", types.value_counts())
print(f"this file has {types.nunique()} unique types")
# This code reads the "frame" and "type" columns from the "p1_file" dataframe and prints some information about the unique values and their counts in each column.
# The first line assigns the "frame" column to the "frames" variable and the "type" column to the "types" variable.
# The second line prints the counts of each unique value in the "frames" column using the "value_counts()" method. This provides an overview of how frequently each unique value appears in the "frames" column.
# The third line prints the number of unique values in the "frames" column using the "nunique()" method.
# The fourth line prints the counts of each unique value in the "types" column using the "value_counts()" method.
# The fifth line prints the number of unique values in the "types" column using the "nunique()" method.
# The output is showing the number of occurrences of each frame and type in the subset of data that contains only the "cat" sign.
# For the "frame" column, there are 11 unique frames (frame numbers 21 to 31), and each frame has 543 samples.
# For the "type" column, there are 4 unique types: "face", "pose", "left_hand", and "right_hand". The "face" type is the most frequent with 5148 samples, followed by "pose" with 363 samples, "left_hand" with 231 samples, and "right_hand" with 231 samples.
# The "frame" column represents the frame number in the raw video where the landmark data was extracted, and it can be used to identify the time when the sign was performed by the candidate. In this case, the sub dataset only includes landmark data for signs of cats, so the output is showing the number of samples in each of the 11 frames where the cat sign was performed.
# # Metadata for training
# Counting the number of rows for each landmark type (face, pose, left_hand, right_hand) in the selected gesture file (specified by the p1 variable)
p1_file["type"].value_counts()
# Counting the number of rows for each landmark type in the selected gesture file after dropping rows with missing x, y, or z coordinates.
p1_file.dropna(subset=["x", "y", "z"])["type"].value_counts()
# The metadata pass described here iterates through each row of the selected subset of the ASL sign language dataset (sub_df) and, for each row, performs the following actions (note that the code further below only concatenates the files; a hedged sketch of this metadata pass is given right after the list):
# 1. Reading in the corresponding ASL sign language gesture file specified by the path column of the current row.
# 2. Counting the number of non-null x, y, and z coordinates for each hand pose type in the current file using the dropna method and creating a dictionary (meta) that stores this information along with the number of unique frames in the file.
# 3. Calculating summary statistics (minimum, maximum, and mean) for the x, y, and z coordinates of each hand landmark in the current file and storing these statistics in the meta dictionary.
# 4. Storing the meta dictionary in another dictionary (metadata) with the current file's path as the key.
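# The code below takes a simpler route and only concatenates the files. A minimal sketch
# of the metadata pass described above (assuming the same root_dir and column names used
# earlier; the function name and dictionary keys are illustrative) could look like this:
def build_landmark_metadata(file_df):
    # Return {file path: summary dict} for every file listed in file_df.
    metadata = {}
    for _, row in file_df.iterrows():
        sample = pd.read_parquet(root_dir + row["path"])
        valid = sample.dropna(subset=["x", "y", "z"])
        meta = {
            "n_frames": sample["frame"].nunique(),
            "non_null_counts_by_type": valid["type"].value_counts().to_dict(),
        }
        for coord in ["x", "y", "z"]:
            meta[f"{coord}_min"] = valid[coord].min()
            meta[f"{coord}_max"] = valid[coord].max()
            meta[f"{coord}_mean"] = valid[coord].mean()
        metadata[row["path"]] = meta
    return metadata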
new_data = []
for i, d in tqdm(sub_df.iterrows(), total=len(sub_df)):
file_path = d["path"]
parquet_file = pd.read_parquet(root_dir + file_path)
new_data.append(parquet_file)
df = pd.concat(new_data) # Combine all Parquet files into one DataFrame
grouped_data = (
df.groupby(["type", "landmark_index"]).mean().reset_index()
) # Groupby 'type' and 'landmark' and calculate mean
grouped_data
X = grouped_data[["x", "y", "z"]]
y = grouped_data["type"]
# Calculate the number of labeled samples
n_labeled = int(0.01 * len(grouped_data)) # 1% of the total samples
# Calculate the number of unlabeled samples
n_unlabeled = len(grouped_data) - n_labeled
indices = np.arange(len(X))
rng = np.random.RandomState(42)
rng.shuffle(indices)
X_labeled = X.iloc[indices[:n_labeled]]
y_labeled = y.iloc[indices[:n_labeled]]
X_unlabeled = X.iloc[indices[n_labeled:]]
y_unlabeled = y.iloc[indices[n_labeled:]]
X_unlabeled = X_unlabeled.reset_index(drop=True)
y_unlabeled = y_unlabeled.reset_index(drop=True)
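# With only 1% of the rows labeled, the seed set is tiny and may not even contain every landmark
# type; this optional check (a sketch) prints the split sizes and the classes present in the seed.
print(f"labeled: {len(X_labeled)}, unlabeled: {len(X_unlabeled)}")
print("classes in the labeled seed set:", sorted(y_labeled.unique()))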
n_iterations = 10
n_samples_per_iter = 10
train_accuracy_list = []
test_accuracy_list = []
# Split the unlabeled dataset into a test set and a smaller unlabeled set
X_test, X_unlabeled, y_test, y_unlabeled = train_test_split(
X_unlabeled, y_unlabeled, test_size=0.3
)
# # Active Learning using SVM
# These code blocks are used to create and train a Support Vector Machine (SVM) classifier.
# The first line creates an instance of an SVM classifier with the "probability" parameter set to "True". This means that the classifier will be able to output probability estimates for each class in addition to the predicted class.
# The second line trains the classifier using the labeled dataset X_labeled and y_labeled. This is done using the "fit" method, which fits the SVM to the data and determines the decision boundary that separates the different classes. Once the SVM is trained, it can be used to make predictions on new, unlabeled data.
# Create an SVM classifier
clf = SVC(probability=True)
# Train the classifier on the initial labeled set
clf.fit(X_labeled, y_labeled)
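# Optional baseline (a sketch, not part of the original flow): accuracy of the seed-trained SVM on
# the held-out test set before any active learning, for comparison with the curves plotted later.
baseline_accuracy = clf.score(X_test, y_test)
print(f"Baseline test accuracy with {len(X_labeled)} labeled samples: {baseline_accuracy:.2f}")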
# These code blocks are used to create a pipeline that includes a StandardScaler for feature scaling, an SVM classifier with RBF kernel, and L2 regularization. The pipeline is then used for hyperparameter tuning using GridSearchCV.
# The first line creates the pipeline using the "make_pipeline" function from Scikit-Learn. The pipeline includes a StandardScaler, which scales the features to have zero mean and unit variance, and an SVM classifier with an RBF kernel. The "probability" parameter is set to "True" to allow for probability estimates, and the "class_weight" parameter is set to "balanced" to account for class imbalance. The "random_state" parameter is set to 42 for reproducibility.
# The second line sets up the parameter grid for the hyperparameter tuning using GridSearchCV. The "C" parameter and "gamma" parameter are both varied over a range of values to find the optimal combination of hyperparameters.
# The third line creates a GridSearchCV object, which takes the pipeline and parameter grid as input. The "cv" parameter is set to 2, so 2-fold cross-validation is used. The GridSearchCV object then searches over the specified hyperparameter space using cross-validation to determine the optimal hyperparameters for the SVM classifier in the pipeline.
# Create a pipeline with L2 regularization and SVM classifier
pipeline = make_pipeline(
StandardScaler(),
SVC(kernel="rbf", probability=True, class_weight="balanced", random_state=42),
)
# Set up the parameter grid for grid search
param_grid = {
"svc__C": [0.001, 0.01, 0.1, 1, 10, 100],
"svc__gamma": [0.001, 0.01, 0.1, 1, 10, 100],
}
# Create the grid search object
grid_search = GridSearchCV(pipeline, param_grid=param_grid, cv=2)
# These code blocks define two functions for measuring uncertainty in a set of predicted probabilities.
# The first function, "least_confident", takes a matrix of predicted probabilities as input and returns one uncertainty score per instance: 1 minus the maximum class probability. The closer the classifier's top probability is to 1, the lower this score, so higher values mark the instances the classifier is least confident about.
# The second function, "entropy", also takes a matrix of predicted probabilities as input and returns an array of values that represent the entropy of the predicted probabilities for each instance in the input. This is done by taking the sum of the product of each probability and its logarithm (with a small epsilon added to avoid numerical instability), and negating the result. The intuition behind this measure is that higher entropy indicates greater uncertainty in the predicted probabilities, since the probabilities are more evenly spread across the classes.
def least_confident(proba):
return 1 - np.max(proba, axis=1)
def entropy(proba):
return -np.sum(proba * np.log2(proba + 1e-10), axis=1)
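# A tiny illustrative example on hypothetical probabilities: the second row is spread more evenly
# across classes, so both scores should flag it as the more uncertain instance.
demo_proba = np.array([[0.9, 0.05, 0.05], [0.4, 0.35, 0.25]])
print("least_confident:", least_confident(demo_proba))
print("entropy:", entropy(demo_proba))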
# This code block defines a function that implements an active learning loop using a specified query strategy. The function takes as input the following:
# - The query strategy to use, given as a string ('least_confident' or 'entropy') that selects how uncertainty scores are computed from the predicted probabilities.
# - The initial labeled set (as X_labeled and y_labeled).
# - The initial unlabeled set (as X_unlabeled and y_unlabeled).
# - The test set (as X_test and y_test).
# - The number of iterations to run (as n_iterations).
# - The number of samples to select per iteration (as n_samples_per_iter).
# The function then iteratively selects the n_samples_per_iter samples from the unlabeled set with the highest uncertainty scores using the specified query strategy. It then adds these samples to the labeled set and retrains the classifier on the updated labeled set. After each iteration, the function calculates the accuracy of the classifier on both the training set and the test set and stores these values in lists for plotting. If there are no more unlabeled samples to select, the function stops early and prints a message.
# The accuracy values collected at each iteration are appended to shared lists and plotted after the calls to this function below.
# It should be noted that this code block assumes that the grid_search and clf objects have already been defined and that the train_accuracy_list and test_accuracy_list arrays have already been initialized outside of the function.
def active_learning_loop(
query_strategy,
X_labeled,
y_labeled,
X_unlabeled,
y_unlabeled,
X_test,
y_test,
n_iterations,
n_samples_per_iter,
test_accuracy_list,
):
# Calculate the initial percentage of labeled data
initial_percentage_labeled = len(X_labeled) / (len(X_labeled) + len(X_unlabeled))
for i in range(n_iterations):
if len(X_unlabeled) == 0:
print("No more unlabeled samples to select.")
break
# Train the classifier on the initial labeled set
grid_search.fit(X_labeled, y_labeled)
# Get the best estimator from the grid search
clf = grid_search.best_estimator_
# Predict the labels and probabilities for the test and unlabeled sets
y_pred_test = clf.predict(X_test)
y_proba_unlabeled = clf.predict_proba(X_unlabeled)
        # Compute uncertainty scores using the query strategy requested by the caller
        if query_strategy == "least_confident":
            uncertainty_scores = least_confident(y_proba_unlabeled)
        elif query_strategy == "entropy":
            uncertainty_scores = entropy(y_proba_unlabeled)
        else:
            raise ValueError("Invalid query strategy")
# Select samples with the highest uncertainty scores
selected_indices = np.argsort(-uncertainty_scores)[:n_samples_per_iter]
# Add selected samples to the labeled set
X_labeled = pd.concat([X_labeled, X_unlabeled.iloc[selected_indices]])
y_labeled = pd.concat([y_labeled, y_unlabeled.iloc[selected_indices]])
# Remove selected samples from the unlabeled set
X_unlabeled = X_unlabeled.drop(X_unlabeled.index[selected_indices])
y_unlabeled = y_unlabeled.drop(y_unlabeled.index[selected_indices])
X_unlabeled = X_unlabeled.reset_index(drop=True)
y_unlabeled = y_unlabeled.reset_index(drop=True)
# Retrain the classifier on the updated labeled set
clf.fit(X_labeled, y_labeled)
# Check the performance of the classifier on the training set
y_pred_train = clf.predict(X_labeled)
train_accuracy = accuracy_score(y_labeled, y_pred_train)
train_accuracy_list.append(train_accuracy)
# Check the performance of the classifier on the test set
y_pred_test = clf.predict(X_test)
test_accuracy = accuracy_score(y_test, y_pred_test)
test_accuracy_list.append(test_accuracy)
# Calculate the percentage of labeled data
percentage_labeled = len(X_labeled) / (len(X_labeled) + len(X_unlabeled))
print(
f"Iteration {i + 1}: Percentage of Labeled Data = {percentage_labeled:.2%}, Train Accuracy = {train_accuracy:.2f}, Test Accuracy = {test_accuracy:.2f}"
)
    if len(X_unlabeled) == 0:
        print(
            "Stopped before all iterations completed: no more unlabeled samples to select."
        )
# Reset the train_accuracy_list and test_accuracy_list before each call
train_accuracy_list_svm = []
test_accuracy_list_svm = []
# Call the active_learning_loop function for each query strategy
active_learning_loop(
"entropy",
X_labeled,
y_labeled,
X_unlabeled,
y_unlabeled,
X_test,
y_test,
n_iterations,
n_samples_per_iter,
test_accuracy_list_svm,
)
test_accuracy_list_1_svm = test_accuracy_list_svm.copy()
# Reset the train_accuracy_list and test_accuracy_list before each call
train_accuracy_list_svm = []
test_accuracy_list_svm = []
active_learning_loop(
"least_confident",
X_labeled,
y_labeled,
X_unlabeled,
y_unlabeled,
X_test,
y_test,
n_iterations,
n_samples_per_iter,
test_accuracy_list_svm,
)
test_accuracy_list_2_svm = test_accuracy_list_svm.copy()
# Calculate mean and standard deviation for each strategy
mean_test_accuracy_1 = np.mean(test_accuracy_list_1_svm)
std_test_accuracy_1 = np.std(test_accuracy_list_1_svm)
mean_test_accuracy_2 = np.mean(test_accuracy_list_2_svm)
std_test_accuracy_2 = np.std(test_accuracy_list_2_svm)
# Create x-axis values for plotting
x_values = np.linspace(0, 100, num=len(test_accuracy_list_1_svm))
# Create upper and lower bounds for shading
upper_entropy = mean_test_accuracy_1 + std_test_accuracy_1
lower_entropy = mean_test_accuracy_1 - std_test_accuracy_1
upper_least_confident = mean_test_accuracy_2 + std_test_accuracy_2
lower_least_confident = mean_test_accuracy_2 - std_test_accuracy_2
# Plot mean test accuracies and shade area between upper and lower bounds for each strategy
plt.plot(x_values, test_accuracy_list_1_svm, label="Entropy")
plt.fill_between(x_values, lower_entropy, upper_entropy, alpha=0.2)
plt.plot(x_values, test_accuracy_list_2_svm, label="Least Confident")
plt.fill_between(x_values, lower_least_confident, upper_least_confident, alpha=0.2)
# Set plot title, axis labels, and legend
plt.title("Active Learning Test Accuracy Comparison")
plt.xlabel("Percentage of Labeled Data")
plt.ylabel("Test Accuracy")
plt.legend()
# Show plot
plt.show()
# # Active Learning with RandomForestClassifier
# This code block creates a `RandomForestClassifier` object and trains it on the initial labeled data `X_labeled` and `y_labeled` using the `fit` method. The `RandomForestClassifier` is an ensemble learning method that combines multiple decision trees to produce a more accurate and robust model. The `random_state` parameter is set to 42 to ensure reproducibility of the results.
# Create a Random Forest classifier
clf = RandomForestClassifier(random_state=42)
# Train the classifier on the initial labeled set
clf.fit(X_labeled, y_labeled)
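# Optional sketch: with only the three coordinate features, the fitted forest's built-in
# feature_importances_ give a quick sense of which coordinate best separates the landmark types.
for name, importance in zip(X_labeled.columns, clf.feature_importances_):
    print(f"{name}: {importance:.3f}")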
# This code block creates a pipeline object that chains two transformers: `StandardScaler` and `RandomForestClassifier` using the `make_pipeline` function.
# The `StandardScaler` is used to standardize the input data by subtracting the mean and dividing by the standard deviation. The `RandomForestClassifier` is an ensemble learning method that combines multiple decision trees to produce a more accurate and robust model. The `random_state` parameter is set to 42 to ensure reproducibility of the results.
# The `param_grid` dictionary specifies the hyperparameters to be tuned using grid search cross-validation. The `n_estimators` hyperparameter controls the number of decision trees in the forest, and the `max_depth` hyperparameter controls the maximum depth of each tree.
# The `GridSearchCV` object is created to perform the grid search with 2-fold cross-validation (cv=2), and it will search over all combinations of hyperparameters specified in `param_grid`.
# Create a pipeline with StandardScaler and Random Forest classifier
pipeline = make_pipeline(StandardScaler(), RandomForestClassifier(random_state=42))
# Set up the parameter grid for grid search
param_grid = {
"randomforestclassifier__n_estimators": [10, 50, 100, 200, 500],
"randomforestclassifier__max_depth": [None, 5, 10, 20, 50],
}
# Create the grid search object
grid_search = GridSearchCV(pipeline, param_grid=param_grid, cv=2)
# Here we are again with the active learning loop, but this time we replace the SVM classifier with the RandomForestClassifier
def active_learning_loop(
query_strategy,
X_labeled,
y_labeled,
X_unlabeled,
y_unlabeled,
X_test,
y_test,
n_iterations,
n_samples_per_iter,
test_accuracy_list,
):
# Calculate the initial percentage of labeled data
initial_percentage_labeled = len(X_labeled) / (len(X_labeled) + len(X_unlabeled))
for i in range(n_iterations):
if len(X_unlabeled) == 0:
print("No more unlabeled samples to select.")
break
# Train the classifier on the initial labeled set
grid_search.fit(X_labeled, y_labeled)
# Get the best estimator from the grid search
clf = grid_search.best_estimator_
# Predict the labels and probabilities for the test and unlabeled sets
y_pred_test = clf.predict(X_test)
y_proba_unlabeled = clf.predict_proba(X_unlabeled)
        # Compute uncertainty scores using the query strategy requested by the caller
        if query_strategy == "least_confident":
            uncertainty_scores = least_confident(y_proba_unlabeled)
        elif query_strategy == "entropy":
            uncertainty_scores = entropy(y_proba_unlabeled)
        else:
            raise ValueError("Invalid query strategy")
# Select samples with the highest uncertainty scores
selected_indices = np.argsort(-uncertainty_scores)[:n_samples_per_iter]
# Add selected samples to the labeled set
X_labeled = pd.concat([X_labeled, X_unlabeled.iloc[selected_indices]])
y_labeled = pd.concat([y_labeled, y_unlabeled.iloc[selected_indices]])
# Remove selected samples from the unlabeled set
X_unlabeled = X_unlabeled.drop(X_unlabeled.index[selected_indices])
y_unlabeled = y_unlabeled.drop(y_unlabeled.index[selected_indices])
X_unlabeled = X_unlabeled.reset_index(drop=True)
y_unlabeled = y_unlabeled.reset_index(drop=True)
# Retrain the classifier on the updated labeled set
clf.fit(X_labeled, y_labeled)
# Check the performance of the classifier on the training set
y_pred_train = clf.predict(X_labeled)
train_accuracy = accuracy_score(y_labeled, y_pred_train)
train_accuracy_list.append(train_accuracy)
# Check the performance of the classifier on the test set
y_pred_test = clf.predict(X_test)
test_accuracy = accuracy_score(y_test, y_pred_test)
test_accuracy_list.append(test_accuracy)
# Calculate the percentage of labeled data
percentage_labeled = len(X_labeled) / (len(X_labeled) + len(X_unlabeled))
print(
f"Iteration {i + 1}: Percentage of Labeled Data = {percentage_labeled:.2%}, Train Accuracy = {train_accuracy:.2f}, Test Accuracy = {test_accuracy:.2f}"
)
    if len(X_unlabeled) == 0:
        print(
            "Stopped before all iterations completed: no more unlabeled samples to select."
        )
# Here's the call for the active learning loop for both queries : 'entropy' and 'least_confident'
# Reset the train_accuracy_list and test_accuracy_list before each call
train_accuracy_list = []
test_accuracy_list = []
# Call the active_learning_loop function for each query strategy
active_learning_loop(
"entropy",
X_labeled,
y_labeled,
X_unlabeled,
y_unlabeled,
X_test,
y_test,
n_iterations,
n_samples_per_iter,
test_accuracy_list,
)
test_accuracy_list_1 = test_accuracy_list.copy()
# Reset the train_accuracy_list and test_accuracy_list before each call
train_accuracy_list = []
test_accuracy_list = []
active_learning_loop(
"least_confident",
X_labeled,
y_labeled,
X_unlabeled,
y_unlabeled,
X_test,
y_test,
n_iterations,
n_samples_per_iter,
test_accuracy_list,
)
test_accuracy_list_2 = test_accuracy_list.copy()
# Calculate mean and standard deviation for each strategy
mean_test_accuracy_1 = np.mean(test_accuracy_list_1)
std_test_accuracy_1 = np.std(test_accuracy_list_1)
mean_test_accuracy_2 = np.mean(test_accuracy_list_2)
std_test_accuracy_2 = np.std(test_accuracy_list_2)
# Create x-axis values for plotting
x_values = np.linspace(0, 100, num=len(test_accuracy_list_1))
# Create upper and lower bounds for shading
upper_entropy = mean_test_accuracy_1 + std_test_accuracy_1
lower_entropy = mean_test_accuracy_1 - std_test_accuracy_1
upper_least_confident = mean_test_accuracy_2 + std_test_accuracy_2
lower_least_confident = mean_test_accuracy_2 - std_test_accuracy_2
# Plot mean test accuracies and shade area between upper and lower bounds for each strategy
plt.plot(x_values, test_accuracy_list_1, label="Entropy")
plt.fill_between(x_values, lower_entropy, upper_entropy, alpha=0.2)
plt.plot(x_values, test_accuracy_list_2, label="Least Confident")
plt.fill_between(x_values, lower_least_confident, upper_least_confident, alpha=0.2)
# Set plot title, axis labels, and legend
plt.title("Active Learning Test Accuracy Comparison")
plt.xlabel("Percentage of Labeled Data")
plt.ylabel("Test Accuracy")
plt.legend()
# Show plot
plt.show()
# # Active Learning with Neural Networks (MLPClassifier)
# **MLPClassifier** is a class in the scikit-learn library used for implementing a Multi-layer Perceptron (MLP) neural network. An MLP is a feedforward artificial neural network that is commonly used for classification tasks. The MLPClassifier allows the user to specify various hyperparameters such as the number of hidden layers, the number of neurons in each hidden layer, the activation function, the solver for weight optimization, and regularization parameters. It uses backpropagation to train the network by adjusting the weights to minimize the error between the predicted and actual outputs. Once trained, the MLP can be used to predict the class labels of new data.
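# A minimal, standalone sketch of an MLPClassifier trained only on the current labeled seed set
# (illustrative default hyperparameters, not the grid-searched ones used in the loop below).
mlp_demo = MLPClassifier(hidden_layer_sizes=(32,), max_iter=1000, random_state=1)
mlp_demo.fit(X_labeled, y_labeled)
print(f"Seed-set MLP test accuracy: {mlp_demo.score(X_test, y_test):.2f}")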
# This is a function definition for an active learning loop that performs iterative model training and query selection to improve the accuracy of a machine learning model. The function takes in several arguments:
# - `query_strategy`: a string indicating the method for selecting the most informative samples to label at each iteration. The options are 'least_confident' or 'entropy'.
# - `X_labeled`: a pandas dataframe containing the features of the labeled data.
# - `y_labeled`: a pandas series containing the labels of the labeled data.
# - `X_unlabeled`: a pandas dataframe containing the features of the unlabeled data.
# - `y_unlabeled`: a pandas series containing the labels of the unlabeled data.
# - `X_test`: a pandas dataframe containing the features of the test data.
# - `y_test`: a pandas series containing the labels of the test data.
# - `n_iterations`: an integer indicating the number of iterations to run the active learning loop.
# - `n_samples_per_iter`: an integer indicating the number of samples to select and label at each iteration.
# The function fits a multi-layer perceptron (MLP) classifier with a grid search over hyperparameters to the labeled data at each iteration. It then uses the chosen query strategy to select the most informative samples from the unlabeled data and adds them to the labeled data for the next iteration. The accuracy scores on the training and test data are calculated and appended to shared lists for each iteration; the best test accuracy and hyperparameters seen so far are tracked inside the function (the explicit return of these values is commented out in this version).
def active_learning_loop(
query_strategy,
X_labeled,
y_labeled,
X_unlabeled,
y_unlabeled,
X_test,
y_test,
n_iterations,
n_samples_per_iter,
test_accuracy_list,
):
# train_accuracy_list = []
# test_accuracy_list = []
best_score = 0
best_params = None
# Calculate the initial percentage of labeled data
initial_percentage_labeled = len(X_labeled) / (len(X_labeled) + len(X_unlabeled))
for i in range(n_iterations):
clf = MLPClassifier(random_state=1, max_iter=1000)
# Define the parameter grid to search over
param_grid = {
"hidden_layer_sizes": [(32,), (64,), (128,), (32, 16), (64, 32), (128, 64)],
"activation": ["relu", "tanh"],
"solver": ["adam", "sgd"],
"alpha": [0.0001, 0.001, 0.01, 0.1],
"learning_rate": ["constant", "adaptive"],
}
# perform a grid search over the hyperparameter grid
from sklearn.model_selection import GridSearchCV
grid_search = GridSearchCV(
clf, param_grid=param_grid, cv=2, n_jobs=-1, verbose=1
)
grid_search.fit(X_labeled, y_labeled)
# get the best model and its test accuracy score
clf = grid_search.best_estimator_
test_score = clf.score(X_test, y_test)
# update the best model if it has the highest test accuracy score so far
if test_score > best_score:
best_score = test_score
best_params = grid_search.best_params_
# fit the model to the labeled data
clf.fit(X_labeled, y_labeled)
# use the query strategy to select the most informative samples
y_proba_unlabeled = clf.predict_proba(X_unlabeled)
if query_strategy == "least_confident":
uncertainty_scores = least_confident(y_proba_unlabeled)
elif query_strategy == "entropy":
uncertainty_scores = entropy(y_proba_unlabeled)
else:
raise ValueError("Invalid query strategy")
selected_indices = np.argsort(-uncertainty_scores)[:n_samples_per_iter]
# add the selected samples to the labeled data
X_labeled = pd.concat([X_labeled, X_unlabeled.iloc[selected_indices]])
y_labeled = pd.concat([y_labeled, y_unlabeled.iloc[selected_indices]])
# remove the selected samples from the unlabeled data
X_unlabeled = X_unlabeled.drop(X_unlabeled.index[selected_indices])
y_unlabeled = y_unlabeled.drop(y_unlabeled.index[selected_indices])
# calculate and store the accuracy scores for the training and test data
y_pred_train = clf.predict(X_labeled)
train_accuracy = accuracy_score(y_labeled, y_pred_train)
train_accuracy_list.append(train_accuracy)
y_pred_test = clf.predict(X_test)
test_accuracy = accuracy_score(y_test, y_pred_test)
test_accuracy_list.append(test_accuracy)
# Calculate the percentage of labeled data
percentage_labeled = len(X_labeled) / (len(X_labeled) + len(X_unlabeled))
# print the iteration number and accuracy scores
print(
f"Iteration {i + 1}: Train Accuracy = {train_accuracy:.2f}, Test Accuracy = {test_accuracy:.2f}"
)
# Return the accuracy lists after all iterations are completed
# return train_accuracy_list, test_accuracy_list, best_score, best_params
    if len(X_unlabeled) == 0:
        print(
            "The unlabeled pool was exhausted during the iterations."
        )
# Reset the train_accuracy_list and test_accuracy_list before each call
train_accuracy_list = []
test_accuracy_list = []
# Call the active_learning_loop function for each query strategy
active_learning_loop(
"entropy",
X_labeled,
y_labeled,
X_unlabeled,
y_unlabeled,
X_test,
y_test,
n_iterations,
n_samples_per_iter,
test_accuracy_list,
)
test_accuracy_list_1 = test_accuracy_list.copy()
# Reset the train_accuracy_list and test_accuracy_list before each call
train_accuracy_list = []
test_accuracy_list = []
active_learning_loop(
"least_confident",
X_labeled,
y_labeled,
X_unlabeled,
y_unlabeled,
X_test,
y_test,
n_iterations,
n_samples_per_iter,
test_accuracy_list,
)
test_accuracy_list_2 = test_accuracy_list.copy()
# Calculate mean and standard deviation for each strategy
mean_test_accuracy_1 = np.mean(test_accuracy_list_1)
std_test_accuracy_1 = np.std(test_accuracy_list_1)
mean_test_accuracy_2 = np.mean(test_accuracy_list_2)
std_test_accuracy_2 = np.std(test_accuracy_list_2)
# Create x-axis values for plotting
x_values = np.linspace(0, 100, num=len(test_accuracy_list_1))
# Create upper and lower bounds for shading
upper_entropy = mean_test_accuracy_1 + std_test_accuracy_1
lower_entropy = mean_test_accuracy_1 - std_test_accuracy_1
upper_least_confident = mean_test_accuracy_2 + std_test_accuracy_2
lower_least_confident = mean_test_accuracy_2 - std_test_accuracy_2
# Plot mean test accuracies and shade area between upper and lower bounds for each strategy
plt.plot(x_values, test_accuracy_list_1, label="Entropy")
plt.fill_between(x_values, lower_entropy, upper_entropy, alpha=0.2)
plt.plot(x_values, test_accuracy_list_2, label="Least Confident")
plt.fill_between(x_values, lower_least_confident, upper_least_confident, alpha=0.2)
# Set plot title, axis labels, and legend
plt.title("Active Learning Test Accuracy Comparison")
plt.xlabel("Percentage of Labeled Data")
plt.ylabel("Test Accuracy")
plt.legend()
# Show plot
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/540/129540734.ipynb
| null | null |
[{"Id": 129540734, "ScriptId": 38484153, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5908714, "CreationDate": "05/14/2023 16:51:47", "VersionNumber": 1.0, "Title": "Isolated Sign Language Recognition-Active Learning", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 627.0, "LinesInsertedFromPrevious": 627.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
| null | null | null | null |
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import make_pipeline
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import RandomizedSearchCV
from skopt import BayesSearchCV
from skopt.space import Real, Integer, Categorical
from tqdm.notebook import tqdm
import plotly.express as px
plt.style.use("seaborn-colorblind")
df = pd.read_csv("/kaggle/input/asl-signs/train.csv")
sub_df = df[df["sign"].isin(["cat", "bug"])]
sub_df
# # **EDA : Exploratory Data Analysis**
sub_df.to_csv("sub_df.csv", index=False)
# number of unique signs
sub_df["sign"].value_counts()
sub_df["sign"].value_counts().head(30).sort_values().plot(
kind="barh", figsize=(8, 6), title="Top 30 signs of train data"
)
plt.xlabel("NO. of training samples")
plt.ylabel("Signs")
# This code snippet generates a horizontal bar plot that displays the frequency count of the top 30 sign classes in the dataset. The plot is created using the plot() method from Pandas, with the argument kind="barh" to specify the plot type as a horizontal bar plot. The plot size is specified using figsize=(8, 6). The plot title is set using title="Top 30 signs of train data". The x and y-axis labels are set using plt.xlabel("NO. of training samples") and plt.ylabel("Signs"), respectively. The value_counts() method is used to count the frequency of each unique sign class, head(30) is used to select the top 30 sign classes by frequency, and sort_values() is used to sort the sign classes in ascending order by frequency.
sub_df["sign"].value_counts().tail(30).sort_values().plot(
kind="barh", figsize=(8, 6), title="bottom 30 signs of train data"
)
plt.xlabel("NO. of training samples")
plt.ylabel("Signs")
# The second code snippet generates a similar plot, but this time for the bottom 30 sign classes in the dataset. The tail(30) method is used to select the bottom 30 sign classes by frequency.
# Since the 'bug' sign has fewer training samples than 'cat', we could use **Data Augmentation** to generate more data; a minimal sketch is given below.
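# A minimal augmentation sketch (hypothetical helper, not used elsewhere in this notebook): once the
# landmark parquet files are loaded, small Gaussian jitter on the coordinates is one simple way to
# create extra training samples for the under-represented sign.
def jitter_landmarks(landmarks_df, sigma=0.005, seed=0):
    # Add small Gaussian noise to the x, y, z columns of a landmark DataFrame; NaNs stay NaN.
    rng = np.random.default_rng(seed)
    augmented = landmarks_df.copy()
    noise = rng.normal(0.0, sigma, size=(len(augmented), 3))
    augmented[["x", "y", "z"]] = augmented[["x", "y", "z"]].to_numpy() + noise
    return augmented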
# # Analysing Single Parquet file
# **for sign = "cat"**
p1 = sub_df.query("sign == 'cat'")["path"].iloc[0]
p1
root_dir = "/kaggle/input/asl-signs/"
p1_file = pd.read_parquet(root_dir + p1)
frames = p1_file["frame"]
types = p1_file["type"]
print("frame:\n", frames.value_counts())
print(f"this file has {frames.nunique()} unique frames \n")
print("type:\n", types.value_counts())
print(f"this file has {types.nunique()} unique types")
| false | 0 | 8,679 | 3 | 8,679 | 8,679 |
||
129540274
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Importing Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder
# # Importing DataSets
df = pd.read_csv("/kaggle/input/playground-series-s3e14/train.csv")
# # Exploring Data
df.head()
df.info()
df.describe()
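# Optional sanity check (a sketch): missing values per column and how each feature correlates with
# the target, which helps spot the strongest predictors before modeling.
print(df.isna().sum().sort_values(ascending=False).head())
print(df.corr()["yield"].sort_values(ascending=False))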
# # Dropping the id column
df.drop(labels="id", axis=1, inplace=True)
# # Removing Outliers
def detect_outliers(df, features, thold):
outlier_indices = []
for fe in features:
# 1st quartile or 25%
q1 = np.percentile(df[fe], 25)
# 3rd quartile or 75%
q3 = np.percentile(df[fe], 75)
# IQR formula
IQR = q3 - q1
# Outlier step
outlier_step = IQR * thold
# Detect outlier and their indices
outlier_list = df[
(df[fe] < q1 - outlier_step) | (df[fe] > q3 + outlier_step)
].index
# store indices
outlier_indices.extend(outlier_list)
return outlier_indices
features = [
"clonesize",
"honeybee",
"bumbles",
"andrena",
"osmia",
"MaxOfUpperTRange",
"MinOfUpperTRange",
"AverageOfUpperTRange",
"MaxOfLowerTRange",
"MinOfLowerTRange",
"AverageOfLowerTRange",
"RainingDays",
"AverageRainingDays",
"fruitset",
"fruitmass",
"seeds",
]
outliers = detect_outliers(df, features, 1.5)
df = df.drop(df.loc[outliers].index, axis=0)
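# Optional check (a sketch): the index list can contain duplicates when a row is an outlier on
# several features, so count unique flagged rows and what remains after dropping them.
print(f"{len(set(outliers))} unique outlier rows flagged; {len(df)} rows remain after dropping them.")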
# # Spliting Data
X = df.drop("yield", axis=1)
y = df["yield"]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# # Pre-processing the Data
minmax_scaler = MinMaxScaler()
X_train_normal = minmax_scaler.fit_transform(X_train)
X_test_normal = minmax_scaler.transform(X_test)
X_train_normal
# # Creating Model
tf.random.set_seed(42)
model = tf.keras.Sequential(
[
tf.keras.layers.Dense(50),
tf.keras.layers.Dense(50),
tf.keras.layers.Dense(50),
tf.keras.layers.Dense(1),
]
)
model.compile(loss="mae", optimizer=tf.keras.optimizers.Adam(), metrics="mae")
history = model.fit(X_train_normal, y_train, epochs=50, verbose=0)
model.evaluate(X_test_normal, y_test)
pd.DataFrame(history.history).plot()
plt.xlabel("epochs")
plt.ylabel("loss")
# # Submission
df_test = pd.read_csv("/kaggle/input/playground-series-s3e14/test.csv")
df_test_X = df_test.drop(labels="id", axis=1)
final_pred = model.predict(minmax_scaler.transform(df_test_X))
final_pred = final_pred.squeeze()
df_submission = pd.DataFrame({"id": df_test["id"], "yield": final_pred})
df_submission.to_csv("submission.csv", index=False)
sub = pd.read_csv("/kaggle/working/submission.csv")
sub.head()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/540/129540274.ipynb
| null | null |
[{"Id": 129540274, "ScriptId": 38517671, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7588642, "CreationDate": "05/14/2023 16:47:29", "VersionNumber": 1.0, "Title": "yield_tf", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 124.0, "LinesInsertedFromPrevious": 124.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Importing Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder
# # Importing DataSets
df = pd.read_csv("/kaggle/input/playground-series-s3e14/train.csv")
# # Exploring Data
df.head()
df.info()
df.describe()
# # Dropping the id column
df.drop(labels="id", axis=1, inplace=True)
# # Removing Outliers
def detect_outliers(df, features, thold):
outlier_indices = []
for fe in features:
# 1st quartile or 25%
q1 = np.percentile(df[fe], 25)
# 3rd quartile or 75%
q3 = np.percentile(df[fe], 75)
# IQR formula
IQR = q3 - q1
# Outlier step
outlier_step = IQR * thold
# Detect outlier and their indices
outlier_list = df[
(df[fe] < q1 - outlier_step) | (df[fe] > q3 + outlier_step)
].index
# store indices
outlier_indices.extend(outlier_list)
return outlier_indices
features = [
"clonesize",
"honeybee",
"bumbles",
"andrena",
"osmia",
"MaxOfUpperTRange",
"MinOfUpperTRange",
"AverageOfUpperTRange",
"MaxOfLowerTRange",
"MinOfLowerTRange",
"AverageOfLowerTRange",
"RainingDays",
"AverageRainingDays",
"fruitset",
"fruitmass",
"seeds",
]
outliers = detect_outliers(df, features, 1.5)
df = df.drop(df.loc[outliers].index, axis=0)
# # Splitting Data
X = df.drop("yield", axis=1)
y = df["yield"]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# # Pre-processing the Data
minmax_scaler = MinMaxScaler()
X_train_normal = minmax_scaler.fit_transform(X_train)
X_test_normal = minmax_scaler.transform(X_test)
X_train_normal
# # Creating Model
tf.random.set_seed(42)
model = tf.keras.Sequential(
[
tf.keras.layers.Dense(50),
tf.keras.layers.Dense(50),
tf.keras.layers.Dense(50),
tf.keras.layers.Dense(1),
]
)
model.compile(loss="mae", optimizer=tf.keras.optimizers.Adam(), metrics="mae")
history = model.fit(X_train_normal, y_train, epochs=50, verbose=0)
model.evaluate(X_test_normal, y_test)
pd.DataFrame(history.history).plot()
plt.xlabel("epochs")
plt.ylabel("loss")
# # Submission
df_test = pd.read_csv("/kaggle/input/playground-series-s3e14/test.csv")
df_test_X = df_test.drop(labels="id", axis=1)
final_pred = model.predict(minmax_scaler.transform(df_test_X))
final_pred = final_pred.squeeze()
df_submission = pd.DataFrame({"id": df_test["id"], "yield": final_pred})
df_submission.to_csv("submission.csv", index=False)
sub = pd.read_csv("/kaggle/working/submission.csv")
sub.head()
| false | 0 | 1,128 | 0 | 1,128 | 1,128 |
||
129540884
|
# ## 2.1 Building the model
import pandas as pd
# __Loading the dataset produced in the first module__
df = pd.read_csv("dataset.csv")
df.head()
# Drop the "Unnamed: 0" attribute, since it duplicates the index.
df = df.drop("Unnamed: 0", axis=1)
df.head()
df = df[["name", "prep_text", "target_code"]]
df.head()
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
# __Selected algorithms__
# The capabilities of the DecisionTreeClassifier, KNeighborsClassifier and LogisticRegression methods cover a wide range of classification tasks. DecisionTreeClassifier can be used for problems with many categorical features and few numerical ones. KNeighborsClassifier can work effectively with data of a non-linear nature. LogisticRegression can handle data with both binary and multiclass labels.
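# A small aside (toy sentences, not this dataset): the CountVectorizer + TfidfTransformer
# pair used in the pipelines below is equivalent to a single TfidfVectorizer.
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
toy_docs = ["the cat sat", "the dog sat", "the cat ran"]
toy_pipe = Pipeline([("vect", CountVectorizer()), ("tfidf", TfidfTransformer())])
print(
    np.allclose(
        toy_pipe.fit_transform(toy_docs).toarray(),
        TfidfVectorizer().fit_transform(toy_docs).toarray(),
    )
)  # True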
# ### DecisionTreeClassifier
dtc = Pipeline(
[
("vect", CountVectorizer()),
("tfidf", TfidfTransformer()),
("dtc", DecisionTreeClassifier()),
]
)
# __Split the data into training and test sets__
# Splitting the data into training and test sets lets us evaluate the model on new data that was not used during training. The training set is used to fit the model, while the test set is used to check how the trained model performs on unseen data. This makes it possible to detect overfitting to the training set and to see how accurately the model generalizes. In short, splitting the data into training and test sets is an important step in building a machine-learning model.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
df["prep_text"], df["target_code"], test_size=0.33, random_state=42
)
dtc.fit(X_train, y_train)
y_pred = dtc.predict(X_test)
print(f"accuracy_score = {accuracy_score(y_pred, y_test)}")
print("classification_report:" + classification_report(y_pred, y_test))
from sklearn.preprocessing import LabelEncoder
from yellowbrick.features import RadViz
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from yellowbrick.classifier import ROCAUC
visualizer = ROCAUC(dtc)
visualizer.fit(X_train, y_train)
visualizer.score(X_test, y_test)
visualizer.show()
# ### KNeighborsClassifier
knn = Pipeline(
[
("vect", CountVectorizer()),
("tfidf", TfidfTransformer()),
("knn", KNeighborsClassifier()),
]
)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
print(f"accuracy_score = {accuracy_score(y_pred, y_test)}")
print("classification_report:" + classification_report(y_pred, y_test))
visualizer = ROCAUC(knn)
visualizer.fit(X_train, y_train)
visualizer.score(X_test, y_test)
visualizer.show()
# ### LogisticRegression
log = Pipeline(
[
("vect", CountVectorizer()),
("tfidf", TfidfTransformer()),
("mul", LogisticRegression()),
]
)
log.fit(X_train, y_train)
y_pred = log.predict(X_test)
print(f"accuracy_score = {accuracy_score(y_pred, y_test)}")
print("classification_report:" + classification_report(y_pred, y_test))
visualizer = ROCAUC(log)
visualizer.fit(X_train, y_train)
visualizer.score(X_test, y_test)
visualizer.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/540/129540884.ipynb
| null | null |
[{"Id": 129540884, "ScriptId": 38519008, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13417746, "CreationDate": "05/14/2023 16:53:03", "VersionNumber": 1.0, "Title": "notebook6dd2576f55", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 108.0, "LinesInsertedFromPrevious": 108.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# ## 2.1 Building the model
import pandas as pd
# __Loading the dataset produced in the first module__
df = pd.read_csv("dataset.csv")
df.head()
# Drop the "Unnamed: 0" attribute, since it duplicates the index.
df = df.drop("Unnamed: 0", axis=1)
df.head()
df = df[["name", "prep_text", "target_code"]]
df.head()
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
# __Selected algorithms__
# The capabilities of the DecisionTreeClassifier, KNeighborsClassifier and LogisticRegression methods cover a wide range of classification tasks. DecisionTreeClassifier can be used for problems with many categorical features and few numerical ones. KNeighborsClassifier can work effectively with data of a non-linear nature. LogisticRegression can handle data with both binary and multiclass labels.
# ### DecisionTreeClassifier
dtc = Pipeline(
[
("vect", CountVectorizer()),
("tfidf", TfidfTransformer()),
("dtc", DecisionTreeClassifier()),
]
)
# __Split the data into training and test sets__
# Splitting the data into training and test sets lets us evaluate the model on new data that was not used during training. The training set is used to fit the model, while the test set is used to check how the trained model performs on unseen data. This makes it possible to detect overfitting to the training set and to see how accurately the model generalizes. In short, splitting the data into training and test sets is an important step in building a machine-learning model.
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
df["prep_text"], df["target_code"], test_size=0.33, random_state=42
)
dtc.fit(X_train, y_train)
y_pred = dtc.predict(X_test)
print(f"accuracy_score = {accuracy_score(y_pred, y_test)}")
print("classification_report:" + classification_report(y_pred, y_test))
from sklearn.preprocessing import LabelEncoder
from yellowbrick.features import RadViz
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from yellowbrick.classifier import ROCAUC
visualizer = ROCAUC(dtc)
visualizer.fit(X_train, y_train)
visualizer.score(X_test, y_test)
visualizer.show()
# ### KNeighborsClassifier
knn = Pipeline(
[
("vect", CountVectorizer()),
("tfidf", TfidfTransformer()),
("knn", KNeighborsClassifier()),
]
)
knn.fit(X_train, y_train)
y_pred = knn.predict(X_test)
print(f"accuracy_score = {accuracy_score(y_pred, y_test)}")
print("classification_report:" + classification_report(y_pred, y_test))
visualizer = ROCAUC(knn)
visualizer.fit(X_train, y_train)
visualizer.score(X_test, y_test)
visualizer.show()
# ### LogisticRegression
log = Pipeline(
[
("vect", CountVectorizer()),
("tfidf", TfidfTransformer()),
("mul", LogisticRegression()),
]
)
log.fit(X_train, y_train)
y_pred = log.predict(X_test)
print(f"accuracy_score = {accuracy_score(y_pred, y_test)}")
print("classification_report:" + classification_report(y_pred, y_test))
visualizer = ROCAUC(log)
visualizer.fit(X_train, y_train)
visualizer.score(X_test, y_test)
visualizer.show()
| false | 0 | 1,257 | 0 | 1,257 | 1,257 |
||
129540979
|
<jupyter_start><jupyter_text>cuave-audio-wav
Kaggle dataset identifier: cuave-audio-wav
<jupyter_script>import os
import librosa
import cv2
import numpy as np
from keras.preprocessing import image
from keras.utils import to_categorical
# Define the input shapes for audio and video data
audio_input_shape = (800, 1) # Assumes 800 MFCC coefficients
video_input_shape = (64, 64, 3) # Assumes 64x64 RGB frames
# Define the path to your video files
video_folder = "/kaggle/input/fyp-fyp/CodesUsed"
# Initialize the lists to store the audio and video features
audio_features = []
video_features = []
labels = []
# Process each video file
counter = 0
for filename in os.listdir(video_folder):
if filename.endswith(".mp4"):
counter += 1
print(counter, filename)
# Extract the label from the filename
label = int(filename[0])
labels.append(label)
# Read the video file
video_path = os.path.join(video_folder, filename)
cap = cv2.VideoCapture(video_path)
# Extract video frames
frames = []
while True:
ret, frame = cap.read()
if not ret:
break
frame = cv2.resize(frame, (video_input_shape[1], video_input_shape[0]))
frames.append(frame)
cap.release()
# Convert video frames to array
video_data = np.array(frames)
# Extract audio features using librosa
audio_data, _ = librosa.load(
video_path, sr=22050
) # Adjust the sampling rate if necessary
mfccs = librosa.feature.mfcc(
y=audio_data, sr=22050, n_mfcc=audio_input_shape[0]
)
mfccs = np.expand_dims(mfccs.T, axis=-1)
# Store the features
video_features.append(video_data)
audio_features.append(mfccs)
len(audio_features)
video_path = "/kaggle/working/video_features.npy"
audio_path = "/kaggle/working/audio_features.npy"
# Convert the lists to arrays
video_features = np.array(video_features)
audio_features = np.array(audio_features)
labels = to_categorical(labels)
np.save(video_path, video_features)
np.save(audio_path, audio_features)
# Split the data into training and testing sets (adjust the split ratio as needed)
split_ratio = 0.8
split_index = int(len(video_features) * split_ratio)
train_video = video_features[:split_index]
train_audio = audio_features[:split_index]
train_labels = labels[:split_index]
test_video = video_features[split_index:]
test_audio = audio_features[split_index:]
test_labels = labels[split_index:]
# Define the input shapes for audio and video data
audio_input_shape = (800, 1) # Assumes 800 MFCC coefficients
video_input_shape = (64, 64, 3) # Assumes 64x64 RGB frames
from keras.layers import (
Input,
Conv1D,
MaxPooling1D,
Dropout,
Flatten,
Dense,
Conv2D,
MaxPooling2D,
concatenate,
)
from keras.models import Model
import tensorflow.keras as keras
# Define the audio processing stream
audio_input = Input(shape=audio_input_shape, name="audio_input")
audio_conv1 = Conv1D(filters=16, kernel_size=3, padding="same", activation="relu")(
audio_input
)
audio_pool1 = MaxPooling1D(pool_size=2)(audio_conv1)
audio_drop1 = Dropout(0.25)(audio_pool1)
audio_conv2 = Conv1D(filters=32, kernel_size=3, padding="same", activation="relu")(
audio_drop1
)
audio_pool2 = MaxPooling1D(pool_size=2)(audio_conv2)
audio_drop2 = Dropout(0.25)(audio_pool2)
audio_flatten = Flatten()(audio_drop2)
audio_output = Dense(units=128, activation="relu")(audio_flatten)
# Define the video processing stream
video_input = Input(shape=video_input_shape, name="video_input")
video_conv1 = Conv2D(filters=32, kernel_size=(3, 3), padding="same", activation="relu")(
video_input
)
video_pool1 = MaxPooling2D(pool_size=(2, 2))(video_conv1)
video_drop1 = Dropout(0.25)(video_pool1)
video_conv2 = Conv2D(filters=64, kernel_size=(3, 3), padding="same", activation="relu")(
video_drop1
)
video_pool2 = MaxPooling2D(pool_size=(2, 2))(video_conv2)
video_drop2 = Dropout(0.25)(video_pool2)
video_flatten = Flatten()(video_drop2)
video_output = Dense(units=128, activation="relu")(video_flatten)
# Concatenate the audio and video outputs
merged = concatenate([audio_output, video_output])
# Add fully connected layers for classification
dense1 = Dense(units=64, activation="relu")(merged)
dense2 = Dense(units=10, activation="softmax")(dense1)
# Create the model
model = Model(inputs=[audio_input, video_input], outputs=dense2)
# Compile the model
model.compile(
loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adam(),
metrics=["accuracy"],
)
# Print the model summary
model.summary()
# Cast the first training samples to float arrays before feeding them to the model
train_audio_col = train_audio[:1]
train_audio_float = np.vstack(train_audio_col).astype("float")
train_audio_float.dtype  # inspect the dtype after the cast
train_video_col = train_video[:1]
train_video_float = np.vstack(train_video_col).astype("float")
test_audio_col = test_audio[:1]
test_audio_float = np.vstack(test_audio_col).astype("float")
test_video_col = test_video[:1]
test_video_float = np.vstack(test_video_col).astype("float")
len(train_video)
train_video.shape
test_audio.shape
test_video.shape
train_labels.shape
test_labels.shape
# train_audio_float = train_audio_float[:1000]
# test_audio_float = test_audio_float[:1000]
# train_video_float = train_video_float[:1000]
# test_video_float = test_video_float[:1000]
# Train the model
# model.fit([train_audio_float, train_video_float], train_labels, batch_size=32, epochs=10, validation_data=([test_audio_float, test_video_float], test_labels))
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/540/129540979.ipynb
|
cuave-audio-wav
|
nkmnkmk
|
[{"Id": 129540979, "ScriptId": 38512203, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9851002, "CreationDate": "05/14/2023 16:54:02", "VersionNumber": 1.0, "Title": "notebookb86fea2867", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 165.0, "LinesInsertedFromPrevious": 165.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185707739, "KernelVersionId": 129540979, "SourceDatasetVersionId": 5682406}, {"Id": 185707738, "KernelVersionId": 129540979, "SourceDatasetVersionId": 5681761}]
|
[{"Id": 5682406, "DatasetId": 3266759, "DatasourceVersionId": 5757970, "CreatorUserId": 9738795, "LicenseName": "Unknown", "CreationDate": "05/14/2023 11:45:42", "VersionNumber": 1.0, "Title": "cuave-audio-wav", "Slug": "cuave-audio-wav", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3266759, "CreatorUserId": 9738795, "OwnerUserId": 9738795.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5682406.0, "CurrentDatasourceVersionId": 5757970.0, "ForumId": 3332375, "Type": 2, "CreationDate": "05/14/2023 11:45:42", "LastActivityDate": "05/14/2023", "TotalViews": 2, "TotalDownloads": 0, "TotalVotes": 0, "TotalKernels": 0}]
|
[{"Id": 9738795, "UserName": "nkmnkmk", "DisplayName": "NKMNKM", "RegisterDate": "02/22/2022", "PerformanceTier": 0}]
|
import os
import librosa
import cv2
import numpy as np
from keras.preprocessing import image
from keras.utils import to_categorical
# Define the input shapes for audio and video data
audio_input_shape = (800, 1) # Assumes 800 MFCC coefficients
video_input_shape = (64, 64, 3) # Assumes 64x64 RGB frames
# Define the path to your video files
video_folder = "/kaggle/input/fyp-fyp/CodesUsed"
# Initialize the lists to store the audio and video features
audio_features = []
video_features = []
labels = []
# Process each video file
counter = 0
for filename in os.listdir(video_folder):
if filename.endswith(".mp4"):
counter += 1
print(counter, filename)
# Extract the label from the filename
label = int(filename[0])
labels.append(label)
# Read the video file
video_path = os.path.join(video_folder, filename)
cap = cv2.VideoCapture(video_path)
# Extract video frames
frames = []
while True:
ret, frame = cap.read()
if not ret:
break
frame = cv2.resize(frame, (video_input_shape[1], video_input_shape[0]))
frames.append(frame)
cap.release()
# Convert video frames to array
video_data = np.array(frames)
# Extract audio features using librosa
audio_data, _ = librosa.load(
video_path, sr=22050
) # Adjust the sampling rate if necessary
mfccs = librosa.feature.mfcc(
y=audio_data, sr=22050, n_mfcc=audio_input_shape[0]
)
mfccs = np.expand_dims(mfccs.T, axis=-1)
# Store the features
video_features.append(video_data)
audio_features.append(mfccs)
len(audio_features)
video_path = "/kaggle/working/video_features.npy"
audio_path = "/kaggle/working/audio_features.npy"
# Convert the lists to arrays
video_features = np.array(video_features)
audio_features = np.array(audio_features)
labels = to_categorical(labels)
np.save(video_path, video_features)
np.save(audio_path, audio_features)
# Split the data into training and testing sets (adjust the split ratio as needed)
split_ratio = 0.8
split_index = int(len(video_features) * split_ratio)
train_video = video_features[:split_index]
train_audio = audio_features[:split_index]
train_labels = labels[:split_index]
test_video = video_features[split_index:]
test_audio = audio_features[split_index:]
test_labels = labels[split_index:]
# Define the input shapes for audio and video data
audio_input_shape = (800, 1) # Assumes 800 MFCC coefficients
video_input_shape = (64, 64, 3) # Assumes 64x64 RGB frames
from keras.layers import (
Input,
Conv1D,
MaxPooling1D,
Dropout,
Flatten,
Dense,
Conv2D,
MaxPooling2D,
concatenate,
)
from keras.models import Model
import tensorflow.keras as keras
# Define the audio processing stream
audio_input = Input(shape=audio_input_shape, name="audio_input")
audio_conv1 = Conv1D(filters=16, kernel_size=3, padding="same", activation="relu")(
audio_input
)
audio_pool1 = MaxPooling1D(pool_size=2)(audio_conv1)
audio_drop1 = Dropout(0.25)(audio_pool1)
audio_conv2 = Conv1D(filters=32, kernel_size=3, padding="same", activation="relu")(
audio_drop1
)
audio_pool2 = MaxPooling1D(pool_size=2)(audio_conv2)
audio_drop2 = Dropout(0.25)(audio_pool2)
audio_flatten = Flatten()(audio_drop2)
audio_output = Dense(units=128, activation="relu")(audio_flatten)
# Define the video processing stream
video_input = Input(shape=video_input_shape, name="video_input")
video_conv1 = Conv2D(filters=32, kernel_size=(3, 3), padding="same", activation="relu")(
video_input
)
video_pool1 = MaxPooling2D(pool_size=(2, 2))(video_conv1)
video_drop1 = Dropout(0.25)(video_pool1)
video_conv2 = Conv2D(filters=64, kernel_size=(3, 3), padding="same", activation="relu")(
video_drop1
)
video_pool2 = MaxPooling2D(pool_size=(2, 2))(video_conv2)
video_drop2 = Dropout(0.25)(video_pool2)
video_flatten = Flatten()(video_drop2)
video_output = Dense(units=128, activation="relu")(video_flatten)
# Concatenate the audio and video outputs
merged = concatenate([audio_output, video_output])
# Add fully connected layers for classification
dense1 = Dense(units=64, activation="relu")(merged)
dense2 = Dense(units=10, activation="softmax")(dense1)
# Create the model
model = Model(inputs=[audio_input, video_input], outputs=dense2)
# Compile the model
model.compile(
loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adam(),
metrics=["accuracy"],
)
# Print the model summary
model.summary()
# Cast the first training samples to float arrays before feeding them to the model
train_audio_col = train_audio[:1]
train_audio_float = np.vstack(train_audio_col).astype("float")
train_audio_float.dtype  # inspect the dtype after the cast
train_video_col = train_video[:1]
train_video_float = np.vstack(train_video_col).astype("float")
test_audio_col = test_audio[:1]
test_audio_float = np.vstack(test_audio_col).astype("float")
test_video_col = test_video[:1]
test_video_float = np.vstack(test_video_col).astype("float")
len(train_video)
train_video.shape
test_audio.shape
test_video.shape
train_labels.shape
test_labels.shape
# train_audio_float = train_audio_float[:1000]
# test_audio_float = test_audio_float[:1000]
# train_video_float = train_video_float[:1000]
# test_video_float = test_video_float[:1000]
# Train the model
# model.fit([train_audio_float, train_video_float], train_labels, batch_size=32, epochs=10, validation_data=([test_audio_float, test_video_float], test_labels))
| false | 0 | 1,743 | 0 | 1,769 | 1,743 |
||
129107895
|
# # Importing Libraries
import re # for text preprocessing
import pandas as pd # for data manipulation and analysis
import json  # to load JSON files
import numpy as np  # for numerical operations
import matplotlib.pyplot as plt  # for visualization
import seaborn as sns  # for visualization
from tabulate import tabulate  # to create tables
from ipywidgets import interact  # to make interactive functions
pd.set_option("display.max_columns", 30)  # to limit the number of columns displayed
from datetime import datetime # for preprocessing the data for time-series analysis
import warnings
warnings.filterwarnings("ignore")
# import mysql.connector as sql
from google.colab import drive # to access the drive files
drive.mount("/content/drive")
data_df = pd.read_csv("Clean_data.csv")
# # **Natural Language Processing**
import spacy
from spacy.lang.en import English
nlp = spacy.load("en_core_web_lg")
data_df.drop(columns=["Unnamed: 0", "index"], inplace=True)
data_df.head(2)
import re
from spacy.lang.en.stop_words import STOP_WORDS
def Textclean(text):
doc = nlp(text)
vector = []
for i in doc:
if (i.is_stop == False) and (i.is_punct == False) and (i.is_digit == False):
if i not in STOP_WORDS:
vector.append(i.lemma_)
a = " ".join(vector)
a = re.sub(r"\d", " ", a)
a = re.sub(r"\W+", " ", a)
a = re.sub("\s", " ", a)
a = a.lower().strip()
return a
def mostcommon(data_df, i, j):
from collections import Counter
counts = Counter()
for i in data_df[i]:
counts[i] += 1
a = counts.most_common(j)
return pd.DataFrame(a, columns=["Name", "Count"])
text = "hello yogesh @34 yadav wassup"
Textclean(text)
def vector(text):
doc = nlp(text)
return doc.vector
text = "hello yogesh yadav wassup"
a = vector(text)
a.shape
# data_df['reviewText']=data_df['reviewText'].apply(Textclean)
data_df["reviewText"][0]
# data_df['summary']=data_df['summary'].apply(Textclean)
data_df["summary"][0]
# data_df['review']=data_df['reviewText']+' '+data_df['summary']
# data_df['review']=data_df['review'].apply(Textclean)
data_df["review"][0]
# data_df['vector']=data_df['review'].apply(vector)
# data_df['vector'][0].shape
data_df["Sentiments"] = data_df["overall"].map(
{1: "Negative", 2: "Negative", 3: "Neutral", 4: "Positive", 5: "Positive"}
)
plt.plot(data_df["Sentiments"].value_counts())
# data_df.to_csv('Clean_data.csv',header=True,index=True)
def sentanalysis(Sentiment="Positive", cat="Industrial & Scientific"):
data_df4 = data_df[
(data_df["main_cat"] == cat) & (data_df["Sentiments"] == Sentiment)
]
a = mostcommon(data_df4, "brand", 10)
# display(a)
a.plot(kind="bar", x="Name", rot=90)
interact(
sentanalysis,
Sentiment=["Positive", "Negative", "Neutral"],
cat=[x for x in data_df["main_cat"].unique()],
)
# # Sentiment Analysis - Classification
import re # for text preprocessing
import pandas as pd # for data manipulation and analysis
import json  # to load JSON files
import numpy as np  # for numerical operations
import matplotlib.pyplot as plt  # for visualization
import seaborn as sns  # for visualization
from tabulate import tabulate  # to create tables
from ipywidgets import interact  # to make interactive functions
pd.set_option("display.max_columns", 30)  # to limit the number of columns displayed
from datetime import datetime # for preprocessing the data for time-series analysis
import warnings
warnings.filterwarnings("ignore")
# import mysql.connector as sql
from google.colab import drive # to access the drive files
drive.mount("/content/drive")
data_df = pd.read_csv("Clean_data.csv")
from sklearn.pipeline import Pipeline
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import plot_confusion_matrix, classification_report
from imblearn.over_sampling import SMOTE
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics import accuracy_score
# ## Label Encoding
# Label Encoding refers to converting the labels into a numeric form so as to convert them into the machine-readable form. Machine learning algorithms can then decide in a better way how those labels must be operated. It is an important pre-processing step for the structured dataset in supervised learning.
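# A minimal sketch (toy labels, not the review data) of the mapping LabelEncoder produces:
toy_labels = ["Negative", "Neutral", "Positive", "Negative"]
toy_le = LabelEncoder()
print(toy_le.fit_transform(toy_labels))  # [0 1 2 0] -- classes are numbered in alphabetical order
print(toy_le.classes_)  # ['Negative' 'Neutral' 'Positive']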
label = LabelEncoder()
data_df["N_sents"] = label.fit_transform(data_df["Sentiments"])
# ## **SMOTE**
# ### SMOTE stands for Synthetic Minority Oversampling Technique. It is a statistical technique for increasing the number of cases in your dataset in a balanced way: it generates new instances from the existing minority-class cases that you supply as input.
# ### Because the classes are not equally represented, a model trained on the raw data sees too few examples of the smaller classes, so we use SMOTE to increase the number of samples in the classes that have less data.
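# A minimal sketch of SMOTE's effect on a toy imbalanced dataset (synthetic data, not the reviews):
from collections import Counter
from sklearn.datasets import make_classification
X_toy, y_toy = make_classification(n_samples=200, weights=[0.9, 0.1], random_state=0)
print("before:", Counter(y_toy))
X_bal, y_bal = SMOTE(random_state=0).fit_resample(X_toy, y_toy)
print("after: ", Counter(y_bal))  # the minority class is oversampled to match the majority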
data_df["Sentiments"].value_counts()
smote = SMOTE(sampling_strategy="auto")
x = data_df["review"]
y = data_df["N_sents"]
tfidf = TfidfVectorizer(max_features=20000)
a = tfidf.fit_transform(x)
a.toarray()
X, Y = smote.fit_resample(a, y)
x_train, x_test, y_train, y_test = train_test_split(
X, Y, test_size=0.3, random_state=12
)
# # TFIDF
# TF-IDF (Term Frequency - Inverse Document Frequency) weights each word by how often it appears in a document, discounted by how common the word is across all documents, so terms that are distinctive for a document are treated as more relevant.
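# A tiny worked example on two toy sentences (not the review corpus): a word that occurs in
# every document ('product') gets a lower weight than words specific to a single document.
toy_tfidf = TfidfVectorizer()
toy_mat = toy_tfidf.fit_transform(["good product good price", "bad product"])
print(sorted(toy_tfidf.vocabulary_, key=toy_tfidf.vocabulary_.get))
print(toy_mat.toarray().round(2))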
# ## MultinomialNB
# The Multinomial Naive Bayes algorithm is a Bayesian learning approach popular in Natural Language Processing (NLP). The program guesses the tag of a text, such as an email or a newspaper story, using the Bayes theorem. It calculates each tag's likelihood for a given sample and outputs the tag with the greatest chance.
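# A toy sketch of MultinomialNB on word-count features (toy documents, not the review data):
from sklearn.feature_extraction.text import CountVectorizer
toy_docs = ["great great product", "terrible product", "great value"]
toy_y = [1, 0, 1]
toy_counts = CountVectorizer().fit_transform(toy_docs)
toy_nb = MultinomialNB().fit(toy_counts, toy_y)
print(toy_nb.predict_proba(toy_counts).round(2))  # per-class probabilities for each toy document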
model1 = MultinomialNB()
model1.fit(x_train, y_train)
y_pred = model1.predict(x_test)
print("The Accuracy of the model :", accuracy_score(y_test, y_pred) * 100, "%")
print("\n", classification_report(y_pred, y_test))
plot_confusion_matrix(model1, x_test, y_test)
report = classification_report(y_test, y_pred, output_dict=True)
df = pd.DataFrame(report).transpose().reset_index()
df.insert(0, "Technique", "TFIDF")
df.insert(1, "Model", "MultinomialNB")
name = []
score = []
name.append("MultinomialNB")
score.append(model1.score(x_test, y_test))
# Predicting on unknown data
text = [
"After a few days, there is a yellow color over the mouse rubber. I think rubber reacted with sweat. It doesn't look good now. And no way to clean it. So please go for dark color."
]
test = tfidf.transform(text)
label.inverse_transform(np.unique(data_df["N_sents"]))[model1.predict(test)]
# ## One vs Rest Classifier with Logistic Regression
# Logistic regression predicts a categorical dependent variable from a given set of independent variables, so the outcome must be a categorical or discrete value (Yes/No, 0/1, True/False, etc.). Instead of returning the class directly, it outputs probabilities that lie between 0 and 1. We wrap it in a OneVsRestClassifier because we have three classes to separate. Logistic regression works with many types of input data and makes it easy to see which variables contribute most to the classification.
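# One-vs-Rest fits one binary logistic regression per class; a quick sketch on synthetic
# 3-class data (not the review features) showing that three estimators are trained:
from sklearn.datasets import make_classification
X_toy, y_toy = make_classification(n_samples=300, n_informative=4, n_classes=3, random_state=0)
ovr_toy = OneVsRestClassifier(LogisticRegression(max_iter=1000)).fit(X_toy, y_toy)
print(len(ovr_toy.estimators_))  # 3 -- one binary classifier per class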
modell = OneVsRestClassifier(LogisticRegression(class_weight="balanced"))
modell.fit(x_train, y_train)
y_pred = modell.predict(x_test)
print("The Accuracy of the model :", accuracy_score(y_test, y_pred) * 100, "%")
print("\n", classification_report(y_pred, y_test))
plot_confusion_matrix(modell, x_test, y_test)
report = classification_report(y_test, y_pred, output_dict=True)
df1 = pd.DataFrame(report).transpose().reset_index()
df1.insert(0, "Technique", "TFIDF")
df1.insert(1, "Model", "LogisticRegression")
name.append("LogisticRegression")
score.append(modell.score(x_test, y_test))
# Predicting on unknown data
text = [
"After a few days, there is a yellow color over the mouse rubber. I think rubber reacted with sweat.\
It doesn't look good now. And no way to clean it. So please go for dark color."
]
test = tfidf.transform(text)
label.inverse_transform(np.unique(data_df["N_sents"]))[modell.predict(test)]
# ## RandomForestClassifier
# A random forest is a meta estimator that fits a number of decision tree classifiers on various sub-samples of the dataset and uses averaging to improve the predictive accuracy and control over-fitting.
# The random forest classifier builds a collection of decision trees, each fitted on a randomly selected subset of the training set, and then collects the votes from the individual trees to decide the final prediction.
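# A sketch of the majority-voting idea on synthetic data (not the review features):
# each tree votes, and the forest returns the most common vote.
from sklearn.ensemble import RandomForestClassifier
from sklearn.datasets import make_classification
X_toy, y_toy = make_classification(n_samples=200, random_state=0)
rf_toy = RandomForestClassifier(n_estimators=5, random_state=0).fit(X_toy, y_toy)
print([int(t.predict(X_toy[:1])[0]) for t in rf_toy.estimators_])  # each tree's vote (class index)
print(rf_toy.predict(X_toy[:1]))  # the forest's majority decision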
from sklearn.ensemble import RandomForestClassifier
RandomForestClassifier().get_params().keys()
modelr = RandomForestClassifier(
class_weight="balanced",
)
modelr.fit(x_train, y_train)
y_pred = modelr.predict(x_test)
print("The Accuracy of the model is : ", accuracy_score(y_test, y_pred) * 100, "%")
print("\n", classification_report(y_pred, y_test))
plot_confusion_matrix(modelr, x_test, y_test)
report = classification_report(y_test, y_pred, output_dict=True)
df2 = pd.DataFrame(report).transpose().reset_index()
df2.insert(0, "Technique", "TFIDF")
df2.insert(1, "Model", "RandomForestClassifier")
name.append("RandomForestClassifier")
score.append(modelr.score(x_test, y_test))
# Predicting on unknown data
text = [
"Blades seem ok, but they come in a tiny box and not in a well-sorted box that is labeled and separated in size like the picture shows"
]
test = tfidf.transform(text)
label.inverse_transform(np.unique(data_df["N_sents"]))[modelr.predict(test)]
# ## AdaBoostClassifier
# An AdaBoost classifier is a meta-estimator that begins by fitting a classifier on the original dataset and then fits additional copies of the classifier on the same dataset but where the weights of incorrectly classified instances are adjusted such that subsequent classifiers focus more on difficult cases.
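# AdaBoost re-weights misclassified samples between rounds; a toy sketch (synthetic data)
# showing how the training score changes as boosting rounds are added:
from sklearn.ensemble import AdaBoostClassifier
from sklearn.datasets import make_classification
X_toy, y_toy = make_classification(n_samples=300, random_state=0)
ada_toy = AdaBoostClassifier(n_estimators=10, random_state=0).fit(X_toy, y_toy)
print([round(s, 2) for s in ada_toy.staged_score(X_toy, y_toy)])  # score after each boosting round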
from sklearn.ensemble import AdaBoostClassifier
modela = AdaBoostClassifier()
modela.fit(x_train, y_train)
y_pred = modela.predict(x_test)
print("The Accuracy of the model is : ", accuracy_score(y_test, y_pred) * 100, "%")
print("\n", classification_report(y_pred, y_test))
plot_confusion_matrix(modela, x_test, y_test)
report = classification_report(y_test, y_pred, output_dict=True)
df3 = pd.DataFrame(report).transpose().reset_index()
df3.insert(0, "Technique", "TFIDF")
df3.insert(1, "Model", "AdaBoostClassifier")
name.append("AdaBoostClassifier")
score.append(modela.score(x_test, y_test))
# Predicting on unknown data
text = [
"This product is very handy and easy to use even my grandparents understood how to use it. It is good\
for a small room but if the room is big you might need to turn it on for a longer time. The cord length is not too much so it needs to be plugged \
in close to switch board. 3 plug pin can also be bought along with this product"
]
test = tfidf.transform(text)
label.inverse_transform(np.unique(data_df["N_sents"]))[modela.predict(test)]
# Comparing different models for TFIDF
dd = pd.concat([df, df1, df2, df3], axis=0)
ddd = dd.groupby(["Technique", "Model", "index"])[
    ["precision", "recall", "f1-score"]
].mean()
ddd.head()
pd.DataFrame({"Model": name, "Score": score}, index=[1, 2, 3, 4]).plot(
kind="bar", x="Model", y="Score", ylabel="Score"
)
# ## Hyperparameter Tuning
# A Machine Learning model is defined as a mathematical model with a number of parameters that need to be learned from the data. By training a model with existing data, we are able to fit the model parameters.
# However, there is another kind of parameter, known as Hyperparameters, that cannot be directly learned from the regular training process. They are usually fixed before the actual training process begins. These parameters express important properties of the model such as its complexity or how fast it should learn.
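# For reference, the grid searched below covers 2 penalty settings x 20 values of C
# (spanning 1 to 1000 on a log scale), i.e. 40 candidate models times the number of CV folds:
print(np.round(np.logspace(0, 3, 20), 2))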
modell.get_params().keys()
from sklearn.model_selection import GridSearchCV
params = {"estimator__penalty": ["l1", "l2"], "estimator__C": np.logspace(0, 3, 20)}
grid = GridSearchCV(modell, param_grid=params, n_jobs=-1)
grid.fit(x_train, y_train)
print(grid.best_score_) # the best score we get tunning these parameters
model_grid = grid.best_estimator_ # This gives the best model with the parameter
model_grid
y_pred = model_grid.predict(x_test)
print(classification_report(y_pred, y_test))
plot_confusion_matrix(model_grid, x_test, y_test)
# Predicting on unknown data
text = [
"It appears to be decent set of blades but when you actually use them they are not very sturdy. Cuts come out angled and wobbly. You get what you pay for"
]
test = tfidf.transform(text)
label.inverse_transform(np.unique(data_df["N_sents"]))[model_grid.predict(test)]
report = classification_report(y_test, y_pred, output_dict=True)
df5 = pd.DataFrame(report).transpose().reset_index()
df5.insert(0, "Technique", "TFIDF")
df5.insert(1, "Model", "Tuned LogisticRegression")
name.append("Tuned Logistic")
score.append(model_grid.score(x_test, y_test))
# Final Comparison
pd.DataFrame({"Model": name, "Score": score}, index=[1, 2, 3, 4, 5]).plot(
kind="bar", x="Model", y="Score", ylabel="Score"
)
# # Spacy vectorization
# spaCy is a free, open-source library for NLP in Python written in Cython. spaCy is designed to make it easy to build systems for information extraction or general-purpose natural language processing.
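# Each review is mapped to a single 300-dimensional document vector (for en_core_web_lg this is
# the average of the token vectors), so semantically similar texts typically end up close together:
doc_a = nlp("great product, works well")
doc_b = nlp("excellent item, performs nicely")
doc_c = nlp("terrible, broke after a day")
print(doc_a.vector.shape)  # (300,)
print(round(doc_a.similarity(doc_b), 2), round(doc_a.similarity(doc_c), 2))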
data_df["N_sents"].value_counts()
data_df["vector"] = data_df["review"].apply(vector)
x = data_df["vector"]
y = data_df["N_sents"]
x1 = np.stack(x)
sm = SMOTE()
X_s, Y_s = sm.fit_resample(x1, y)
x_trainn, x_testt, y_trainn, y_testt = train_test_split(
X_s, Y_s, test_size=0.3, random_state=12
)
x_train_2d = np.stack(x_trainn)
x_test_2d = np.stack(x_testt)
x_train_2d.shape
from sklearn.utils import class_weight
class_weight = class_weight.compute_class_weight(
"balanced", classes=np.unique(Y_s), y=Y_s
)
class_weight
# ## Logistic Regression
a = OneVsRestClassifier(LogisticRegression())
a.get_params().keys()
from sklearn.preprocessing import MinMaxScaler
pipe5 = Pipeline(
[
("scaler", MinMaxScaler()),
("lr", OneVsRestClassifier(LogisticRegression(class_weight="balanced"))),
]
)
pipe5.fit(x_train_2d, y_trainn)
y_pred = pipe5.predict(x_test_2d)
print("The Accuracy of this model is ", accuracy_score(y_testt, y_pred) * 100, "%")
print(classification_report(y_pred, y_testt))
# plot_confusion_matrix(pipe5,x_test,y_test)
report = classification_report(y_testt, y_pred, output_dict=True)
df6 = pd.DataFrame(report).transpose().reset_index()
df6.insert(0, "Technique", "Spacy")
df6.insert(1, "Model", "LogisticRegression")
# Predicting on unknown data
text = "It appears to be decent set of blades but when you actually use them they are not very sturdy. Cuts come out angled and wobbly. You get what you pay for"
doc = nlp(text).vector
doc = doc.reshape(-1, 300)
label.inverse_transform(np.unique(data_df["N_sents"]))[pipe5.predict(doc)]
# ## RandomForestClassifier
RandomForestClassifier().get_params().keys()
pipe6 = Pipeline(
[
("scaler", MinMaxScaler()),
("rf", RandomForestClassifier(class_weight="balanced")),
]
)
pipe6.fit(x_train_2d, y_trainn)
y_pred = pipe6.predict(x_test_2d)
print("The Accuracy of this model is ", accuracy_score(y_testt, y_pred) * 100, "%")
print("\n", classification_report(y_pred, y_testt))
report = classification_report(y_testt, y_pred, output_dict=True)
df7 = pd.DataFrame(report).transpose().reset_index()
df7.insert(0, "Technique", "Spacy")
df7.insert(1, "Model", "RandomForestClassifier")
# Predicting on unknown data
text = " fine okay"
doc = nlp(text).vector
doc = doc.reshape(-1, 300)
label.inverse_transform(np.unique(data_df["N_sents"]))[pipe6.predict(doc)]
# Comparing different techniques and models
d2 = pd.concat([dd, df5, df6, df7], axis=0)
d3 = d2.groupby(["Technique", "Model", "index"])[
    ["precision", "recall", "f1-score"]
].mean()
d3
# # Fasttext
# FastText is a free, open-source library from Facebook AI Research (FAIR) for learning word embeddings and for text classification. It supports both unsupervised training of word vectors and supervised training of text classifiers, and it provides tools for evaluating these models.
#
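# fastText's supervised trainer reads plain-text lines where each label is prefixed with
# "__label__"; a toy look (made-up reviews) at the format the cells below write to disk:
toy_ft = pd.DataFrame(
    {"Sentiments": ["Positive", "Negative"], "review": ["works really well", "stopped working after two days"]}
)
print(("__label__" + toy_ft["Sentiments"] + " " + toy_ft["review"]).tolist())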
import fasttext
from sklearn.model_selection import train_test_split
data_fast = data_df[["Sentiments", "review"]]
data_fast["sentiment"] = "__label__" + data_fast["Sentiments"].astype(str)
data_fast["sentiment_review"] = data_fast["sentiment"] + " " + data_fast["review"]
train_df, test_df = train_test_split(data_fast, test_size=0.2)
train_df.to_csv("Train", columns=["sentiment_review"], index=False, header=False)
test_df.to_csv("Test", columns=["sentiment_review"], index=False, header=False)
data_fast.head()
model = fasttext.train_supervised(input="Train", lr=0.2, epoch=20, wordNgrams=2)
test_num, precision, recall = model.test("Test")
f_score = (2 * precision * recall) / (precision + recall)
print("No. of Test Samples:", test_num)
print("Precision Score:", precision)
print("Recall:", recall)
print("F1 score:", f_score)
model.predict("half")
model.predict(" i am very disappointed with this product")
model.predict(" The product worked really well")
model.get_nearest_neighbors("disappointed")
model.predict(" product is working ")
# # Text Analysis:
d = pd.read_csv("Clean_data.csv")
data_d = d[
[
"review",
"price",
"unixReviewTime",
"overall",
"summary",
"main_cat",
"brand",
"title",
]
]
import spacy
from spacy.lang.en.stop_words import STOP_WORDS
data_d.dropna(inplace=True)
data_d["Reviews"] = data_d["review"].apply(
lambda words: " ".join(word for word in words.split() if word not in STOP_WORDS)
)
data_d["summary"] = data_d["summary"].apply(
lambda words: " ".join(word for word in words.split() if word not in STOP_WORDS)
)
stop_words = d["brand"].apply(lambda x: re.findall("[a-zA-Z]+", x[:10]))
stop_words_user_define = []
for i in stop_words:
for j in i:
if j not in stop_words_user_define:
stop_words_user_define.append(j)
else:
pass
unique_st_words = set(stop_words_user_define)
data_d["Reviews"] = data_d["review"].apply(
lambda words: " ".join(
word for word in words.split() if word not in unique_st_words
)
)
data_d.dropna(inplace=True)
import gensim
review_text = data_d["Reviews"].apply(gensim.utils.simple_preprocess)
model = gensim.models.Word2Vec(window=10, min_count=2)
model.build_vocab(review_text, progress_per=100)
model.train(review_text, total_examples=model.corpus_count, epochs=model.epochs)
pos_df = data_d[data_d["overall"] > 3]
nt_df = data_d[data_d["overall"] == 3]
neg_df = data_d[data_d["overall"] < 3]
review_text1 = pos_df["summary"].apply(gensim.utils.simple_preprocess)
from collections import Counter
count1 = Counter()
for i in review_text1:
for j in i:
count1[j] += 1
review_text2 = nt_df["summary"].apply(gensim.utils.simple_preprocess)
from collections import Counter
count2 = Counter()
for i in review_text2:
for j in i:
count2[j] += 1
review_text3 = neg_df["summary"].apply(gensim.utils.simple_preprocess)
from collections import Counter
count3 = Counter()
for i in review_text3:
for j in i:
count3[j] += 1
post_lst = [i[0] for i in count1.most_common(1500)]
neu_lst = [i[0] for i in count2.most_common(1500)]
neg_lst = [i[0] for i in count3.most_common(1500)]
# Keep only the words that are unique to each sentiment bucket.
# (Note: `a and b` on two lists evaluates to the second list, so the membership
# test has to check each list explicitly.)
neg_word = [i for i in neg_lst if i not in post_lst and i not in neu_lst]
pos_word = [i for i in post_lst if i not in neg_lst and i not in neu_lst]
neu_word = [i for i in neu_lst if i not in post_lst and i not in neg_lst]
# # **Analyzing Text**
def rew_analyzer(st):
p = 0
n = 0
nu = 0
for i in st.split():
if i in pos_word:
# print(i,'po')
p += 1
elif i in neg_word:
n += 1
# print(i,'ne')
elif i in neu_word:
nu += 1
# print(i,'nu')
if p > n and p > nu:
return "Positive Review"
elif p == n and p == nu:
return "Positive Review"
elif n > p and n > nu:
        return "Negative Review"
elif p < n and n == nu:
return "Negative Review"
elif nu > p and nu > n:
return "Neutral Review"
elif p < n and p == nu:
return "Negative Review"
    else:
        return "Neutral Review"
data_d["Reviews Text Analysis"] = data_d["Reviews"].apply(rew_analyzer)
data_d["y_test"] = data_d["overall"].map({5: 3, 4: 3, 3: 2, 2: 1, 1: 1})
data_d["y_pred"] = data_d["Reviews Text Analysis"].map(
    {"Positive Review": 3, "Neutral Review": 2, "Negative Review": 1}
)
data_d.dropna(inplace=True)
accuracy_score(data_d["y_test"], data_d["y_pred"])
# # Word Cloud
from wordcloud import WordCloud
# This is an image composed of words used in a particular text or subject, in which the size of each word indicates its frequency or importance
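# Word size tracks frequency; a minimal sketch using explicit toy frequencies instead of raw text:
toy_freq = {"good": 50, "great": 30, "okay": 10, "poor": 5}
toy_wc = WordCloud(width=400, height=300, background_color="white").generate_from_frequencies(toy_freq)
plt.figure(figsize=(4, 3))
plt.imshow(toy_wc)
plt.axis("off")
plt.show()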
# ### Positive Words
wordcloud = WordCloud(
width=1600, height=1000, background_color="lightgrey", min_font_size=10
).generate(" ".join(post_lst))
# plot the WordCloud image
plt.figure(figsize=(12, 8), facecolor=None)
plt.imshow(wordcloud)
plt.axis("off")
plt.tight_layout(pad=0)
plt.show()
# ### Negative Words
wordcloud = WordCloud(
width=1600, height=1000, background_color="lightgrey", min_font_size=10
).generate(" ".join(neg_word))
# plot the WordCloud image
plt.figure(figsize=(12, 8), facecolor=None)
plt.imshow(wordcloud)
plt.axis("off")
plt.tight_layout(pad=0)
plt.show()
# ### Neutral Words
wordcloud = WordCloud(
width=1600, height=1000, background_color="lightgrey", min_font_size=10
).generate(" ".join(neu_word))
# plot the WordCloud image
plt.figure(figsize=(12, 8), facecolor=None)
plt.imshow(wordcloud)
plt.axis("off")
plt.tight_layout(pad=0)
plt.show()
# # **Translator**
from textblob import TextBlob
def score(a):
ans = str()
if a < 0.1:
ans = "Negative"
elif a > 0.1 and a < 0.5:
ans = "Neutral"
else:
ans = "Positive"
return print("This is a {0} statement".format(ans))
blob = TextBlob("This is a good product")
b = str(blob.translate(from_lang="en", to="fr"))
b
blob = TextBlob(b)
a = TextBlob(str(blob.translate(from_lang="fr", to="en")))
score(a.sentiment.polarity)
# # Analysis of customers and products
# We will see in detail about the top products and the customers
data1 = pd.get_dummies(data_df["verified"])
data_df = pd.concat([data_df, data1], axis=1)
data = pd.get_dummies(data_df["Sentiments"])
data_df = pd.concat([data_df, data], axis=1)
data_df["Negative"] = data_df["Negative"].replace(0, np.nan, regex=True)
data_df["Neutral"] = data_df["Neutral"].replace(0, np.nan, regex=True)
data_df["Positive"] = data_df["Positive"].replace(0, np.nan, regex=True)
data_df[0] = data_df[0].replace(0, np.nan, regex=True)
data_df[1] = data_df[1].replace(0, np.nan, regex=True)
# ### Product Analysis
cl1 = (
data_df.groupby(["asin"])
.agg(
Total_Sold=(1, "count"),
Negative=("Negative", "count"),
Neutral=("Neutral", "count"),
Positive=("Positive", "count"),
Price=("price", "mean"),
Sales=("price", "sum"),
Rank=("rank", "median"),
Verified_True=(1, "count"),
Verified_False=(0, "count"),
)
.reset_index()
)
cl1["Main_Cat"] = cl1["asin"].apply(
lambda x: data_df.loc[data_df["asin"] == x, "main_cat"].unique()[0]
)
cl1["Brand"] = cl1["asin"].apply(
lambda x: data_df.loc[data_df["asin"] == x, "brand"].unique()[0]
)
cl1["Title"] = cl1["asin"].apply(
lambda x: data_df.loc[data_df["asin"] == x, "title"].unique()[0]
)
def product(
Column="Total_Sold",
sort=True,
main_cat="Tools & Home Improvement",
sold=20,
visual="df",
):
if visual == "plot":
cl1[(cl1["Main_Cat"] == main_cat) & (cl1["Total_Sold"] >= sold)].sort_values(
by=[Column], ascending=sort
).head().plot(x="asin", kind="bar", y=Column)
else:
display(
cl1[
(cl1["Main_Cat"] == main_cat) & (cl1["Total_Sold"] >= sold)
].sort_values(by=[Column], ascending=sort)
)
interact(
product,
Column=[x for x in cl1.columns],
sort=[True, False],
main_cat=[y for y in data_df["main_cat"].unique()],
sold=(1, 500, 10),
visual=["plot", "df"],
)
# ### Customers Analysis
cl2 = (
data_df.groupby(["reviewerID"])
.agg(
Item_bought=(1, "count"),
Negative=("Negative", "count"),
Neutral=("Neutral", "count"),
Positive=("Positive", "count"),
# Price=('price','mean'),
Amount=("price", "sum"),
# Rank=('rank','median'),
Verified_True=(1, "count"),
Verified_False=(0, "count"),
)
.reset_index()
)
def customer(Column="Item_bought", sort=True, Bought=20, visual="df"):
if visual == "plot":
cl2.loc[cl2["Item_bought"] >= Bought].sort_values(
by=[Column], ascending=sort
).head().plot(x="reviewerID", kind="bar", y=Column)
else:
display(
cl2[cl2["Item_bought"] >= Bought].sort_values(by=[Column], ascending=sort)
)
interact(
customer,
Column=[x for x in cl2.columns],
sort=[True, False],
sold=(1, 500, 10),
visual=["plot", "df"],
)
# # **Product based classification**
import statistics as st
mode = lambda x: st.mode(x)
cl = (
data_df.groupby(["asin"])
.agg(
Total_Sold=(1, "count"),
Negative=("Negative", "count"),
Neutral=("Neutral", "count"),
Positive=("Positive", "count"),
Price=("price", "mean"),
Sales=("price", "sum"),
Rank=("rank", "median"),
Verified_True=(1, "count"),
Verified_False=(0, "count"),
Rating=("overall", mode),
)
.reset_index()
)
display(cl.head(2))
numerics = [
"int16",
"int32",
"int64",
"float16",
"float32",
"float64",
] # list of all numeric dtypes in the dataframe
con_df = cl.select_dtypes(include=numerics) # create dataframe for all numeric type
# con_df.drop('asin',axis=1,inplace=True)
con_cols = con_df.columns # taking colums
num_cols = 4 # how many column we want in subplot
n_bins = 20 # bin size for histogram
num_rows = int(len(con_cols) / num_cols) + 1 # rows for subplot
figs, axes = plt.subplots(num_rows, num_cols, tight_layout=True, figsize=(20, 8))
for col, ax in zip(con_cols, axes.flatten()):
sns.boxplot(y=con_df[col], ax=ax, color="tan")
ax.set_title(col)
plt.show()
numerics = [
"int16",
"int32",
"int64",
"float16",
"float32",
"float64",
] # list of all numeric dtypes in the dataframe
con_df = cl.select_dtypes(include=numerics) # create dataframe for all numeric type
# con_df.drop('asin',axis=1,inplace=True)
con_cols = con_df.columns # taking colums
num_cols = 4 # how many column we want in subplot
n_bins = 20 # bin size for histogram
num_rows = int(len(con_cols) / num_cols) + 1 # rows for subplot
figs, axes = plt.subplots(num_rows, num_cols, tight_layout=True, figsize=(20, 8))
for col, ax in zip(con_cols, axes.flatten()):
sns.histplot(x=con_df[col], ax=ax, color="tan")
ax.set_title(col)
plt.show()
cl.Rating.value_counts()
smote = SMOTE(sampling_strategy="auto")
X = cl.iloc[:, 1:9]
y = cl.Rating
X_s, Y_s = smote.fit_resample(X, y)
x = pd.concat([X_s, Y_s], axis=1)
x.Rating.value_counts()
from sklearn.tree import DecisionTreeClassifier # model
from sklearn.metrics import (
confusion_matrix,
accuracy_score,
recall_score,
f1_score,
precision_score,
) # to get the score of classifier
X_train, X_test, y_train, y_test = train_test_split(X_s, Y_s, train_size=0.7)
dt = DecisionTreeClassifier(random_state=1)  # creating classifier
dt.fit(X_train, y_train)  # fitting the train data
DT_pred = dt.predict(X_test) # predicting test data
DT_pred1 = dt.predict(X_train) # predicting train data
a_s = accuracy_score(
y_train, DT_pred1
) # gives the accuracy of model how well our classifier
r_s = recall_score(
y_train, DT_pred1, average="macro"
) # to get the recall score used predefined function
p_s = precision_score(
y_train, DT_pred1, average="macro"
) # to get the precision_score used predefined function
f1_s = f1_score(
y_train, DT_pred1, average="macro"
) # to get the f1_score used predefined function
print("Accuracy is:", a_s)
print("Recall_score is:", r_s)
print("Precision_score is:", p_s)
print("F1_score is:", f1_s)
a_s = accuracy_score(
y_test, DT_pred
) # gives the accuracy of model how well our classifier
r_s = recall_score(
y_test, DT_pred, average="macro"
) # to get the recall score used predefined function
p_s = precision_score(
y_test, DT_pred, average="macro"
) # to get the precision_score used predefined function
f1_s = f1_score(
y_test, DT_pred, average="macro"
) # to get the f1_score used predefined function
print("Accuracy is:", a_s)
print("Recall_score is:", r_s)
print("Precision_score is:", p_s)
print("F1_score is:", f1_s)
# # **Search Recommendation**
ans = pd.DataFrame(index=range(len(data_df)))
a = str(input("Enter the word :")).lower()
data_df["description"] = data_df["description"].apply(lambda x: str(x))
ans["index"] = data_df["description"].apply(
lambda x: re.findall(a, x)[0] if len(re.findall(a, x)) != 0 else np.nan
)
ans.dropna(inplace=True)
ans1 = data_df.loc[ans.index, :]
def recommendation(column="Price", sort=True, n=5):
a = ans1.loc[ans1["overall"] > 3, :]
cl3 = (
a.groupby("asin")
.agg(
Total_Review=(1, "count"),
Rating=("overall", "mean"),
# Negative=('Negative','count'),
# Neutral=('Neutral','count'),
# Positive=('Positive','count'),
Price=("price", "mean"),
)
.reset_index()
)
cl3["brand"] = cl3["asin"].apply(
lambda x: a.loc[a["asin"] == x, "brand"].unique()[0]
)
if len(cl3) == 0:
print("Thanks for searching , please look for another product")
else:
display(cl3.sort_values(by=column, ascending=sort).head(n))
interact(
    recommendation,
    # cl3 is local to the function above, so list its columns explicitly here
    column=["asin", "Total_Review", "Rating", "Price", "brand"],
    sort=[True, False],
    n=(1, 50, 1),
)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/107/129107895.ipynb
| null | null |
[{"Id": 129107895, "ScriptId": 38381399, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14295886, "CreationDate": "05/11/2023 04:22:02", "VersionNumber": 1.0, "Title": "Amazon Review Data-NLP and ML", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 803.0, "LinesInsertedFromPrevious": 803.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# # Importing Libraries
import re # for text preprocessing
import pandas as pd # for data manipulation and analysis
import json  # to load JSON files
import numpy as np  # for numerical operations
import matplotlib.pyplot as plt  # for visualization
import seaborn as sns  # for visualization
from tabulate import tabulate  # to create tables
from ipywidgets import interact  # to make interactive functions
pd.set_option("display.max_columns", 30)  # to limit the number of columns displayed
from datetime import datetime # for preprocessing the data for time-series analysis
import warnings
warnings.filterwarnings("ignore")
# import mysql.connector as sql
from google.colab import drive # to access the drive files
drive.mount("/content/drive")
data_df = pd.read_csv("Clean_data.csv")
# # **Natural Language Processing**
import spacy
from spacy.lang.en import English
nlp = spacy.load("en_core_web_lg")
data_df.drop(columns=["Unnamed: 0", "index"], inplace=True)
data_df.head(2)
import re
from spacy.lang.en.stop_words import STOP_WORDS
def Textclean(text):
doc = nlp(text)
vector = []
for i in doc:
if (i.is_stop == False) and (i.is_punct == False) and (i.is_digit == False):
if i not in STOP_WORDS:
vector.append(i.lemma_)
a = " ".join(vector)
a = re.sub(r"\d", " ", a)
a = re.sub(r"\W+", " ", a)
a = re.sub("\s", " ", a)
a = a.lower().strip()
return a
def mostcommon(data_df, i, j):
from collections import Counter
counts = Counter()
for i in data_df[i]:
counts[i] += 1
a = counts.most_common(j)
return pd.DataFrame(a, columns=["Name", "Count"])
text = "hello yogesh @34 yadav wassup"
Textclean(text)
def vector(text):
doc = nlp(text)
return doc.vector
text = "hello yogesh yadav wassup"
a = vector(text)
a.shape
# data_df['reviewText']=data_df['reviewText'].apply(Textclean)
data_df["reviewText"][0]
# data_df['summary']=data_df['summary'].apply(Textclean)
data_df["summary"][0]
# data_df['review']=data_df['reviewText']+' '+data_df['summary']
# data_df['review']=data_df['review'].apply(Textclean)
data_df["review"][0]
# data_df['vector']=data_df['review'].apply(vector)
# data_df['vector'][0].shape
data_df["Sentiments"] = data_df["overall"].map(
{1: "Negative", 2: "Negative", 3: "Neutral", 4: "Positive", 5: "Positive"}
)
plt.plot(data_df["Sentiments"].value_counts())
# data_df.to_csv('Clean_data.csv',header=True,index=True)
def sentanalysis(Sentiment="Positive", cat="Industrial & Scientific"):
data_df4 = data_df[
(data_df["main_cat"] == cat) & (data_df["Sentiments"] == Sentiment)
]
a = mostcommon(data_df4, "brand", 10)
# display(a)
a.plot(kind="bar", x="Name", rot=90)
interact(
sentanalysis,
Sentiment=["Positive", "Negative", "Neutral"],
cat=[x for x in data_df["main_cat"].unique()],
)
# # Sentimental Analysis - Classification
import re # for text preprocessing
import pandas as pd # for data manipulation and analysis
import json # to load jason file
import numpy as np # to deal with mul
import matplotlib.pyplot as plt # for visualization
import seaborn as sns # for visualization
from tabulate import tabulate # to create tabels
from ipywidgets import interact # to make intractive functions
pd.set_option("display.max_columns", 30) # to limit the number of rows displayed
from datetime import datetime # for preprocessing the data for time-series analysis
import warnings
warnings.filterwarnings("ignore")
# import mysql.connector as sql
from google.colab import drive # to access the drive files
drive.mount("/content/drive")
data_df = pd.read_csv("Clean_data.csv")
from sklearn.pipeline import Pipeline
from sklearn.naive_bayes import MultinomialNB
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import plot_confusion_matrix, classification_report
from imblearn.over_sampling import SMOTE
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier
from sklearn.metrics import accuracy_score
# ## Label Encoding
# Label Encoding refers to converting the labels into a numeric form so as to convert them into the machine-readable form. Machine learning algorithms can then decide in a better way how those labels must be operated. It is an important pre-processing step for the structured dataset in supervised learning.
label = LabelEncoder()
data_df["N_sents"] = label.fit_transform(data_df["Sentiments"])
# ## **SMOTE**
# ### SMOTE stands for Synthetic Minority Oversampling Technique , it is a statistical technique for increasing the number of cases in your dataset in a balanced way. The component works by generating new instances from existing minority cases that you supply as input.
# ### Because the classes are not equally spread thats why our model is not highly trained on other classes so we will use the SMOTE technique to increase the samples of the class which have less data.
data_df["Sentiments"].value_counts()
smote = SMOTE(sampling_strategy="auto")
x = data_df["review"]
y = data_df["N_sents"]
tfidf = TfidfVectorizer(max_features=20000)
a = tfidf.fit_transform(x)
a.toarray()
X, Y = smote.fit_resample(a, y)
x_train, x_test, y_train, y_test = train_test_split(
X, Y, test_size=0.3, random_state=12
)
# # TFIDF
# TF-IDF (Term Frequency - Inverse Document Frequency) is a handy algorithm that uses the frequency of words to determine how relevant those words are to a given document.
# ## MultinomialNB
# The Multinomial Naive Bayes algorithm is a Bayesian learning approach popular in Natural Language Processing (NLP). The program guesses the tag of a text, such as an email or a newspaper story, using the Bayes theorem. It calculates each tag's likelihood for a given sample and outputs the tag with the greatest chance.
model1 = MultinomialNB()
model1.fit(x_train, y_train)
y_pred = model1.predict(x_test)
print("The Accuracy of the model :", accuracy_score(y_test, y_pred) * 100, "%")
print("\n", classification_report(y_pred, y_test))
plot_confusion_matrix(model1, x_test, y_test)
report = classification_report(y_test, y_pred, output_dict=True)
df = pd.DataFrame(report).transpose().reset_index()
df.insert(0, "Technique", "TFIDF")
df.insert(1, "Model", "MultinomialNB")
name = []
score = []
name.append("MultonomialNB")
score.append(model1.score(x_test, y_test))
# Predicting on unknown data
text = [
"After a few days, there is a yellow color over the mouse rubber. I think rubber reacted with sweat. It doesn't look good now. And no way to clean it. So please go for dark color."
]
test = tfidf.transform(text)
label.inverse_transform(np.unique(data_df["N_sents"]))[model1.predict(test)]
# ## One vs Rest Classifier with Logistic Regression
# It is used for predicting the categorical dependent variable using a given set of independent variables. Logistic regression predicts the output of a categorical dependent variable. Therefore the outcome must be a categorical or discrete value. It can be either Yes or No, 0 or 1, true or False, etc. but instead of giving the exact value as 0 and 1, it gives the probabilistic values which lie between 0 and 1 We used this ML model with OneVsRest Classifier because we had 3 classes to classify. Logistic Regression can be used to classify the observations using different types of data and can easily determine the most effective variables used for the classification.
modell = OneVsRestClassifier(LogisticRegression(class_weight="balanced"))
modell.fit(x_train, y_train)
y_pred = modell.predict(x_test)
print("The Accuracy of the model :", accuracy_score(y_test, y_pred) * 100, "%")
print("\n", classification_report(y_pred, y_test))
plot_confusion_matrix(modell, x_test, y_test)
report = classification_report(y_test, y_pred, output_dict=True)
df1 = pd.DataFrame(report).transpose().reset_index()
df1.insert(0, "Technique", "TFIDF")
df1.insert(1, "Model", "LogisticRegression")
name.append("LogisticRegression")
score.append(modell.score(x_test, y_test))
# Predicting on unknown data
text = [
"After a few days, there is a yellow color over the mouse rubber. I think rubber reacted with sweat.\
It doesn't look good now. And no way to clean it. So please go for dark color."
]
test = tfidf.transform(text)
label.inverse_transform(np.unique(data_df["N_sents"]))[modell.predict(test)]
# ## RandomForestClassifier
# A random forest is a meta estimator that fits a number of decision tree classifiers on various sub-samples of the dataset and uses averaging to improve the predictive accuracy and control over-fitting.
# The Random forest classifier creates a set of decision trees from a randomly selected subset of the training set. It is basically a set of decision trees (DT) from a randomly selected subset of the training set and then It collects the votes from different decision trees to decide the final prediction.
from sklearn.ensemble import RandomForestClassifier
RandomForestClassifier().get_params().keys()
modelr = RandomForestClassifier(
class_weight="balanced",
)
modelr.fit(x_train, y_train)
y_pred = modelr.predict(x_test)
print("The Accuracy of the model is : ", accuracy_score(y_test, y_pred) * 100, "%")
print("\n", classification_report(y_pred, y_test))
plot_confusion_matrix(modelr, x_test, y_test)
report = classification_report(y_test, y_pred, output_dict=True)
df2 = pd.DataFrame(report).transpose().reset_index()
df2.insert(0, "Technique", "TFIDF")
df2.insert(1, "Model", "RandomForestClassifier")
name.append("RandomForestClassifier")
score.append(modelr.score(x_test, y_test))
# Predicting on unknown data
text = [
"Blades seem ok, but they come in a tiny box and not in a well-sorted box that is labeled and separated in size like the picture shows"
]
test = tfidf.transform(text)
label.inverse_transform(np.unique(data_df["N_sents"]))[modelr.predict(test)]
# ## AdaBoostClassifier
# An AdaBoost classifier is a meta-estimator that begins by fitting a classifier on the original dataset and then fits additional copies of the classifier on the same dataset but where the weights of incorrectly classified instances are adjusted such that subsequent classifiers focus more on difficult cases.
from sklearn.ensemble import AdaBoostClassifier
modela = AdaBoostClassifier()
modela.fit(x_train, y_train)
y_pred = modela.predict(x_test)
print("The Accuracy of the model is : ", accuracy_score(y_test, y_pred) * 100, "%")
print("\n", classification_report(y_pred, y_test))
plot_confusion_matrix(modela, x_test, y_test)
report = classification_report(y_test, y_pred, output_dict=True)
df3 = pd.DataFrame(report).transpose().reset_index()
df3.insert(0, "Technique", "TFIDF")
df3.insert(1, "Model", "AdaBoostClassifier")
name.append("AdaBoostClassifier")
score.append(modela.score(x_test, y_test))
# Predicting on unknown data
text = [
"This product is very handy and easy to use even my grandparents understood how to use it. It is good\
for a small room but if the room is big you might need to turn it on for a longer time. The cord length is not too much so it needs to be plugged \
in close to switch board. 3 plug pin can also be bought along with this product"
]
test = tfidf.transform(text)
label.inverse_transform(np.unique(data_df["N_sents"]))[modela.predict(test)]
# Comapring different models for TFIDF
dd = pd.concat([df, df1, df2, df3], axis=0)
ddd = dd.groupby(["Technique", "Model", "index"])[
"precision", "recall", "f1-score"
].mean()
ddd.head()
pd.DataFrame({"Model": name, "Score": score}, index=[1, 2, 3, 4]).plot(
kind="bar", x="Model", y="Score", ylabel="Score"
)
# ## HyperParameter Tunning
# A Machine Learning model is defined as a mathematical model with a number of parameters that need to be learned from the data. By training a model with existing data, we are able to fit the model parameters.
# However, there is another kind of parameter, known as Hyperparameters, that cannot be directly learned from the regular training process. They are usually fixed before the actual training process begins. These parameters express important properties of the model such as its complexity or how fast it should learn.
modell.get_params().keys()
from sklearn.model_selection import GridSearchCV
params = {"estimator__penalty": ["l1", "l2"], "estimator__C": np.logspace(0, 3, 20)}
grid = GridSearchCV(modell, param_grid=params, n_jobs=-1)
grid.fit(x_train, y_train)
print(grid.best_score_) # the best score we get tunning these parameters
model_grid = grid.best_estimator_ # This gives the best model with the parameter
model_grid
y_pred = model_grid.predict(x_test)
print(classification_report(y_pred, y_test))
plot_confusion_matrix(model_grid, x_test, y_test)
# Predicting on unknown data
text = [
"It appears to be decent set of blades but when you actually use them they are not very sturdy. Cuts come out angled and wobbly. You get what you pay for"
]
test = tfidf.transform(text)
label.inverse_transform(np.unique(data_df["N_sents"]))[model_grid.predict(test)]
report = classification_report(y_test, y_pred, output_dict=True)
df5 = pd.DataFrame(report).transpose().reset_index()
df5.insert(0, "Technique", "TFIDF")
df5.insert(1, "Model", "Tuned LogisticRegression")
name.append("Tuned Logistic")
score.append(model_grid.score(x_test, y_test))
# Final Comparison
pd.DataFrame({"Model": name, "Score": score}, index=[1, 2, 3, 4, 5]).plot(
kind="bar", x="Model", y="Score", ylabel="Score"
)
# # Spacy vectorization
# spaCy is a free, open-source library for NLP in Python written in Cython. spaCy is designed to make it easy to build systems for information extraction or general-purpose natural language processing.
data_df["N_sents"].value_counts()
data_df["vector"] = data_df["review"].apply(vector)
x = data_df["vector"]
y = data_df["N_sents"]
x1 = np.stack(x)
sm = SMOTE()
X_s, Y_s = sm.fit_resample(x1, y)
x_trainn, x_testt, y_trainn, y_testt = train_test_split(
X_s, Y_s, test_size=0.3, random_state=12
)
x_train_2d = np.stack(x_trainn)
x_test_2d = np.stack(x_testt)
x_train_2d.shape
from sklearn.utils import class_weight
class_weight = class_weight.compute_class_weight(
"balanced", classes=np.unique(Y_s), y=Y_s
)
class_weight
# ## Logistic Regression
a = OneVsRestClassifier(LogisticRegression())
a.get_params().keys()
from sklearn.preprocessing import MinMaxScaler
pipe5 = Pipeline(
[
("scaler", MinMaxScaler()),
("lr", OneVsRestClassifier(LogisticRegression(class_weight="balanced"))),
]
)
pipe5.fit(x_train_2d, y_trainn)
y_pred = pipe5.predict(x_test_2d)
print("The Accuracy of this model is ", accuracy_score(y_testt, y_pred) * 100, "%")
print(classification_report(y_pred, y_testt))
# plot_confusion_matrix(pipe5,x_test,y_test)
report = classification_report(y_testt, y_pred, output_dict=True)
df6 = pd.DataFrame(report).transpose().reset_index()
df6.insert(0, "Technique", "Spacy")
df6.insert(1, "Model", "LogisticRegression")
# Predicting on unknown data
text = "It appears to be decent set of blades but when you actually use them they are not very sturdy. Cuts come out angled and wobbly. You get what you pay for"
doc = nlp(text).vector
doc = doc.reshape(-1, 300)
label.inverse_transform(np.unique(data_df["N_sents"]))[pipe5.predict(doc)]
# ## RandomForestClassifier
RandomForestClassifier().get_params().keys()
pipe6 = Pipeline(
[
("scaler", MinMaxScaler()),
("rf", RandomForestClassifier(class_weight="balanced")),
]
)
pipe6.fit(x_train_2d, y_trainn)
y_pred = pipe6.predict(x_test_2d)
print("The Accuracy of this model is ", accuracy_score(y_testt, y_pred) * 100, "%")
print("\n", classification_report(y_pred, y_testt))
report = classification_report(y_testt, y_pred, output_dict=True)
df7 = pd.DataFrame(report).transpose().reset_index()
df7.insert(0, "Technique", "Spacy")
df7.insert(1, "Model", "RandomForestClassifier")
# Predicting on unknown data
text = " fine okay"
doc = nlp(text).vector
doc = doc.reshape(-1, 300)
label.inverse_transform(np.unique(data_df["N_sents"]))[pipe6.predict(doc)]
# Comapring different techniques and models
d2 = pd.concat([dd, df5, df6, df7], axis=0)
d3 = d2.groupby(["Technique", "Model", "index"])[
"precision", "recall", "f1-score"
].mean()
d3
# # Fasttext
# FastText is an open-source, free library from Facebook AI Research(FAIR) for learning word embeddings and word classifications. This model allows creating unsupervised learning or supervised learning algorithm for obtaining vector representations for words. It also evaluates these models.
#
import fasttext
from sklearn.model_selection import train_test_split
data_fast = data_df[["Sentiments", "review"]]
data_fast["sentiment"] = "__label__" + data_fast["Sentiments"].astype(str)
data_fast["sentiment_review"] = data_fast["sentiment"] + " " + data_fast["review"]
train_df, test_df = train_test_split(data_fast, test_size=0.2)
train_df.to_csv("Train", columns=["sentiment_review"], index=False, header=False)
test_df.to_csv("Test", columns=["sentiment_review"], index=False, header=False)
data_fast.head()
model = fasttext.train_supervised(input="Train", lr=0.2, epoch=20, wordNgrams=2)
test_num, precision, recall = model.test("Test")
f_score = (2 * precision * recall) / (precision + recall)
print("No. of Test Samples:", test_num)
print("Precision Score:", precision)
print("Recall:", recall)
print("F1 score:", f_score)
model.predict("half")
model.predict(" i am very disappointed with this product")
model.predict(" The product worked really well")
model.get_nearest_neighbors("disappointed")
model.predict(" product is working ")
# # Text Analysis:
d = pd.read_csv("Clean_data.csv")
data_d = d[
[
"review",
"price",
"unixReviewTime",
"overall",
"summary",
"main_cat",
"brand",
"title",
]
]
import spacy
from spacy.lang.en.stop_words import STOP_WORDS
data_d.dropna(inplace=True)
data_d["Reviews"] = data_d["review"].apply(
lambda words: " ".join(word for word in words.split() if word not in STOP_WORDS)
)
data_d["summary"] = data_d["summary"].apply(
lambda words: " ".join(word for word in words.split() if word not in STOP_WORDS)
)
stop_words = d["brand"].apply(lambda x: re.findall("[a-zA-Z]+", x[:10]))
stop_words_user_define = []
for i in stop_words:
for j in i:
if j not in stop_words_user_define:
stop_words_user_define.append(j)
else:
pass
unique_st_words = set(stop_words_user_define)
data_d["Reviews"] = data_d["review"].apply(
lambda words: " ".join(
word for word in words.split() if word not in unique_st_words
)
)
data_d.dropna(inplace=True)
import gensim
review_text = data_d["Reviews"].apply(gensim.utils.simple_preprocess)
model = gensim.models.Word2Vec(window=10, min_count=2)
model.build_vocab(review_text, progress_per=100)
model.train(review_text, total_examples=model.corpus_count, epochs=model.epochs)
pos_df = data_d[data_d["overall"] > 3]
nt_df = data_d[data_d["overall"] == 3]
neg_df = data_d[data_d["overall"] < 3]
review_text1 = pos_df["summary"].apply(gensim.utils.simple_preprocess)
from collections import Counter
count1 = Counter()
for i in review_text1:
for j in i:
count1[j] += 1
review_text2 = nt_df["summary"].apply(gensim.utils.simple_preprocess)
from collections import Counter
count2 = Counter()
for i in review_text2:
for j in i:
count2[j] += 1
review_text3 = neg_df["summary"].apply(gensim.utils.simple_preprocess)
from collections import Counter
count3 = Counter()
for i in review_text3:
for j in i:
count3[j] += 1
post_lst = [i[0] for i in count1.most_common(1500)]
neu_lst = [i[0] for i in count2.most_common(1500)]
neg_lst = [i[0] for i in count3.most_common(1500)]
neg_word = []
for i in neg_lst:
if i in (post_lst and neu_lst):
pass
else:
neg_word.append(i)
pos_word = []
for i in post_lst:
if i in (neg_lst and neu_lst):
pass
else:
pos_word.append(i)
neu_word = []
for i in neu_lst:
if i in (post_lst and neg_lst):
pass
else:
neu_word.append(i)
# # **Analyzing Text**
def rew_analyzer(st):
p = 0
n = 0
nu = 0
for i in st.split():
if i in pos_word:
# print(i,'po')
p += 1
elif i in neg_word:
n += 1
# print(i,'ne')
elif i in neu_word:
nu += 1
# print(i,'nu')
if p > n and p > nu:
return "Positive Review"
elif p == n and p == nu:
return "Positive Review"
elif n > p and n > nu:
return "Negetive Review"
elif p < n and n == nu:
return "Negative Review"
elif nu > p and nu > n:
return "Neutral Review"
elif p < n and p == nu:
return "Negative Review"
else:
return "Neutral"
data_d["Reviews Text Analysis"] = data_d["Reviews"].apply(rew_analyzer)
data_d["y_test"] = data_d["overall"].map({5: 3, 4: 3, 3: 2, 2: 1, 1: 1})
data_d["y_pred"] = data_d["Reviews Text Analysis"].map(
{"Positive Review": 3, "Neutral Review": 2, "Negetive Review": 1}
)
data_d.dropna(inplace=True)
accuracy_score(data_d["y_test"], data_d["y_pred"])
# # Word Cloud
from wordcloud import WordCloud
# This is an image composed of words used in a particular text or subject, in which the size of each word indicates its frequency or importance
# ### Postive Words
wordcloud = WordCloud(
width=1600, height=1000, background_color="lightgrey", min_font_size=10
).generate(" ".join(post_lst))
# plot the WordCloud image
plt.figure(figsize=(12, 8), facecolor=None)
plt.imshow(wordcloud)
plt.axis("off")
plt.tight_layout(pad=0)
plt.show()
# ### Negative Words
wordcloud = WordCloud(
width=1600, height=1000, background_color="lightgrey", min_font_size=10
).generate(" ".join(neg_word))
# plot the WordCloud image
plt.figure(figsize=(12, 8), facecolor=None)
plt.imshow(wordcloud)
plt.axis("off")
plt.tight_layout(pad=0)
plt.show()
# ### Neutral Words
wordcloud = WordCloud(
width=1600, height=1000, background_color="lightgrey", min_font_size=10
).generate(" ".join(neu_word))
# plot the WordCloud image
plt.figure(figsize=(12, 8), facecolor=None)
plt.imshow(wordcloud)
plt.axis("off")
plt.tight_layout(pad=0)
plt.show()
# # **Translator**
from textblob import TextBlob
def score(a):
ans = str()
if a < 0.1:
ans = "Negative"
elif a > 0.1 and a < 0.5:
ans = "Neutral"
else:
ans = "Positive"
return print("This is a {0} statement".format(ans))
blob = TextBlob("This is a good product")
b = str(blob.translate(from_lang="en", to="fr"))
b
blob = TextBlob(b)
a = TextBlob(str(blob.translate(from_lang="fr", to="en")))
score(a.sentiment.polarity)
# # Analysis of customers and products
# We will see in detail about the top products and the customers
data1 = pd.get_dummies(data_df["verified"])
data_df = pd.concat([data_df, data1], axis=1)
data = pd.get_dummies(data_df["Sentiments"])
data_df = pd.concat([data_df, data], axis=1)
data_df["Negative"] = data_df["Negative"].replace(0, np.nan, regex=True)
data_df["Neutral"] = data_df["Neutral"].replace(0, np.nan, regex=True)
data_df["Positive"] = data_df["Positive"].replace(0, np.nan, regex=True)
data_df[0] = data_df[0].replace(0, np.nan, regex=True)
data_df[1] = data_df[1].replace(0, np.nan, regex=True)
# ### Product Analysis
cl1 = (
data_df.groupby(["asin"])
.agg(
Total_Sold=(1, "count"),
Negative=("Negative", "count"),
Neutral=("Neutral", "count"),
Positive=("Positive", "count"),
Price=("price", "mean"),
Sales=("price", "sum"),
Rank=("rank", "median"),
Verified_True=(1, "count"),
Verified_False=(0, "count"),
)
.reset_index()
)
cl1["Main_Cat"] = cl1["asin"].apply(
lambda x: data_df.loc[data_df["asin"] == x, "main_cat"].unique()[0]
)
cl1["Brand"] = cl1["asin"].apply(
lambda x: data_df.loc[data_df["asin"] == x, "brand"].unique()[0]
)
cl1["Title"] = cl1["asin"].apply(
lambda x: data_df.loc[data_df["asin"] == x, "title"].unique()[0]
)
def product(
Column="Total_Sold",
sort=True,
main_cat="Tools & Home Improvement",
sold=20,
visual="df",
):
if visual == "plot":
cl1[(cl1["Main_Cat"] == main_cat) & (cl1["Total_Sold"] >= sold)].sort_values(
by=[Column], ascending=sort
).head().plot(x="asin", kind="bar", y=Column)
else:
display(
cl1[
(cl1["Main_Cat"] == main_cat) & (cl1["Total_Sold"] >= sold)
].sort_values(by=[Column], ascending=sort)
)
interact(
product,
Column=[x for x in cl1.columns],
sort=[True, False],
main_cat=[y for y in data_df["main_cat"].unique()],
sold=(1, 500, 10),
visual=["plot", "df"],
)
# ### Customers Analysis
cl2 = (
data_df.groupby(["reviewerID"])
.agg(
Item_bought=(1, "count"),
Negative=("Negative", "count"),
Neutral=("Neutral", "count"),
Positive=("Positive", "count"),
# Price=('price','mean'),
Amount=("price", "sum"),
# Rank=('rank','median'),
Verified_True=(1, "count"),
Verified_False=(0, "count"),
)
.reset_index()
)
def customer(Column="Item_bought", sort=True, Bought=20, visual="df"):
if visual == "plot":
cl2.loc[cl2["Item_bought"] >= Bought].sort_values(
by=[Column], ascending=sort
).head().plot(x="reviewerID", kind="bar", y=Column)
else:
display(
cl2[cl2["Item_bought"] >= Bought].sort_values(by=[Column], ascending=sort)
)
interact(
customer,
Column=[x for x in cl2.columns],
sort=[True, False],
sold=(1, 500, 10),
visual=["plot", "df"],
)
# # **Product based classification**
import statistics as st
mode = lambda x: st.mode(x)
cl = (
data_df.groupby(["asin"])
.agg(
Total_Sold=(1, "count"),
Negative=("Negative", "count"),
Neutral=("Neutral", "count"),
Positive=("Positive", "count"),
Price=("price", "mean"),
Sales=("price", "sum"),
Rank=("rank", "median"),
Verified_True=(1, "count"),
Verified_False=(0, "count"),
Rating=("overall", mode),
)
.reset_index()
)
display(cl.head(2))
numerics = [
"int16",
"int32",
"int64",
"float16",
"float32",
"float64",
] # list of all numeric type in daataframe
con_df = cl.select_dtypes(include=numerics) # create dataframe for all numeric type
# con_df.drop('asin',axis=1,inplace=True)
con_cols = con_df.columns # taking colums
num_cols = 4 # how many column we want in subplot
n_bins = 20 # bin size for histogram
num_rows = int(len(con_cols) / num_cols) + 1 # rows for subplot
figs, axes = plt.subplots(num_rows, num_cols, tight_layout=True, figsize=(20, 8))
for col, ax in zip(con_cols, axes.flatten()):
sns.boxplot(y=con_df[col], ax=ax, color="tan")
ax.set_title(col)
plt.show()
numerics = [
"int16",
"int32",
"int64",
"float16",
"float32",
"float64",
] # list of all numeric type in daataframe
con_df = cl.select_dtypes(include=numerics) # create dataframe for all numeric type
# con_df.drop('asin',axis=1,inplace=True)
con_cols = con_df.columns # taking colums
num_cols = 4 # how many column we want in subplot
n_bins = 20 # bin size for histogram
num_rows = int(len(con_cols) / num_cols) + 1 # rows for subplot
figs, axes = plt.subplots(num_rows, num_cols, tight_layout=True, figsize=(20, 8))
for col, ax in zip(con_cols, axes.flatten()):
sns.histplot(x=con_df[col], ax=ax, color="tan")
ax.set_title(col)
plt.show()
cl.Rating.value_counts()
smote = SMOTE(sampling_strategy="auto")
X = cl.iloc[:, 1:9]
y = cl.Rating
X_s, Y_s = smote.fit_resample(X, y)
x = pd.concat([X_s, Y_s], axis=1)
x.Rating.value_counts()
from sklearn.tree import DecisionTreeClassifier # model
from sklearn.metrics import (
confusion_matrix,
accuracy_score,
recall_score,
f1_score,
precision_score,
) # to get the score of classifier
X_train, X_test, y_train, y_test = train_test_split(X_s, Y_s, train_size=0.7)
dt = DecisionTreeClassifier(random_state=1) # crating classifier
dt.fit(X_test, y_test) # fitting the train data
DT_pred = dt.predict(X_test) # predicting test data
DT_pred1 = dt.predict(X_train) # predicting train data
a_s = accuracy_score(
y_train, DT_pred1
) # gives the accuracy of model how well our classifier
r_s = recall_score(
y_train, DT_pred1, average="macro"
) # to get the recall score used predefined function
p_s = precision_score(
y_train, DT_pred1, average="macro"
) # to get the precision_score used predefined function
f1_s = f1_score(
y_train, DT_pred1, average="macro"
) # to get the f1_score used predefined function
print("Accuracy is:", a_s)
print("Recall_score is:", r_s)
print("Precision_score is:", p_s)
print("F1_score is:", f1_s)
a_s = accuracy_score(
y_test, DT_pred
) # gives the accuracy of model how well our classifier
r_s = recall_score(
y_test, DT_pred, average="macro"
) # to get the recall score used predefined function
p_s = precision_score(
y_test, DT_pred, average="macro"
) # to get the precision_score used predefined function
f1_s = f1_score(
y_test, DT_pred, average="macro"
) # to get the f1_score used predefined function
print("Accuracy is:", a_s)
print("Recall_score is:", r_s)
print("Precision_score is:", p_s)
print("F1_score is:", f1_s)
# # **Search Recommendation**
ans = pd.DataFrame(index=range(len(data_df)))
a = str(input("Enter the word :")).lower()
data_df["description"] = data_df["description"].apply(lambda x: str(x))
ans["index"] = data_df["description"].apply(
lambda x: re.findall(a, x)[0] if len(re.findall(a, x)) != 0 else np.nan
)
ans.dropna(inplace=True)
ans1 = data_df.loc[ans.index, :]
def recoomendation(column="Price", sort=True, n=5):
a = ans1.loc[ans1["overall"] > 3, :]
cl3 = (
a.groupby("asin")
.agg(
Total_Review=(1, "count"),
Rating=("overall", "mean"),
# Negative=('Negative','count'),
# Neutral=('Neutral','count'),
# Positive=('Positive','count'),
Price=("price", "mean"),
)
.reset_index()
)
cl3["brand"] = cl3["asin"].apply(
lambda x: a.loc[a["asin"] == x, "brand"].unique()[0]
)
if len(cl3) == 0:
print("Thanks for searching , please look for another product")
else:
display(cl3.sort_values(by=column, ascending=sort).head(n))
interact(
recoomendation, column=[i for i in cl3.columns], sort=[True, False], n=(1, 50, 1)
)
| false | 0 | 9,842 | 0 | 9,842 | 9,842 |
||
129107176
|
# Math Problem Categorization Community Challenge
# Overview:
# The goal of this competition is to develop a machine-learning model that can accurately categorize math problems based on the text of the problem. This challenge is an excellent opportunity to apply and sharpen your text classification skills in a unique context, that of Mathematical problems.
# Mathematics is a vast field with various sub-domains, each with its unique problem types and solving methods. While it's straightforward for humans, especially educators, to categorize math problems into their respective domains like algebra, calculus, statistics, etc., doing so automatically using machine learning can be a daunting task. However, such automated classification can make it easier for educators to curate problems and help students focus on specific math domains.
# Dataset Description:
# The dataset for this competition contains a collection of math problems in English, each labeled with their respective categories. These categories represent the mathematical domain they fall into.
# The dataset has been anonymized and randomized to maintain the fairness of the competition. It is split into training and test sets. The training set contains the problems and their labels, while the test set only contains the problems. Your task is to predict the labels.
# Columns
#
# problem : Math Problem
# category : Category code for Math Problem
#
# Importing Libraries
#
import pandas as pd # To handle csv files as dataframes
import numpy as np # To handle vector mathematical computations
import re # For Text preprocessing like pattern matching, removal etc
import matplotlib.pyplot as plt # For Data Visualisation
import string
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
# Reading the dataset
train_df = pd.read_csv("/kaggle/input/math-problem-categorization/train.csv")
train_df.head() # Displaying first 5 records
train_df.tail() # Displaying last 5 records
train_df.sample(5) # Displaying random 5 rows from the train data
# Explorartory Data Analysis
train_df.shape
# Inference: It contains 250 Rows and 2 Columns [Problem, category]
train_df.info()
train_df.isna().sum()
# Inference: No Null Values in the dataset.
train_df["category"].unique()
train_df["category"].value_counts()
plt.pie(
train_df["category"].value_counts(),
labels=train_df["category"].value_counts().index,
)
plt.title("Distribution of Categories")
plt.show()
# Inference: There are 25 different categories and almost the data seems to be balanced with category 7 having the highest entries.
for i in sorted(train_df["category"].unique()):
print("----------Category{}---------\n".format(i))
problem = train_df[train_df["category"] == i].sample(2)["problem"]
for i in problem:
print(i)
print()
# Inference: Category 0 - mostly is about finding terms coefficients and constants - Algebra
# Text Preprocessing
# Define a function to perform text preprocessing
def preprocess_text(text):
# Remove punctuations
text = text.translate(str.maketrans("", "", string.punctuation))
# Convert to lowercase
text = text.lower()
# Remove numbers
text = re.sub("\d+", "", text)
# Remove stopwords
nltk.download("stopwords")
stop_words = set(stopwords.words("english"))
words = nltk.word_tokenize(text)
words = [word for word in words if word not in stop_words]
"""Lemmatize words
lemmatizer = WordNetLemmatizer()
words = [lemmatizer.lemmatize(word) for word in words]"""
# Join words back into a string
text = " ".join(words)
# Return preprocessed text
return text
# Apply text preprocessing to the text column in the DataFrame
train_df["problem"] = train_df["problem"].apply(preprocess_text)
# Train Test Split Stratified Sampling & TFIDF
# Split the data into training and testing sets using stratified sampling
X_train, X_test, y_train, y_test = train_test_split(
train_df["problem"],
train_df["category"],
test_size=0.3,
stratify=train_df["category"],
random_state=42,
)
text_corpus = ""
for i in range(len(X_train)):
text_corpus += X_train.iloc[i]
from sklearn.feature_extraction.text import TfidfVectorizer
# Step 1: Compute the frequency of each word in the corpus.
word_freq = {}
for doc in text_corpus.split():
if doc not in word_freq and len(doc) > 2:
word_freq[doc] = 0
if doc in word_freq:
word_freq[doc] += 1
# Step 2: Sort the words based on their frequency in ascending order.
sorted_words = sorted(word_freq.items(), key=lambda x: x[1])[::-1]
# Step 3: Select the k least frequent words.
k = 400
least_frequent_words = [w[0] for w in sorted_words[:k]]
len(word_freq)
tfidf_vectorizer = TfidfVectorizer(
vocabulary=[
"expression",
"many",
"system",
"number",
"match",
"write",
"following",
"total",
"equations",
"using",
"per",
"phrase",
"students",
"substitution",
"area",
"exploratory",
"terms",
"times",
"product",
"quotient",
"algebraic",
"represent",
"elimination",
"equation",
"phrasen",
]
)
# Step 5: Use the TfidfVectorizer to transform your text corpus.
X_train_vectorized = tfidf_vectorizer.fit_transform(X_train)
X_test_vectorized = tfidf_vectorizer.transform(X_test)
# Convert the text data into numerical features using TF-IDF vectorization
vectorizer = TfidfVectorizer()
X_train_vectorized = vectorizer.fit_transform(X_train)
X_test_vectorized = vectorizer.transform(X_test)
# Model Building
# Support Vector Machine Classifier
# Train a Support Vector Machine (SVM) classifier on the training data
SVMclassifier = SVC(kernel="linear", C=1, random_state=42)
SVMclassifier.fit(X_train_vectorized, y_train)
# Predict the categories of the testing data using the trained classifier
SVMy_pred = SVMclassifier.predict(X_test_vectorized)
# Evaluate the performance of the classifier using the classification report
accuracy_svm = accuracy_score(y_test, SVMy_pred)
print(accuracy_svm)
# Mulitnomial Naive Bayes
from sklearn.naive_bayes import MultinomialNB
NBclassifier = MultinomialNB()
NBclassifier.fit(X_train_vectorized, y_train)
# Test the classifier on the testing data and print the accuracy score and classification report
NBy_pred = NBclassifier.predict(X_test_vectorized)
print("Accuracy:", accuracy_score(y_test, NBy_pred))
# Enseble of Random Forest XG Boost LightGBM
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
# Define the base classifiers
rf_classifier = RandomForestClassifier()
xgb_classifier = XGBClassifier()
lgb_classifier = LGBMClassifier()
# Define the ensemble classifier using majority voting
ensemble_classifier = VotingClassifier(
estimators=[
("rf", rf_classifier),
("xgb", xgb_classifier),
("lgb", lgb_classifier),
],
voting="soft",
)
# Train the ensemble classifier on the training data
ensemble_classifier.fit(X_train_vectorized, y_train)
# Evaluate the ensemble classifier on the testing data
accuracy = ensemble_classifier.score(X_test_vectorized, y_test)
print("Accuracy:", accuracy)
# Hyperparameter tuning
from sklearn.ensemble import (
VotingClassifier,
RandomForestClassifier,
AdaBoostClassifier,
GradientBoostingClassifier,
ExtraTreesClassifier,
)
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score
# Create the individual models
rf = RandomForestClassifier(random_state=42)
xgb = XGBClassifier(random_state=42)
lgbm = LGBMClassifier(random_state=42)
ada = AdaBoostClassifier(random_state=42)
gb = GradientBoostingClassifier(random_state=42)
et = ExtraTreesClassifier(random_state=42)
# Create the voting classifier
voting = VotingClassifier(
estimators=[
("rf", rf),
("xgb", xgb),
("lgbm", lgbm),
("ada", ada),
("gb", gb),
("et", et),
],
voting="soft",
)
voting.fit(X_train_vectorized, y_train)
# Make predictions on the test set
y_pred = voting.predict(X_test_vectorized)
# Calculate the accuracy score
accuracy = accuracy_score(y_test, y_pred)
# Print the results
print("Test accuracy:", accuracy)
test_df = pd.read_csv("/kaggle/input/math-problem-categorization/test.csv")
test_df.head()
test_df.info()
test_df.drop_duplicates(subset="problem", keep="first")
test_df["problem"] = test_df["problem"].apply(preprocess_text)
XTest = test_df["problem"]
XTest = vectorizer.transform(XTest)
YTestPred = ensemble_classifier.predict(XTest)
test_df.head()
test_df["category"] = YTestPred
test_df.sample(10)
test_df.to_csv("/kaggle/working/Submission_Baseline_0.77.csv", index=False)
len(YTestPred)
test_df.drop_duplicates(subset="problem", keep="first", inplace=True)
test_df.shape
test_df.to_csv("/kaggle/working/submission.csv", index=False)
len(text_corpus.split())
from sklearn.feature_extraction.text import TfidfVectorizer
dic = {}
for i in range(1, len(text_corpus.split())):
# print("------------------------------------\n")
# print("no of words:",i)
# print()
# Step 1: Compute the frequency of each word in the corpus.
word_freq = {}
for doc in text_corpus.split():
if doc not in word_freq and len(doc) > 2:
word_freq[doc] = 0
if doc in word_freq:
word_freq[doc] += 1
# Step 2: Sort the words based on their frequency in ascending order.
sorted_words = sorted(word_freq.items(), key=lambda x: x[1]) # [::-1]
# Step 3: Select the k least frequent words.
k = i
least_frequent_words = [w[0] for w in sorted_words[:k]]
tfidf_vectorizer = TfidfVectorizer(vocabulary=least_frequent_words)
# Step 5: Use the TfidfVectorizer to transform your text corpus.
X_train_vectorized = tfidf_vectorizer.fit_transform(X_train)
X_test_vectorized = tfidf_vectorizer.transform(X_test)
rf_classifier = RandomForestClassifier()
xgb_classifier = XGBClassifier()
lgb_classifier = LGBMClassifier()
# Define the ensemble classifier using majority voting
ensemble_classifier = VotingClassifier(
estimators=[
("rf", rf_classifier),
("xgb", xgb_classifier),
("lgb", lgb_classifier),
],
voting="soft",
)
# Train the ensemble classifier on the training data
ensemble_classifier.fit(X_train_vectorized, y_train)
# Evaluate the ensemble classifier on the testing data
accuracy = ensemble_classifier.score(X_test_vectorized, y_test)
# print("Accuracy:", accuracy)
dic[i] = accuracy
from sklearn.feature_extraction.text import TfidfVectorizer
dicr = {}
for i in range(1, len(text_corpus.split())):
# print("------------------------------------\n")
# print("no of words:",i)
# print()
# Step 1: Compute the frequency of each word in the corpus.
word_freq = {}
for doc in text_corpus.split():
if doc not in word_freq and len(doc) > 2:
word_freq[doc] = 0
if doc in word_freq:
word_freq[doc] += 1
# Step 2: Sort the words based on their frequency in ascending order.
sorted_words = sorted(word_freq.items(), key=lambda x: x[1])[::-1]
# Step 3: Select the k least frequent words.
k = i
least_frequent_words = [w[0] for w in sorted_words[:k]]
tfidf_vectorizer = TfidfVectorizer(vocabulary=least_frequent_words)
# Step 5: Use the TfidfVectorizer to transform your text corpus.
X_train_vectorized = tfidf_vectorizer.fit_transform(X_train)
X_test_vectorized = tfidf_vectorizer.transform(X_test)
rf_classifier = RandomForestClassifier()
xgb_classifier = XGBClassifier()
lgb_classifier = LGBMClassifier()
# Define the ensemble classifier using majority voting
ensemble_classifier = VotingClassifier(
estimators=[
("rf", rf_classifier),
("xgb", xgb_classifier),
("lgb", lgb_classifier),
],
voting="soft",
)
# Train the ensemble classifier on the training data
ensemble_classifier.fit(X_train_vectorized, y_train)
# Evaluate the ensemble classifier on the testing data
accuracy = ensemble_classifier.score(X_test_vectorized, y_test)
# print("Accuracy:", accuracy)
dicr[i] = accuracy
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/107/129107176.ipynb
| null | null |
[{"Id": 129107176, "ScriptId": 38356443, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6126321, "CreationDate": "05/11/2023 04:11:29", "VersionNumber": 1.0, "Title": "KaggleCommunity_MathCategoryClassification", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 354.0, "LinesInsertedFromPrevious": 354.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# Math Problem Categorization Community Challenge
# Overview:
# The goal of this competition is to develop a machine-learning model that can accurately categorize math problems based on the text of the problem. This challenge is an excellent opportunity to apply and sharpen your text classification skills in a unique context, that of Mathematical problems.
# Mathematics is a vast field with various sub-domains, each with its unique problem types and solving methods. While it's straightforward for humans, especially educators, to categorize math problems into their respective domains like algebra, calculus, statistics, etc., doing so automatically using machine learning can be a daunting task. However, such automated classification can make it easier for educators to curate problems and help students focus on specific math domains.
# Dataset Description:
# The dataset for this competition contains a collection of math problems in English, each labeled with their respective categories. These categories represent the mathematical domain they fall into.
# The dataset has been anonymized and randomized to maintain the fairness of the competition. It is split into training and test sets. The training set contains the problems and their labels, while the test set only contains the problems. Your task is to predict the labels.
# Columns
#
# problem : Math Problem
# category : Category code for Math Problem
#
# Importing Libraries
#
import pandas as pd # To handle csv files as dataframes
import numpy as np # To handle vector mathematical computations
import re # For Text preprocessing like pattern matching, removal etc
import matplotlib.pyplot as plt # For Data Visualisation
import string
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
# Reading the dataset
train_df = pd.read_csv("/kaggle/input/math-problem-categorization/train.csv")
train_df.head() # Displaying first 5 records
train_df.tail() # Displaying last 5 records
train_df.sample(5) # Displaying random 5 rows from the train data
# Explorartory Data Analysis
train_df.shape
# Inference: It contains 250 Rows and 2 Columns [Problem, category]
train_df.info()
train_df.isna().sum()
# Inference: No Null Values in the dataset.
train_df["category"].unique()
train_df["category"].value_counts()
plt.pie(
train_df["category"].value_counts(),
labels=train_df["category"].value_counts().index,
)
plt.title("Distribution of Categories")
plt.show()
# Inference: There are 25 different categories and almost the data seems to be balanced with category 7 having the highest entries.
for i in sorted(train_df["category"].unique()):
print("----------Category{}---------\n".format(i))
problem = train_df[train_df["category"] == i].sample(2)["problem"]
for i in problem:
print(i)
print()
# Inference: Category 0 - mostly is about finding terms coefficients and constants - Algebra
# Text Preprocessing
# Define a function to perform text preprocessing
def preprocess_text(text):
# Remove punctuations
text = text.translate(str.maketrans("", "", string.punctuation))
# Convert to lowercase
text = text.lower()
# Remove numbers
text = re.sub("\d+", "", text)
# Remove stopwords
nltk.download("stopwords")
stop_words = set(stopwords.words("english"))
words = nltk.word_tokenize(text)
words = [word for word in words if word not in stop_words]
"""Lemmatize words
lemmatizer = WordNetLemmatizer()
words = [lemmatizer.lemmatize(word) for word in words]"""
# Join words back into a string
text = " ".join(words)
# Return preprocessed text
return text
# Apply text preprocessing to the text column in the DataFrame
train_df["problem"] = train_df["problem"].apply(preprocess_text)
# Train Test Split Stratified Sampling & TFIDF
# Split the data into training and testing sets using stratified sampling
X_train, X_test, y_train, y_test = train_test_split(
train_df["problem"],
train_df["category"],
test_size=0.3,
stratify=train_df["category"],
random_state=42,
)
text_corpus = ""
for i in range(len(X_train)):
text_corpus += X_train.iloc[i]
from sklearn.feature_extraction.text import TfidfVectorizer
# Step 1: Compute the frequency of each word in the corpus.
word_freq = {}
for doc in text_corpus.split():
if doc not in word_freq and len(doc) > 2:
word_freq[doc] = 0
if doc in word_freq:
word_freq[doc] += 1
# Step 2: Sort the words based on their frequency in ascending order.
sorted_words = sorted(word_freq.items(), key=lambda x: x[1])[::-1]
# Step 3: Select the k least frequent words.
k = 400
least_frequent_words = [w[0] for w in sorted_words[:k]]
len(word_freq)
tfidf_vectorizer = TfidfVectorizer(
vocabulary=[
"expression",
"many",
"system",
"number",
"match",
"write",
"following",
"total",
"equations",
"using",
"per",
"phrase",
"students",
"substitution",
"area",
"exploratory",
"terms",
"times",
"product",
"quotient",
"algebraic",
"represent",
"elimination",
"equation",
"phrasen",
]
)
# Step 5: Use the TfidfVectorizer to transform your text corpus.
X_train_vectorized = tfidf_vectorizer.fit_transform(X_train)
X_test_vectorized = tfidf_vectorizer.transform(X_test)
# Convert the text data into numerical features using TF-IDF vectorization
vectorizer = TfidfVectorizer()
X_train_vectorized = vectorizer.fit_transform(X_train)
X_test_vectorized = vectorizer.transform(X_test)
# Model Building
# Support Vector Machine Classifier
# Train a Support Vector Machine (SVM) classifier on the training data
SVMclassifier = SVC(kernel="linear", C=1, random_state=42)
SVMclassifier.fit(X_train_vectorized, y_train)
# Predict the categories of the testing data using the trained classifier
SVMy_pred = SVMclassifier.predict(X_test_vectorized)
# Evaluate the performance of the classifier using the classification report
accuracy_svm = accuracy_score(y_test, SVMy_pred)
print(accuracy_svm)
# Mulitnomial Naive Bayes
from sklearn.naive_bayes import MultinomialNB
NBclassifier = MultinomialNB()
NBclassifier.fit(X_train_vectorized, y_train)
# Test the classifier on the testing data and print the accuracy score and classification report
NBy_pred = NBclassifier.predict(X_test_vectorized)
print("Accuracy:", accuracy_score(y_test, NBy_pred))
# Enseble of Random Forest XG Boost LightGBM
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
# Define the base classifiers
rf_classifier = RandomForestClassifier()
xgb_classifier = XGBClassifier()
lgb_classifier = LGBMClassifier()
# Define the ensemble classifier using majority voting
ensemble_classifier = VotingClassifier(
estimators=[
("rf", rf_classifier),
("xgb", xgb_classifier),
("lgb", lgb_classifier),
],
voting="soft",
)
# Train the ensemble classifier on the training data
ensemble_classifier.fit(X_train_vectorized, y_train)
# Evaluate the ensemble classifier on the testing data
accuracy = ensemble_classifier.score(X_test_vectorized, y_test)
print("Accuracy:", accuracy)
# Hyperparameter tuning
from sklearn.ensemble import (
VotingClassifier,
RandomForestClassifier,
AdaBoostClassifier,
GradientBoostingClassifier,
ExtraTreesClassifier,
)
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import accuracy_score
# Create the individual models
rf = RandomForestClassifier(random_state=42)
xgb = XGBClassifier(random_state=42)
lgbm = LGBMClassifier(random_state=42)
ada = AdaBoostClassifier(random_state=42)
gb = GradientBoostingClassifier(random_state=42)
et = ExtraTreesClassifier(random_state=42)
# Create the voting classifier
voting = VotingClassifier(
estimators=[
("rf", rf),
("xgb", xgb),
("lgbm", lgbm),
("ada", ada),
("gb", gb),
("et", et),
],
voting="soft",
)
voting.fit(X_train_vectorized, y_train)
# Make predictions on the test set
y_pred = voting.predict(X_test_vectorized)
# Calculate the accuracy score
accuracy = accuracy_score(y_test, y_pred)
# Print the results
print("Test accuracy:", accuracy)
test_df = pd.read_csv("/kaggle/input/math-problem-categorization/test.csv")
test_df.head()
test_df.info()
test_df.drop_duplicates(subset="problem", keep="first")
test_df["problem"] = test_df["problem"].apply(preprocess_text)
XTest = test_df["problem"]
XTest = vectorizer.transform(XTest)
YTestPred = ensemble_classifier.predict(XTest)
test_df.head()
test_df["category"] = YTestPred
test_df.sample(10)
test_df.to_csv("/kaggle/working/Submission_Baseline_0.77.csv", index=False)
len(YTestPred)
test_df.drop_duplicates(subset="problem", keep="first", inplace=True)
test_df.shape
test_df.to_csv("/kaggle/working/submission.csv", index=False)
len(text_corpus.split())
from sklearn.feature_extraction.text import TfidfVectorizer
dic = {}
for i in range(1, len(text_corpus.split())):
# print("------------------------------------\n")
# print("no of words:",i)
# print()
# Step 1: Compute the frequency of each word in the corpus.
word_freq = {}
for doc in text_corpus.split():
if doc not in word_freq and len(doc) > 2:
word_freq[doc] = 0
if doc in word_freq:
word_freq[doc] += 1
# Step 2: Sort the words based on their frequency in ascending order.
sorted_words = sorted(word_freq.items(), key=lambda x: x[1]) # [::-1]
# Step 3: Select the k least frequent words.
k = i
least_frequent_words = [w[0] for w in sorted_words[:k]]
tfidf_vectorizer = TfidfVectorizer(vocabulary=least_frequent_words)
# Step 5: Use the TfidfVectorizer to transform your text corpus.
X_train_vectorized = tfidf_vectorizer.fit_transform(X_train)
X_test_vectorized = tfidf_vectorizer.transform(X_test)
rf_classifier = RandomForestClassifier()
xgb_classifier = XGBClassifier()
lgb_classifier = LGBMClassifier()
# Define the ensemble classifier using majority voting
ensemble_classifier = VotingClassifier(
estimators=[
("rf", rf_classifier),
("xgb", xgb_classifier),
("lgb", lgb_classifier),
],
voting="soft",
)
# Train the ensemble classifier on the training data
ensemble_classifier.fit(X_train_vectorized, y_train)
# Evaluate the ensemble classifier on the testing data
accuracy = ensemble_classifier.score(X_test_vectorized, y_test)
# print("Accuracy:", accuracy)
dic[i] = accuracy
from sklearn.feature_extraction.text import TfidfVectorizer
dicr = {}
for i in range(1, len(text_corpus.split())):
# print("------------------------------------\n")
# print("no of words:",i)
# print()
# Step 1: Compute the frequency of each word in the corpus.
word_freq = {}
for doc in text_corpus.split():
if doc not in word_freq and len(doc) > 2:
word_freq[doc] = 0
if doc in word_freq:
word_freq[doc] += 1
# Step 2: Sort the words based on their frequency in ascending order.
sorted_words = sorted(word_freq.items(), key=lambda x: x[1])[::-1]
# Step 3: Select the k least frequent words.
k = i
least_frequent_words = [w[0] for w in sorted_words[:k]]
tfidf_vectorizer = TfidfVectorizer(vocabulary=least_frequent_words)
# Step 5: Use the TfidfVectorizer to transform your text corpus.
X_train_vectorized = tfidf_vectorizer.fit_transform(X_train)
X_test_vectorized = tfidf_vectorizer.transform(X_test)
rf_classifier = RandomForestClassifier()
xgb_classifier = XGBClassifier()
lgb_classifier = LGBMClassifier()
# Define the ensemble classifier using majority voting
ensemble_classifier = VotingClassifier(
estimators=[
("rf", rf_classifier),
("xgb", xgb_classifier),
("lgb", lgb_classifier),
],
voting="soft",
)
# Train the ensemble classifier on the training data
ensemble_classifier.fit(X_train_vectorized, y_train)
# Evaluate the ensemble classifier on the testing data
accuracy = ensemble_classifier.score(X_test_vectorized, y_test)
# print("Accuracy:", accuracy)
dicr[i] = accuracy
| false | 0 | 3,492 | 0 | 3,492 | 3,492 |
||
129107608
|
<jupyter_start><jupyter_text>The Movies Dataset
### Context
These files contain metadata for all 45,000 movies listed in the Full MovieLens Dataset. The dataset consists of movies released on or before July 2017. Data points include cast, crew, plot keywords, budget, revenue, posters, release dates, languages, production companies, countries, TMDB vote counts and vote averages.
This dataset also has files containing 26 million ratings from 270,000 users for all 45,000 movies. Ratings are on a scale of 1-5 and have been obtained from the official GroupLens website.
### Content
This dataset consists of the following files:
**movies_metadata.csv:** The main Movies Metadata file. Contains information on 45,000 movies featured in the Full MovieLens dataset. Features include posters, backdrops, budget, revenue, release dates, languages, production countries and companies.
**keywords.csv:** Contains the movie plot keywords for our MovieLens movies. Available in the form of a stringified JSON Object.
**credits.csv:** Consists of Cast and Crew Information for all our movies. Available in the form of a stringified JSON Object.
**links.csv:** The file that contains the TMDB and IMDB IDs of all the movies featured in the Full MovieLens dataset.
**links_small.csv:** Contains the TMDB and IMDB IDs of a small subset of 9,000 movies of the Full Dataset.
**ratings_small.csv:** The subset of 100,000 ratings from 700 users on 9,000 movies.
The Full MovieLens Dataset consisting of 26 million ratings and 750,000 tag applications from 270,000 users on all the 45,000 movies in this dataset can be accessed [here](https://grouplens.org/datasets/movielens/latest/)
Kaggle dataset identifier: the-movies-dataset
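A minimal sketch (added for illustration, not part of the original dataset description) of how the stringified JSON-style columns described above can be parsed; it assumes the same Kaggle input path used in the notebook below and the keywords.csv layout described here.
import ast
import pandas as pd
keywords = pd.read_csv("/kaggle/input/the-movies-dataset/keywords.csv")
# Each cell looks like "[{'id': 931, 'name': 'jealousy'}, ...]"; ast.literal_eval turns
# that Python-literal string into a list of dicts, from which we keep only the names.
keywords["keywords"] = keywords["keywords"].apply(
    lambda s: [d["name"] for d in ast.literal_eval(s)]
)
print(keywords.head())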
<jupyter_script>import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
# Kaggle input directory
data_dir = "/kaggle/input/the-movies-dataset/"
# Load datasets
ratings = pd.read_csv(data_dir + "ratings_small.csv")
metadata = pd.read_csv(data_dir + "movies_metadata.csv", low_memory=False)
# Align the metadata id column with the ratings' movieId; malformed ids become NaN
metadata = metadata.rename(columns={"id": "movieId"})
metadata["movieId"] = pd.to_numeric(metadata["movieId"], errors="coerce")
# Merge the two DataFrames on the 'movieId' column
combined_data = pd.merge(ratings, metadata, on="movieId")
# Filter users who have rated 20 or more movies
user_rating_counts = combined_data.groupby("userId")["rating"].count()
active_users = user_rating_counts[user_rating_counts >= 20].index
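# Illustrative check (added): how many users meet the 20-rating threshold
print(f"{len(active_users)} users rated at least 20 movies")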
import json
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
# Convert 'release_date' to datetime format
combined_data["release_date"] = pd.to_datetime(
combined_data["release_date"], errors="coerce"
)
# Convert 'genres' from JSON format to a list of genres
def parse_genres(genres_str):
    # The metadata stores genres as a stringified list of dicts with single quotes,
    # e.g. "[{'id': 35, 'name': 'Comedy'}]"; swapping the quotes makes it valid JSON
    genres = json.loads(genres_str.replace("'", '"'))
    # keep only the genre names
    genres_list = [g["name"] for g in genres]
    return genres_list
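# Quick illustration (added): the metadata stores genres like
# "[{'id': 35, 'name': 'Comedy'}, {'id': 18, 'name': 'Drama'}]",
# which parse_genres turns into ['Comedy', 'Drama']
print(parse_genres("[{'id': 35, 'name': 'Comedy'}, {'id': 18, 'name': 'Drama'}]"))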
combined_data["genres"] = combined_data["genres"].apply(parse_genres)
# Extract year from 'release_date' and create 'release_year' column
combined_data["release_year"] = combined_data["release_date"].dt.year
df = combined_data[combined_data["userId"].isin(active_users)]
df.columns
df2 = df.copy()  # work on an explicit copy for the clustering pipeline
# Plot distribution of ratings
plt.figure(figsize=(10, 5))
sns.histplot(combined_data["rating"], bins=20, kde=False)
plt.title("Distribution of Ratings")
plt.xlabel("Rating")
plt.ylabel("Count")
plt.show()
# distribution of movie release years
plt.figure(figsize=(10, 5))
sns.histplot(combined_data["release_year"], bins=np.arange(1900, 2023, 1))
plt.title("Distribution of Movie Release Years")
plt.xlabel("Release Year")
plt.ylabel("Count")
plt.show()
# number of movies for each genre
all_genres = np.concatenate(combined_data["genres"].values)
unique_genres, counts = np.unique(all_genres, return_counts=True)
genre_counts = pd.DataFrame({"genre": unique_genres, "count": counts}).sort_values(
by="count", ascending=False
)
# distribution of genres
plt.figure(figsize=(10, 5))
sns.barplot(x="genre", y="count", data=genre_counts)
plt.title("Distribution of Genres")
plt.xlabel("Genre")
plt.ylabel("Count")
plt.xticks(rotation=90)
plt.show()
# Plot distribution of languages
plt.figure(figsize=(10, 5))
sns.countplot(data=combined_data, x="original_language")
plt.title("Distribution of Original Language")
plt.xlabel("Language")
plt.ylabel("Count")
plt.xticks(rotation=90)
plt.show()
df2
df2.columns
# Build a user x movie rating matrix (one row per user, one column per movie)
df2 = df2.drop_duplicates(subset=["userId", "movieId"])
wide_df = df2.pivot(index="userId", columns="movieId", values="rating")
wide_df
# Keep only movies rated by at least 80 of the active users
wide_df_drop = wide_df.dropna(thresh=80, axis=1)
# Impute the remaining missing ratings with each movie's mean rating
mean_rate = wide_df_drop.mean()
wide_df = wide_df_drop.fillna(mean_rate)
wide_df
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
# df2['release_year'] = pd.to_datetime(df2['release_date']).dt.year
# var = ['userId','movieId','rating', 'release_year', 'genres']
# Reduce the user x movie rating matrix to 3 principal components
pca = PCA(n_components=3)
data_pca = pca.fit_transform(wide_df)
# Cluster users in the reduced space; k=8 is evaluated with the elbow method below
k = 8
kmeans = KMeans(n_clusters=k, random_state=42)
kmeans.fit(data_pca)
clusters = kmeans.predict(data_pca)
wide_df["cluster"] = clusters
cluster_counts = wide_df["cluster"].value_counts()  # number of users per cluster
print(cluster_counts)
print(wide_df)
# adopted from: https://pythonprogramminglanguage.com/kmeans-elbow-method/
from scipy.spatial.distance import cdist
# Standardize the PCA-transformed data
scaler = StandardScaler()
scaled_data_pca = scaler.fit_transform(data_pca)
# Set range of clusters to consider
K = range(1, 10)
# Run KMeans for each value of K and calculate performance
performance = []
for k in K:
kmeans = KMeans(n_clusters=k, random_state=42)
kmeans.fit(scaled_data_pca)
performance.append(kmeans.inertia_)
# Plot the elbow
plt.plot(K, performance, "bx-")
plt.xlabel("k")
plt.ylabel("Inertia")
plt.title("The Elbow Method showing the optimal k")
plt.show()
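# A complementary check for choosing k (a minimal sketch using the variables defined above):
# the mean silhouette score, which is higher when clusters are tight and well separated.
from sklearn.metrics import silhouette_score

for k_try in range(2, 10):
    labels = KMeans(n_clusters=k_try, random_state=42).fit_predict(scaled_data_pca)
    print(f"k={k_try}, mean silhouette={silhouette_score(scaled_data_pca, labels):.3f}")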
plt.figure(figsize=(10, 7))
plt.scatter(data_pca[:, 0], data_pca[:, 1], c=clusters, cmap="viridis")
plt.title("PCA 1 vs PCA 2 with Clusters")
plt.xlabel("PCA 1")
plt.ylabel("PCA 2")
plt.show()
centroids = kmeans.cluster_centers_
from mpl_toolkits.mplot3d import Axes3D
# Creating the 3D plot
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
ax.scatter(data_pca[:, 0], data_pca[:, 1], data_pca[:, 2], c=clusters, cmap="viridis")
ax.set_title("PCA 1 vs PCA 2 vs PCA 3 with Clusters")
ax.set_xlabel("PCA 1")
ax.set_ylabel("PCA 2")
ax.set_zlabel("PCA 3")
plt.show()
explained_var = pca.explained_variance_ratio_
# Creating the bar chart
plt.bar(range(1, len(explained_var) + 1), explained_var)
plt.title("Explained Variance by Principal Component")
plt.xlabel("Principal Component")
plt.ylabel("Explained Variance Ratio")
plt.show()
feature_weights = pd.DataFrame(data_pca, columns=["PC1", "PC2", "PC3"])
feature_weights.index = wide_df.index
feature_weights["cluster"] = clusters
mean_feature_weights = feature_weights.groupby("cluster").mean()
sorted_weights = mean_feature_weights.abs().sort_values(by="PC1", ascending=False)
for cluster in sorted_weights.index:
print(f"Cluster {cluster}")
print(sorted_weights.loc[cluster][:3])
print()
df2
wide_df
wide_df_reset = wide_df.reset_index()
wide_df_reset
merged_df = df2.merge(wide_df_reset[["userId", "cluster"]], on="userId", how="left")
merged_df.columns
# genre counts by cluster ('genres' already holds a list of names per row, so explode it directly)
genre_counts = merged_df.explode("genres").groupby("cluster")["genres"].value_counts()
print(genre_counts)
# average rating by cluster
avg_rating = merged_df.groupby("cluster")["rating"].mean().sort_values(ascending=False)
print(avg_rating)
most_popular = merged_df.loc[merged_df.groupby("cluster")["vote_count"].idxmax()][
["cluster", "title"]
]
print(most_popular)
avg_budget_revenue = merged_df.groupby("cluster")[["budget", "revenue"]].mean()
print(avg_budget_revenue)
adult_counts = merged_df.groupby("cluster")["adult"].count()
adult_counts
avg_revenue = merged_df.groupby("cluster")["revenue"].mean()
sorted_avg_revenue = avg_revenue.sort_values(ascending=False)
print(sorted_avg_revenue)
most_common_genre = (
merged_df.explode("genres")
.groupby("cluster")["genres"]
.agg(lambda x: x.value_counts().index[0])
)
print(most_common_genre)
avg_release_year = merged_df.groupby("cluster")["release_year"].mean()
print(avg_release_year)
avg_runtime = merged_df.groupby("cluster")["runtime"].mean()
print(avg_runtime)
most_common_language = merged_df.groupby("cluster")["original_language"].agg(
lambda x: x.value_counts().index[0]
)
print(most_common_language)
top_languages = merged_df.groupby("cluster")["original_language"].apply(
lambda x: x.value_counts().nlargest(5)
)
print(top_languages)
top_grossing = (
merged_df.sort_values("revenue", ascending=False)
.groupby("cluster")
.head(8)[["cluster", "title", "revenue"]]
)
print(top_grossing)
top_budgets = (
merged_df.sort_values("budget", ascending=False)
.groupby("cluster")
.head(5)[["cluster", "title", "budget"]]
)
print(top_budgets)
top_ratings = (
merged_df.sort_values("rating", ascending=False)
.groupby("cluster")
.head(5)[["cluster", "title", "rating"]]
)
print(top_ratings)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/107/129107608.ipynb
|
the-movies-dataset
|
rounakbanik
|
[{"Id": 129107608, "ScriptId": 38157186, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8181938, "CreationDate": "05/11/2023 04:17:50", "VersionNumber": 6.0, "Title": "Project4", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 262.0, "LinesInsertedFromPrevious": 137.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 125.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 184870498, "KernelVersionId": 129107608, "SourceDatasetVersionId": 6663}]
|
[{"Id": 6663, "DatasetId": 3405, "DatasourceVersionId": 6663, "CreatorUserId": 927562, "LicenseName": "CC0: Public Domain", "CreationDate": "11/10/2017 02:40:38", "VersionNumber": 7.0, "Title": "The Movies Dataset", "Slug": "the-movies-dataset", "Subtitle": "Metadata on over 45,000 movies. 26 million ratings from over 270,000 users.", "Description": "### Context\n\nThese files contain metadata for all 45,000 movies listed in the Full MovieLens Dataset. The dataset consists of movies released on or before July 2017. Data points include cast, crew, plot keywords, budget, revenue, posters, release dates, languages, production companies, countries, TMDB vote counts and vote averages.\n\nThis dataset also has files containing 26 million ratings from 270,000 users for all 45,000 movies. Ratings are on a scale of 1-5 and have been obtained from the official GroupLens website.\n\n\n### Content\n\nThis dataset consists of the following files:\n\n**movies_metadata.csv:** The main Movies Metadata file. Contains information on 45,000 movies featured in the Full MovieLens dataset. Features include posters, backdrops, budget, revenue, release dates, languages, production countries and companies.\n\n**keywords.csv:** Contains the movie plot keywords for our MovieLens movies. Available in the form of a stringified JSON Object.\n\n**credits.csv:** Consists of Cast and Crew Information for all our movies. Available in the form of a stringified JSON Object.\n\n**links.csv:** The file that contains the TMDB and IMDB IDs of all the movies featured in the Full MovieLens dataset.\n\n**links_small.csv:** Contains the TMDB and IMDB IDs of a small subset of 9,000 movies of the Full Dataset.\n\n**ratings_small.csv:** The subset of 100,000 ratings from 700 users on 9,000 movies.\n\nThe Full MovieLens Dataset consisting of 26 million ratings and 750,000 tag applications from 270,000 users on all the 45,000 movies in this dataset can be accessed [here](https://grouplens.org/datasets/movielens/latest/) \n\n### Acknowledgements\n\nThis dataset is an ensemble of data collected from TMDB and GroupLens.\nThe Movie Details, Credits and Keywords have been collected from the TMDB Open API. This product uses the TMDb API but is not endorsed or certified by TMDb. Their API also provides access to data on many additional movies, actors and actresses, crew members, and TV shows. You can try it for yourself [here](https://www.themoviedb.org/documentation/api).\n\nThe Movie Links and Ratings have been obtained from the Official GroupLens website. The files are a part of the dataset available [here](https://grouplens.org/datasets/movielens/latest/)\n\n\n\n\n### Inspiration\n\nThis dataset was assembled as part of my second Capstone Project for Springboard's [Data Science Career Track](https://www.springboard.com/workshops/data-science-career-track). I wanted to perform an extensive EDA on Movie Data to narrate the history and the story of Cinema and use this metadata in combination with MovieLens ratings to build various types of Recommender Systems.\n\nBoth my notebooks are available as kernels with this dataset: [The Story of Film](https://www.kaggle.com/rounakbanik/the-story-of-film) and [Movie Recommender Systems](https://www.kaggle.com/rounakbanik/movie-recommender-systems)\n\nSome of the things you can do with this dataset:\nPredicting movie revenue and/or movie success based on a certain metric. What movies tend to get higher vote counts and vote averages on TMDB? 
Building Content Based and Collaborative Filtering Based Recommendation Engines.", "VersionNotes": "Add Full Ratings", "TotalCompressedBytes": 943755800.0, "TotalUncompressedBytes": 943755800.0}]
|
[{"Id": 3405, "CreatorUserId": 927562, "OwnerUserId": 927562.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 6663.0, "CurrentDatasourceVersionId": 6663.0, "ForumId": 8425, "Type": 2, "CreationDate": "10/24/2017 18:53:43", "LastActivityDate": "02/06/2018", "TotalViews": 1496610, "TotalDownloads": 295134, "TotalVotes": 3284, "TotalKernels": 475}]
|
[{"Id": 927562, "UserName": "rounakbanik", "DisplayName": "Rounak Banik", "RegisterDate": "02/23/2017", "PerformanceTier": 2}]
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
# Kaggle directory
dir = "/kaggle/input/the-movies-dataset/"
# Load datasets
ratings = pd.read_csv(dir + "ratings_small.csv")
metadata = pd.read_csv(dir + "movies_metadata.csv", low_memory=False)
metadata = metadata.rename(columns={"id": "movieId"})
metadata["movieId"] = pd.to_numeric(metadata["movieId"], errors="coerce")
# Merge the two DataFrames on the 'movieId' column
combined_data = pd.merge(ratings, metadata, on="movieId")
# Filter users who have rated 20 or more movies
user_rating_counts = combined_data.groupby("userId")["rating"].count()
active_users = user_rating_counts[user_rating_counts >= 20].index
import json
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime
# Convert 'release_date' to datetime format
combined_data["release_date"] = pd.to_datetime(
combined_data["release_date"], errors="coerce"
)
# Convert 'genres' from JSON format to a list of genres
def parse_genres(genres_str):
genres = json.loads(genres_str.replace("'", '"'))
genres_list = [g["name"] for g in genres]
return genres_list
combined_data["genres"] = combined_data["genres"].apply(parse_genres)
# Extract year from 'release_date' and create 'release_year' column
combined_data["release_year"] = combined_data["release_date"].dt.year
df = combined_data[combined_data["userId"].isin(active_users)]
df.columns
df2 = df
# Plot distribution of ratings
plt.figure(figsize=(10, 5))
sns.histplot(combined_data["rating"], bins=20, kde=False)
plt.title("Distribution of Ratings")
plt.xlabel("Rating")
plt.ylabel("Count")
plt.show()
# distribution of movie release years
plt.figure(figsize=(10, 5))
sns.histplot(combined_data["release_year"], bins=np.arange(1900, 2023, 1))
plt.title("Distribution of Movie Release Years")
plt.xlabel("Release Year")
plt.ylabel("Count")
plt.show()
# number of movies for each genre
all_genres = np.concatenate(combined_data["genres"].values)
unique_genres, counts = np.unique(all_genres, return_counts=True)
genre_counts = pd.DataFrame({"genre": unique_genres, "count": counts}).sort_values(
by="count", ascending=False
)
# distribution of genres
plt.figure(figsize=(10, 5))
sns.barplot(x="genre", y="count", data=genre_counts)
plt.title("Distribution of Genres")
plt.xlabel("Genre")
plt.ylabel("Count")
plt.xticks(rotation=90)
plt.show()
# Plot distribution of languages
plt.figure(figsize=(10, 5))
sns.countplot(data=combined_data, x="original_language")
plt.title("Distribution of Original Language")
plt.xlabel("Language")
plt.ylabel("Count")
plt.xticks(rotation=90)
plt.show()
df2
df2.columns
df2 = df2.drop_duplicates(subset=["userId", "movieId"])
wide_df = df2.pivot(index="userId", columns="movieId", values="rating")
wide_df
wide_df_drop = wide_df.dropna(thresh=80, axis=1)
mean_rate = wide_df_drop.mean()
wide_df = wide_df_drop.fillna(mean_rate)
wide_df
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
# df2['release_year'] = pd.to_datetime(df2['release_date']).dt.year
# var = ['userId','movieId','rating', 'release_year', 'genres']
pca = PCA(n_components=3)
data_pca = pca.fit_transform(wide_df)
k = 8
kmeans = KMeans(n_clusters=k, random_state=42)
kmeans.fit(data_pca)
clusters = kmeans.predict(data_pca)
wide_df["cluster"] = clusters
cluster_counts = wide_df["cluster"].value_counts()
print(cluster_counts)
print(wide_df)
# adopted from: https://pythonprogramminglanguage.com/kmeans-elbow-method/
from scipy.spatial.distance import cdist
# Standardize the PCA-transformed data
scaler = StandardScaler()
scaled_data_pca = scaler.fit_transform(data_pca)
# Set range of clusters to consider
K = range(1, 10)
# Run KMeans for each value of K and calculate performance
performance = []
for k in K:
kmeans = KMeans(n_clusters=k, random_state=42)
kmeans.fit(scaled_data_pca)
performance.append(kmeans.inertia_)
# Plot the elbow
plt.plot(K, performance, "bx-")
plt.xlabel("k")
plt.ylabel("Inertia")
plt.title("The Elbow Method showing the optimal k")
plt.show()
plt.figure(figsize=(10, 7))
plt.scatter(data_pca[:, 0], data_pca[:, 1], c=clusters, cmap="viridis")
plt.title("PCA 1 vs PCA 2 with Clusters")
plt.xlabel("PCA 1")
plt.ylabel("PCA 2")
plt.show()
centroids = kmeans.cluster_centers_
from mpl_toolkits.mplot3d import Axes3D
# Creating the 3D plot
fig = plt.figure()
ax = fig.add_subplot(111, projection="3d")
ax.scatter(data_pca[:, 0], data_pca[:, 1], data_pca[:, 2], c=clusters, cmap="viridis")
ax.set_title("PCA 1 vs PCA 2 vs PCA 3 with Clusters")
ax.set_xlabel("PCA 1")
ax.set_ylabel("PCA 2")
ax.set_zlabel("PCA 3")
plt.show()
explained_var = pca.explained_variance_ratio_
# Creating the bar chart
plt.bar(range(1, len(explained_var) + 1), explained_var)
plt.title("Explained Variance by Principal Component")
plt.xlabel("Principal Component")
plt.ylabel("Explained Variance Ratio")
plt.show()
feature_weights = pd.DataFrame(data_pca, columns=["PC1", "PC2", "PC3"])
feature_weights.index = wide_df.index
feature_weights["cluster"] = clusters
mean_feature_weights = feature_weights.groupby("cluster").mean()
sorted_weights = mean_feature_weights.abs().sort_values(by="PC1", ascending=False)
for cluster in sorted_weights.index:
print(f"Cluster {cluster}")
print(sorted_weights.loc[cluster][:3])
print()
df2
wide_df
wide_df_reset = wide_df.reset_index()
wide_df_reset
merged_df = df2.merge(wide_df_reset[["userId", "cluster"]], on="userId", how="left")
merged_df.columns
# genre counts by cluster ('genres' already holds a list of names per row, so explode it directly)
genre_counts = merged_df.explode("genres").groupby("cluster")["genres"].value_counts()
print(genre_counts)
# average rating by cluster
avg_rating = merged_df.groupby("cluster")["rating"].mean().sort_values(ascending=False)
print(avg_rating)
most_popular = merged_df.loc[merged_df.groupby("cluster")["vote_count"].idxmax()][
["cluster", "title"]
]
print(most_popular)
avg_budget_revenue = merged_df.groupby("cluster")[["budget", "revenue"]].mean()
print(avg_budget_revenue)
adult_counts = merged_df.groupby("cluster")["adult"].count()
adult_counts
avg_revenue = merged_df.groupby("cluster")["revenue"].mean()
sorted_avg_revenue = avg_revenue.sort_values(ascending=False)
print(sorted_avg_revenue)
most_common_genre = (
merged_df.explode("genres")
.groupby("cluster")["genres"]
.agg(lambda x: x.value_counts().index[0])
)
print(most_common_genre)
avg_release_year = merged_df.groupby("cluster")["release_year"].mean()
print(avg_release_year)
avg_runtime = merged_df.groupby("cluster")["runtime"].mean()
print(avg_runtime)
most_common_language = merged_df.groupby("cluster")["original_language"].agg(
lambda x: x.value_counts().index[0]
)
print(most_common_language)
top_languages = merged_df.groupby("cluster")["original_language"].apply(
lambda x: x.value_counts().nlargest(5)
)
print(top_languages)
top_grossing = (
merged_df.sort_values("revenue", ascending=False)
.groupby("cluster")
.head(8)[["cluster", "title", "revenue"]]
)
print(top_grossing)
top_budgets = (
merged_df.sort_values("budget", ascending=False)
.groupby("cluster")
.head(5)[["cluster", "title", "budget"]]
)
print(top_budgets)
top_ratings = (
merged_df.sort_values("rating", ascending=False)
.groupby("cluster")
.head(5)[["cluster", "title", "rating"]]
)
print(top_ratings)
| false | 0 | 2,504 | 0 | 2,967 | 2,504 |
||
129093444
|
<jupyter_start><jupyter_text>Smoking Dataset from UK
```
Survey data on smoking habits from the United Kingdom. The data set can be used for analyzing the demographic characteristics of smokers and types of tobacco consumed. A data frame with 1691 observations on the following 12 variables.
```
| Column | Description |
| --- | --- |
| gender | Gender with levels Female and Male. |
| age | Age. |
| marital_status | Marital status with levels Divorced, Married, Separated, Single and Widowed. |
| highest_qualification | Highest education level with levels A Levels, Degree, GCSE/CSE, GCSE/O Level, Higher/Sub Degree, No Qualification, ONC/BTEC and Other/Sub Degree |
| nationality | Nationality with levels British, English, Irish, Scottish, Welsh, Other, Refused and Unknown. |
| ethnicity | Ethnicity with levels Asian, Black, Chinese, Mixed, White and Refused Unknown. |
| gross_income | Gross income with levels Under 2,600, 2,600 to 5,200, 5,200 to 10,400, 10,400 to 15,600, 15,600 to 20,800, 20,800 to 28,600, 28,600 to 36,400, Above 36,400, Refused and Unknown. |
| region | Region with levels London, Midlands & East Anglia, Scotland, South East, South West, The North and Wales |
| smoke | Smoking status with levels No and Yes |
| amt_weekends | Number of cigarettes smoked per day on weekends. |
| amt_weekdays | Number of cigarettes smoked per day on weekdays. |
| type | Type of cigarettes smoked with levels Packets, Hand-Rolled, Both/Mainly Packets and Both/Mainly Hand-Rolled
|
# Source
National STEM Centre, Large Datasets from stats4schools, https://www.stem.org.uk/resources/elibrary/resource/28452/large-datasets-stats4schools.
Kaggle dataset identifier: smoking-dataset-from-uk
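A quick way to get oriented is to load the file and tabulate smoking prevalence; a minimal sketch (column names as in the table above, Kaggle path as used in the notebook below):
```
import pandas as pd

uk = pd.read_csv("/kaggle/input/smoking-dataset-from-uk/smoking.csv")

# Overall and per-gender share of smokers
print(uk["smoke"].value_counts(normalize=True))
print(uk.groupby("gender")["smoke"].value_counts(normalize=True).unstack())

# Cigarettes per day among smokers only (the amt_* columns are NaN for non-smokers)
smokers = uk[uk["smoke"] == "Yes"]
print(smokers[["amt_weekdays", "amt_weekends"]].mean())
```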
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
# import plotly.express as px
df = pd.read_csv("/kaggle/input/smoking-dataset-from-uk/smoking.csv")
df.head()
df = df.iloc[:, 1:]
# # NaN values observation
# NaN values appear only in amt_weekends, amt_weekdays and type (the number of cigarettes smoked per day on weekends and weekdays, and the type of cigarettes, respectively), since these fields are recorded only for smokers
print(df.shape)
df.isna().sum()
smoking = df[df["smoke"] == "Yes"].copy()
print(smoking.shape)
smoking.isna().sum()
for i in df.columns[:-3]:
if i != "age":
print(i, " ", df[i].nunique())
# # Histplots for age, overall and grouped by smoking status
fig, axs = plt.subplots(1, 2, figsize=(15, 7))
plt.subplot(121)
sns.histplot(data=df, x="age", bins=30, kde=True, color="g")
plt.subplot(122)
sns.histplot(data=df, x="age", bins=30, kde=True, color="g", hue="smoke")
plt.show()
# # Histplots for the amount of cigarettes on weekdays and weekends grouped by type
fig, axs = plt.subplots(1, 2, figsize=(15, 7))
plt.subplot(121)
sns.histplot(data=smoking, x="amt_weekends", bins=30, kde=True, color="g")
plt.subplot(122)
sns.histplot(data=df, x="amt_weekends", bins=30, kde=True, color="g", hue="type")
plt.show()
fig, axs = plt.subplots(1, 2, figsize=(15, 7))
plt.subplot(121)
sns.histplot(data=smoking, x="amt_weekdays", bins=30, kde=True, color="g")
plt.subplot(122)
sns.histplot(data=df, x="amt_weekdays", bins=30, kde=True, color="g", hue="type")
plt.show()
def plots(df, x, y):
f, ax = plt.subplots(1, 2, figsize=(25, 10))
Group_data = df.groupby(x)
sns.barplot(
x=Group_data[y].mean().index,
y=Group_data[y].mean().values,
ax=ax[0],
palette="mako",
)
for container in ax[0].containers:
ax[0].bar_label(container, color="black", size=20)
palette_color = sns.color_palette("summer")
plt.pie(
x=df[x].value_counts(),
labels=df[x].value_counts().index,
autopct="%.0f%%",
shadow=True,
colors=palette_color,
)
plt.suptitle("Grouped by {}".format(x))
plt.show()
# # Barplots for age grouped by categorical variables and pie charts
for i in df.columns[:-3]:
if i != "age":
plots(df, i, "age")
# # Bar plots for the amount of cigarettes smoked on weekdays and weekends and pie charts
target = "amt_weekends"
for i in smoking.columns[-3:]:
if i != "amt_weekends" and i != "amt_weekdays":
plots(df, i, target)
target = "amt_weekdays"
for i in smoking.columns[-3:]:
if i != "amt_weekends" and i != "amt_weekdays":
plots(df, i, target)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/093/129093444.ipynb
|
smoking-dataset-from-uk
|
utkarshx27
|
[{"Id": 129093444, "ScriptId": 38376051, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11036701, "CreationDate": "05/11/2023 00:59:33", "VersionNumber": 1.0, "Title": "Smoking in the UK EDA", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 82.0, "LinesInsertedFromPrevious": 82.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 4}]
|
[{"Id": 184842669, "KernelVersionId": 129093444, "SourceDatasetVersionId": 5651804}]
|
[{"Id": 5651804, "DatasetId": 3248690, "DatasourceVersionId": 5727175, "CreatorUserId": 13364933, "LicenseName": "CC0: Public Domain", "CreationDate": "05/10/2023 05:41:12", "VersionNumber": 1.0, "Title": "Smoking Dataset from UK", "Slug": "smoking-dataset-from-uk", "Subtitle": "Demographic Characteristics & Tobacco Consumption Habits: UK Smoking Survey Data", "Description": "``` \nSurvey data on smoking habits from the United Kingdom. The data set can be used for analyzing the demographic characteristics of smokers and types of tobacco consumed. A data frame with 1691 observations on the following 12 variables.\n```\n| Column | Description |\n| --- | --- |\n| gender | Gender with levels Female and Male. |\n| age | Age. |\n| marital_status | Marital status with levels Divorced, Married, Separated, Single and Widowed. |\n| highest_qualification | Highest education level with levels A Levels, Degree, GCSE/CSE, GCSE/O Level, Higher/Sub Degree, No Qualification, ONC/BTEC and Other/Sub Degree |\n| nationality | Nationality with levels British, English, Irish, Scottish, Welsh, Other, Refused and Unknown. |\n| ethnicity | Ethnicity with levels Asian, Black, Chinese, Mixed, White and Refused Unknown. |\n| gross_income | Gross income with levels Under 2,600, 2,600 to 5,200, 5,200 to 10,400, 10,400 to 15,600, 15,600 to 20,800, 20,800 to 28,600, 28,600 to 36,400, Above 36,400, Refused and Unknown. |\n| region | Region with levels London, Midlands & East Anglia, Scotland, South East, South West, The North and Wales |\n| smoke | Smoking status with levels No and Yes |\n| amt_weekends | Number of cigarettes smoked per day on weekends. |\n| amt_weekdays | Number of cigarettes smoked per day on weekdays. |\n| type | Type of cigarettes smoked with levels Packets, Hand-Rolled, Both/Mainly Packets and Both/Mainly Hand-Rolled\n |\n\n# Source\nNational STEM Centre, Large Datasets from stats4schools, https://www.stem.org.uk/resources/elibrary/resource/28452/large-datasets-stats4schools.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3248690, "CreatorUserId": 13364933, "OwnerUserId": 13364933.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5651804.0, "CurrentDatasourceVersionId": 5727175.0, "ForumId": 3314043, "Type": 2, "CreationDate": "05/10/2023 05:41:12", "LastActivityDate": "05/10/2023", "TotalViews": 14838, "TotalDownloads": 2967, "TotalVotes": 58, "TotalKernels": 10}]
|
[{"Id": 13364933, "UserName": "utkarshx27", "DisplayName": "Utkarsh Singh", "RegisterDate": "01/21/2023", "PerformanceTier": 2}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
# import plotly.express as px
df = pd.read_csv("/kaggle/input/smoking-dataset-from-uk/smoking.csv")
df.head()
df = df.iloc[:, 1:]
# # NaN values observation
# NaN values appear only in amt_weekends, amt_weekdays and type (the number of cigarettes smoked per day on weekends and weekdays, and the type of cigarettes, respectively), since these fields are recorded only for smokers
print(df.shape)
df.isna().sum()
smoking = df[df["smoke"] == "Yes"].copy()
print(smoking.shape)
smoking.isna().sum()
for i in df.columns[:-3]:
if i != "age":
print(i, " ", df[i].nunique())
# # Histplots for age, overall and grouped by smoking status
fig, axs = plt.subplots(1, 2, figsize=(15, 7))
plt.subplot(121)
sns.histplot(data=df, x="age", bins=30, kde=True, color="g")
plt.subplot(122)
sns.histplot(data=df, x="age", bins=30, kde=True, color="g", hue="smoke")
plt.show()
# # Histplots for the amount of cigarettes on weekdays and weekends grouped by type
fig, axs = plt.subplots(1, 2, figsize=(15, 7))
plt.subplot(121)
sns.histplot(data=smoking, x="amt_weekends", bins=30, kde=True, color="g")
plt.subplot(122)
sns.histplot(data=df, x="amt_weekends", bins=30, kde=True, color="g", hue="type")
plt.show()
fig, axs = plt.subplots(1, 2, figsize=(15, 7))
plt.subplot(121)
sns.histplot(data=smoking, x="amt_weekdays", bins=30, kde=True, color="g")
plt.subplot(122)
sns.histplot(data=df, x="amt_weekdays", bins=30, kde=True, color="g", hue="type")
plt.show()
def plots(df, x, y):
f, ax = plt.subplots(1, 2, figsize=(25, 10))
Group_data = df.groupby(x)
sns.barplot(
x=Group_data[y].mean().index,
y=Group_data[y].mean().values,
ax=ax[0],
palette="mako",
)
for container in ax[0].containers:
ax[0].bar_label(container, color="black", size=20)
palette_color = sns.color_palette("summer")
plt.pie(
x=df[x].value_counts(),
labels=df[x].value_counts().index,
autopct="%.0f%%",
shadow=True,
colors=palette_color,
)
plt.suptitle("Grouped by {}".format(x))
plt.show()
# # Barplots for age grouped by categorical variables and pie charts
for i in df.columns[:-3]:
if i != "age":
plots(df, i, "age")
# # Bar plots for the amount of cigarettes smoked on weekdays and weekends and pie charts
target = "amt_weekends"
for i in smoking.columns[-3:]:
if i != "amt_weekends" and i != "amt_weekdays":
plots(df, i, target)
target = "amt_weekdays"
for i in smoking.columns[-3:]:
if i != "amt_weekends" and i != "amt_weekdays":
plots(df, i, target)
| false | 1 | 973 | 4 | 1,561 | 973 |
||
129093048
|
<jupyter_start><jupyter_text>Car Price Prediction Multiple Linear Regression
### Problem Statement
A Chinese automobile company Geely Auto aspires to enter the US market by setting up their manufacturing unit there and producing cars locally to give competition to their US and European counterparts.
They have contracted an automobile consulting company to understand the factors on which the pricing of cars depends. Specifically, they want to understand the factors affecting the pricing of cars in the American market, since those may be very different from the Chinese market. The company wants to know:
Which variables are significant in predicting the price of a car
How well those variables describe the price of a car
Based on various market surveys, the consulting firm has gathered a large data set of different types of cars across the America market.
### Business Goal
We are required to model the price of cars with the available independent variables. It will be used by the management to understand how exactly the prices vary with the independent variables. They can accordingly manipulate the design of the cars, the business strategy etc. to meet certain price levels. Further, the model will be a good way for management to understand the pricing dynamics of a new market.
### Please Note : The dataset provided is for learning purpose. Please don’t draw any inference with real world scenario.
Kaggle dataset identifier: car-price-prediction
<jupyter_code>import pandas as pd
df = pd.read_csv('car-price-prediction/CarPrice_Assignment.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 205 entries, 0 to 204
Data columns (total 26 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 car_ID 205 non-null int64
1 symboling 205 non-null int64
2 CarName 205 non-null object
3 fueltype 205 non-null object
4 aspiration 205 non-null object
5 doornumber 205 non-null object
6 carbody 205 non-null object
7 drivewheel 205 non-null object
8 enginelocation 205 non-null object
9 wheelbase 205 non-null float64
10 carlength 205 non-null float64
11 carwidth 205 non-null float64
12 carheight 205 non-null float64
13 curbweight 205 non-null int64
14 enginetype 205 non-null object
15 cylindernumber 205 non-null object
16 enginesize 205 non-null int64
17 fuelsystem 205 non-null object
18 boreratio 205 non-null float64
19 stroke 205 non-null float64
20 compressionratio 205 non-null float64
21 horsepower 205 non-null int64
22 peakrpm 205 non-null int64
23 citympg 205 non-null int64
24 highwaympg 205 non-null int64
25 price 205 non-null float64
dtypes: float64(8), int64(8), object(10)
memory usage: 41.8+ KB
<jupyter_text>Examples:
{
"car_ID": 1,
"symboling": 3,
"CarName": "alfa-romero giulia",
"fueltype": "gas",
"aspiration": "std",
"doornumber": "two",
"carbody": "convertible",
"drivewheel": "rwd",
"enginelocation": "front",
"wheelbase": 88.6,
"carlength": 168.8,
"carwidth": 64.1,
"carheight": 48.8,
"curbweight": 2548,
"enginetype": "dohc",
"cylindernumber": "four",
"enginesize": 130,
"fuelsystem": "mpfi",
"boreratio": 3.47,
"stroke": 2.68,
"...": "and 6 more columns"
}
{
"car_ID": 2,
"symboling": 3,
"CarName": "alfa-romero stelvio",
"fueltype": "gas",
"aspiration": "std",
"doornumber": "two",
"carbody": "convertible",
"drivewheel": "rwd",
"enginelocation": "front",
"wheelbase": 88.6,
"carlength": 168.8,
"carwidth": 64.1,
"carheight": 48.8,
"curbweight": 2548,
"enginetype": "dohc",
"cylindernumber": "four",
"enginesize": 130,
"fuelsystem": "mpfi",
"boreratio": 3.47,
"stroke": 2.68,
"...": "and 6 more columns"
}
{
"car_ID": 3,
"symboling": 1,
"CarName": "alfa-romero Quadrifoglio",
"fueltype": "gas",
"aspiration": "std",
"doornumber": "two",
"carbody": "hatchback",
"drivewheel": "rwd",
"enginelocation": "front",
"wheelbase": 94.5,
"carlength": 171.2,
"carwidth": 65.5,
"carheight": 52.4,
"curbweight": 2823,
"enginetype": "ohcv",
"cylindernumber": "six",
"enginesize": 152,
"fuelsystem": "mpfi",
"boreratio": 2.68,
"stroke": 3.47,
"...": "and 6 more columns"
}
{
"car_ID": 4,
"symboling": 2,
"CarName": "audi 100 ls",
"fueltype": "gas",
"aspiration": "std",
"doornumber": "four",
"carbody": "sedan",
"drivewheel": "fwd",
"enginelocation": "front",
"wheelbase": 99.8,
"carlength": 176.6,
"carwidth": 66.2,
"carheight": 54.3,
"curbweight": 2337,
"enginetype": "ohc",
"cylindernumber": "four",
"enginesize": 109,
"fuelsystem": "mpfi",
"boreratio": 3.19,
"stroke": 3.4,
"...": "and 6 more columns"
}
<jupyter_script># **Import libraries**
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
import seaborn as sns
import pylab
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
# # Data loading
data = pd.read_csv("/kaggle/input/car-price-prediction/CarPrice_Assignment.csv")
df = data  # later cells refer to this frame as both `data` and `df`, so keep one shared reference
data.head()
data.tail()
# # Data Understanding
# **shape**
print("data shape : ", data.shape)
# **basic information**
print("basic information :", data.info())
# **Descriptive Statistics Analysis**
data.describe()
# **checking NaN value**
data.isnull().sum().to_frame().rename(columns={0: "Total No. of Missing Values"})
# if there is any NaN value use this code
# data.dropna(inplace= True)
# data: This is the DataFrame we are working with.
# isnull(): This method checks each element of the DataFrame to see if it is null or NaN, and returns a DataFrame of the same shape as the original DataFrame, with True where the element is null or NaN and False otherwise.
# sum(): This method sums up the number of True values in each column of the DataFrame returned by isnull(), giving us the total number of missing values in each column.
# to_frame(): This method converts the Pandas Series returned by sum() to a DataFrame with the same column name as the original Series.
# rename(columns={0:"Total No. of Missing Values"}): This method renames the column of the DataFrame to "Total No. of Missing Values" for clarity.
# **Checking Duplicate Values**
print("Duplicate Values =", df.duplicated().sum())
# Duplicated values are repeated rows in the data; they can affect the model.
# **Showing Only Categorical Features.**
df.select_dtypes(include="object").head()
# df: the name of the dataframe being used
# .select_dtypes(include="object"): a pandas function used to select columns of a particular data type, in this case "object" data types (which typically refer to string values)
# .head(): a pandas function used to display the first 5 rows of the selected dataframe
# **Showing only the Numerical Features.**
df.select_dtypes(include=["int", "float"]).head()
# # Data Cleaning
data.head()
# **taking company name from car names**
Company_Name = df["CarName"].apply(lambda x: x.split(" ")[0])
data.insert(2, "CompanyName", Company_Name)
# Now we can drop the CarName Feature.
data.drop(columns=["CarName"], inplace=True)
# First take the CarName values and apply a lambda that keeps only the first word (the company name);
# without apply(), the lambda would run on a single value rather than on every row.
# The new CompanyName column is inserted as the third column (index 2).
# After that, CarName is dropped with inplace=True; without inplace we would have to assign the result to a new variable.
# **check the data set**
data.head()
# **Checking the Unique Car Company Names.**
data["CompanyName"].unique()
# The unique() method in Pandas is used to get a unique set of values in a Pandas object such as a Series or DataFrame column. It returns an array of unique values in the same order as they appear in the original object.
# maxda = mazda
# Nissan = nissan
# porsche = porcshce
# toyota = toyouta
# vokswagen = volkswagen = vw
# **Creating a Function to Replace the Values.**
def replace(a, b):
data["CompanyName"].replace(a, b, inplace=True)
replace("maxda", "mazda")
replace("porcshce", "porsche")
replace("toyouta", "toyota")
replace("vokswagen", "volkswagen")
replace("vw", "volkswagen")
data["CompanyName"].unique()
plt.figure(figsize=(15, 6))
plt.subplot(1, 2, 1)
sns.distplot(df["price"], color="red", kde=True)
plt.title("Car Price Distribution", fontweight="black", pad=20, fontsize=20)
plt.subplot(1, 2, 2)
sns.boxplot(y=df["price"], palette="Set2")
plt.title("Car Price Spread", fontweight="black", pad=20, fontsize=20)
plt.tight_layout()
plt.show()
# **Visualizing Total No. of cars sold by different companies**
plt.figure(figsize=(14, 6))
counts = data["CompanyName"].value_counts()
sns.barplot(x=counts.index, y=counts.values)
plt.xlabel("Car Company")
plt.ylabel("Total No. of cars sold")
plt.title("Total Cars produced by Companies", pad=20, fontweight="black", fontsize=20)
plt.xticks(rotation=90)
plt.show()
df[data["CompanyName"] == "mercury"]
plt.figure(figsize=(15, 6))
plt.subplot(1, 2, 1)
sns.boxplot(x="CompanyName", y="price", data=data)
plt.xticks(rotation=90)
plt.title("Car Company vs Price", pad=10, fontweight="black", fontsize=20)
plt.subplot(1, 2, 2)
x = pd.DataFrame(
data.groupby("CompanyName")["price"].mean().sort_values(ascending=False)
)
sns.barplot(x=x.index, y="price", data=x)
plt.xticks(rotation=90)
plt.title("Car Company vs Average Price", pad=10, fontweight="black", fontsize=20)
plt.tight_layout()
plt.show()
# **Visualizing Car Fuel Type Feature**
def categorical_visualization(cols):
plt.figure(figsize=(20, 8))
plt.subplot(1, 3, 1)
sns.countplot(x=cols, data=df, palette="Set2", order=df[cols].value_counts().index)
plt.title(f"{cols} Distribution", pad=10, fontweight="black", fontsize=18)
plt.xticks(rotation=90)
plt.subplot(1, 3, 2)
sns.boxplot(x=cols, y="price", data=df, palette="Set2")
plt.title(f"{cols} vs Price", pad=20, fontweight="black", fontsize=18)
plt.xticks(rotation=90)
plt.subplot(1, 3, 3)
x = pd.DataFrame(df.groupby(cols)["price"].mean().sort_values(ascending=False))
sns.barplot(x=x.index, y="price", data=x, palette="Set2")
plt.title(f"{cols} vs Average Price", pad=20, fontweight="black", fontsize=18)
plt.xticks(rotation=90)
plt.tight_layout()
plt.show()
categorical_visualization("fueltype")
# **Visualizing Aspiration Feature.**
categorical_visualization("aspiration")
# **Visualizing Door Number Feature.**
categorical_visualization("doornumber")
# **Visualizing Car Body Type Feature.**
categorical_visualization("carbody")
# **Visualizing Drive Wheel Feature**
categorical_visualization("drivewheel")
# **Visualizing Engine Location Feature.**
categorical_visualization("enginelocation")
# **Visualizing Engine Type Feature**
categorical_visualization("enginetype")
# **11. Visualizing Cylinder Number Feature.**
categorical_visualization("cylindernumber")
# **12. Visualizing Fuel System Feature.**
#
categorical_visualization("fuelsystem")
# **13.categorical_visualization("symboling")**
# 1. The symboling is an insurance risk rating assigned to each car relative to its price.
# 2. It ranges from -3 to +3, with positive values indicating the car is riskier than its price suggests and negative values indicating it is relatively safe.
# 3. In other words, a car with a symboling of +3 is considered riskier than a car with a symboling of -3.
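# As a quick numeric companion to the plot below (a small sketch on the same DataFrame):
# average price and number of cars at each symboling level.
print(df.groupby("symboling")["price"].agg(["mean", "count"]).round(0))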
categorical_visualization("symboling")
def scatter_plot(cols):
x = 1
plt.figure(figsize=(15, 6))
for col in cols:
plt.subplot(1, 3, x)
sns.scatterplot(x=col, y="price", data=df, color="blue")
plt.title(f"{col} vs Price", fontweight="black", fontsize=20, pad=10)
plt.tight_layout()
x += 1
scatter_plot(["carlength", "carwidth", "carheight"])
scatter_plot(["compressionratio", "horsepower", "peakrpm"])
# # Feature Engineering
# **1. Deriving New Features From "Company Name" Feature.**
z = round(df.groupby(["CompanyName"])["price"].agg(["mean"]), 2).T
z
df = df.merge(z.T, how="left", on="CompanyName")
# Bin the company-level average price into three segments; keep the new column on the same
# frame (df) so the feature selection below can pick it up.
bins = [0, 10000, 20000, 40000]
cars_bin = ["Budget", "Medium", "Highend"]
df["CarsRange"] = pd.cut(df["mean"], bins, right=False, labels=cars_bin)
df.head()
# # Data Preprocessing
# **1. Creating new DataFrame with all the useful Features.**
new_df = df[
[
"fueltype",
"aspiration",
"doornumber",
"carbody",
"drivewheel",
"enginetype",
"cylindernumber",
"fuelsystem",
"wheelbase",
"carlength",
"carwidth",
"curbweight",
"enginesize",
"boreratio",
"horsepower",
"citympg",
"highwaympg",
"price",
"CarsRange",
]
]
new_df.head()
# **2. Creating Dummy Variables for all the Categorical Features**
new_df = pd.get_dummies(
columns=[
"fueltype",
"aspiration",
"doornumber",
"carbody",
"drivewheel",
"enginetype",
"cylindernumber",
"fuelsystem",
"CarsRange",
],
data=new_df,
)
new_df
# **3. Feature Scaling of Numerical Data.**
scaler = StandardScaler()
num_cols = [
"wheelbase",
"carlength",
"carwidth",
"curbweight",
"enginesize",
"boreratio",
"horsepower",
"citympg",
"highwaympg",
]
new_df[num_cols] = scaler.fit_transform(new_df[num_cols])
new_df.head()
# **4. Selecting Features & Labels for Model Training & Testing.**
x = new_df.drop(columns=["price"])
y = new_df["price"]
x.shape
y.shape
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.2, random_state=42
)
print("x_train - > ", x_train.shape)
print("x_test - > ", x_test.shape)
print("y_train - > ", y_train.shape)
print("y_test - > ", y_test.shape)
training_score = []
testing_score = []
def model_prediction(model):
model.fit(x_train, y_train)
x_train_pred = model.predict(x_train)
x_test_pred = model.predict(x_test)
a = r2_score(y_train, x_train_pred) * 100
b = r2_score(y_test, x_test_pred) * 100
training_score.append(a)
testing_score.append(b)
print(f"r2_Score of {model} model on Training Data is:", a)
print(f"r2_Score of {model} model on Testing Data is:", b)
# # Linear Regression Model
model_prediction(LinearRegression())
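# **Optional robustness check (a minimal sketch).** With only ~41 rows in the test split, a
# 5-fold cross-validated R2 score on the same feature matrix gives a steadier estimate of fit
# (note the scaler above was fit on the full data, so treat this as a rough check only).
from sklearn.model_selection import cross_val_score

cv_scores = cross_val_score(LinearRegression(), x, y, cv=5, scoring="r2")
print("5-Fold CV r2 Scores :", cv_scores * 100)
print("Mean CV r2 Score :", cv_scores.mean() * 100)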
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/093/129093048.ipynb
|
car-price-prediction
|
hellbuoy
|
[{"Id": 129093048, "ScriptId": 37768839, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14091700, "CreationDate": "05/11/2023 00:52:30", "VersionNumber": 1.0, "Title": "mutiple linear regression", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 323.0, "LinesInsertedFromPrevious": 323.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 184841810, "KernelVersionId": 129093048, "SourceDatasetVersionId": 741735}]
|
[{"Id": 741735, "DatasetId": 383055, "DatasourceVersionId": 762363, "CreatorUserId": 2318606, "LicenseName": "Unknown", "CreationDate": "10/15/2019 16:45:27", "VersionNumber": 1.0, "Title": "Car Price Prediction Multiple Linear Regression", "Slug": "car-price-prediction", "Subtitle": "Predicting the Prices of cars using RFE and VIF", "Description": "### Problem Statement\n\nA Chinese automobile company Geely Auto aspires to enter the US market by setting up their manufacturing unit there and producing cars locally to give competition to their US and European counterparts. \n\n \n\nThey have contracted an automobile consulting company to understand the factors on which the pricing of cars depends. Specifically, they want to understand the factors affecting the pricing of cars in the American market, since those may be very different from the Chinese market. The company wants to know:\n\nWhich variables are significant in predicting the price of a car\nHow well those variables describe the price of a car\nBased on various market surveys, the consulting firm has gathered a large data set of different types of cars across the America market. \n\n\n### Business Goal\n\nWe are required to model the price of cars with the available independent variables. It will be used by the management to understand how exactly the prices vary with the independent variables. They can accordingly manipulate the design of the cars, the business strategy etc. to meet certain price levels. Further, the model will be a good way for management to understand the pricing dynamics of a new market. \n\n### Please Note : The dataset provided is for learning purpose. Please don\u2019t draw any inference with real world scenario.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 383055, "CreatorUserId": 2318606, "OwnerUserId": 2318606.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 741735.0, "CurrentDatasourceVersionId": 762363.0, "ForumId": 395004, "Type": 2, "CreationDate": "10/15/2019 16:45:27", "LastActivityDate": "10/15/2019", "TotalViews": 339360, "TotalDownloads": 50133, "TotalVotes": 491, "TotalKernels": 345}]
|
[{"Id": 2318606, "UserName": "hellbuoy", "DisplayName": "Manish Kumar", "RegisterDate": "10/03/2018", "PerformanceTier": 2}]
|
# **Import libraries**
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
import seaborn as sns
import pylab
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.metrics import r2_score
# # Data loading
data = pd.read_csv("/kaggle/input/car-price-prediction/CarPrice_Assignment.csv")
df = data  # later cells refer to this frame as both `data` and `df`, so keep one shared reference
data.head()
data.tail()
# # Data Understanding
# **shape**
print("data shape : ", data.shape)
# **basic information**
print("basic information :", data.info())
# **Descriptive Statistics Analysis**
data.describe()
# **checking NaN value**
data.isnull().sum().to_frame().rename(columns={0: "Total No. of Missing Values"})
# if there is any NaN value use this code
# data.dropna(inplace= True)
# data: This is the DataFrame we are working with.
# isnull(): This method checks each element of the DataFrame to see if it is null or NaN, and returns a DataFrame of the same shape as the original DataFrame, with True where the element is null or NaN and False otherwise.
# sum(): This method sums up the number of True values in each column of the DataFrame returned by isnull(), giving us the total number of missing values in each column.
# to_frame(): This method converts the Pandas Series returned by sum() to a DataFrame with the same column name as the original Series.
# rename(columns={0:"Total No. of Missing Values"}): This method renames the column of the DataFrame to "Total No. of Missing Values" for clarity.
# **Checking Duplicate Values**
print("Duplicate Values =", df.duplicated().sum())
# Duplicated values are repeated rows in the data; they can affect the model.
# **Showing Only Categorical Features.**
df.select_dtypes(include="object").head()
# df: the name of the dataframe being used
# .select_dtypes(include="object"): a pandas function used to select columns of a particular data type, in this case "object" data types (which typically refer to string values)
# .head(): a pandas function used to display the first 5 rows of the selected dataframe
# **Showing only the Numerical Features.**
df.select_dtypes(include=["int", "float"]).head()
# # Data Cleaning
data.head()
# **taking company name from car names**
Company_Name = df["CarName"].apply(lambda x: x.split(" ")[0])
data.insert(2, "CompanyName", Company_Name)
# Now we can drop the CarName Feature.
data.drop(columns=["CarName"], inplace=True)
# First take the CarName values and apply a lambda that keeps only the first word (the company name);
# without apply(), the lambda would run on a single value rather than on every row.
# The new CompanyName column is inserted as the third column (index 2).
# After that, CarName is dropped with inplace=True; without inplace we would have to assign the result to a new variable.
# **check the data set**
data.head()
# **Checking the Unique Car Company Names.**
data["CompanyName"].unique()
# The unique() method in Pandas is used to get a unique set of values in a Pandas object such as a Series or DataFrame column. It returns an array of unique values in the same order as they appear in the original object.
# maxda = mazda
# Nissan = nissan
# porsche = porcshce
# toyota = toyouta
# vokswagen = volkswagen = vw
# **Creating a Function to Replace the Values.**
def replace(a, b):
data["CompanyName"].replace(a, b, inplace=True)
replace("maxda", "mazda")
replace("porcshce", "porsche")
replace("toyouta", "toyota")
replace("vokswagen", "volkswagen")
replace("vw", "volkswagen")
data["CompanyName"].unique()
plt.figure(figsize=(15, 6))
plt.subplot(1, 2, 1)
sns.distplot(df["price"], color="red", kde=True)
plt.title("Car Price Distribution", fontweight="black", pad=20, fontsize=20)
plt.subplot(1, 2, 2)
sns.boxplot(y=df["price"], palette="Set2")
plt.title("Car Price Spread", fontweight="black", pad=20, fontsize=20)
plt.tight_layout()
plt.show()
# **Visualizing Total No. of cars sold by different companies**
plt.figure(figsize=(14, 6))
counts = data["CompanyName"].value_counts()
sns.barplot(x=counts.index, y=counts.values)
plt.xlabel("Car Company")
plt.ylabel("Total No. of cars sold")
plt.title("Total Cars produced by Companies", pad=20, fontweight="black", fontsize=20)
plt.xticks(rotation=90)
plt.show()
df[data["CompanyName"] == "mercury"]
plt.figure(figsize=(15, 6))
plt.subplot(1, 2, 1)
sns.boxplot(x="CompanyName", y="price", data=data)
plt.xticks(rotation=90)
plt.title("Car Company vs Price", pad=10, fontweight="black", fontsize=20)
plt.subplot(1, 2, 2)
x = pd.DataFrame(
data.groupby("CompanyName")["price"].mean().sort_values(ascending=False)
)
sns.barplot(x=x.index, y="price", data=x)
plt.xticks(rotation=90)
plt.title("Car Company vs Average Price", pad=10, fontweight="black", fontsize=20)
plt.tight_layout()
plt.show()
# **Visualizing Car Fuel Type Feature**
def categorical_visualization(cols):
plt.figure(figsize=(20, 8))
plt.subplot(1, 3, 1)
sns.countplot(x=cols, data=df, palette="Set2", order=df[cols].value_counts().index)
plt.title(f"{cols} Distribution", pad=10, fontweight="black", fontsize=18)
plt.xticks(rotation=90)
plt.subplot(1, 3, 2)
sns.boxplot(x=cols, y="price", data=df, palette="Set2")
plt.title(f"{cols} vs Price", pad=20, fontweight="black", fontsize=18)
plt.xticks(rotation=90)
plt.subplot(1, 3, 3)
x = pd.DataFrame(df.groupby(cols)["price"].mean().sort_values(ascending=False))
sns.barplot(x=x.index, y="price", data=x, palette="Set2")
plt.title(f"{cols} vs Average Price", pad=20, fontweight="black", fontsize=18)
plt.xticks(rotation=90)
plt.tight_layout()
plt.show()
categorical_visualization("fueltype")
# **Visualizing Aspiration Feature.**
categorical_visualization("aspiration")
# **Visualizing Door Number Feature.**
categorical_visualization("doornumber")
# **Visualizing Car Body Type Feature.**
categorical_visualization("carbody")
# **Visualizing Drive Wheel Feature**
categorical_visualization("drivewheel")
# **Visualizing Engine Location Feature.**
categorical_visualization("enginelocation")
# **Visualizing Engine Type Feature**
categorical_visualization("enginetype")
# **11. Visualizing Cylinder Number Feature.**
categorical_visualization("cylindernumber")
# **12. Visualizing Fuel System Feature.**
#
categorical_visualization("fuelsystem")
# **13.categorical_visualization("symboling")**
# 1. The symboling is an insurance risk rating assigned to each car relative to its price.
# 2. It ranges from -3 to +3, with positive values indicating the car is riskier than its price suggests and negative values indicating it is relatively safe.
# 3. In other words, a car with a symboling of +3 is considered riskier than a car with a symboling of -3.
categorical_visualization("symboling")
def scatter_plot(cols):
x = 1
plt.figure(figsize=(15, 6))
for col in cols:
plt.subplot(1, 3, x)
sns.scatterplot(x=col, y="price", data=df, color="blue")
plt.title(f"{col} vs Price", fontweight="black", fontsize=20, pad=10)
plt.tight_layout()
x += 1
scatter_plot(["carlength", "carwidth", "carheight"])
scatter_plot(["compressionratio", "horsepower", "peakrpm"])
# # Feature Engineering
# **1. Deriving New Features From "Company Name" Feature.**
z = round(df.groupby(["CompanyName"])["price"].agg(["mean"]), 2).T
z
df = df.merge(z.T, how="left", on="CompanyName")
# Bin the company-level average price into three segments; keep the new column on the same
# frame (df) so the feature selection below can pick it up.
bins = [0, 10000, 20000, 40000]
cars_bin = ["Budget", "Medium", "Highend"]
df["CarsRange"] = pd.cut(df["mean"], bins, right=False, labels=cars_bin)
df.head()
# # Data Preprocessing
# **1. Creating new DataFrame with all the useful Features.**
new_df = df[
[
"fueltype",
"aspiration",
"doornumber",
"carbody",
"drivewheel",
"enginetype",
"cylindernumber",
"fuelsystem",
"wheelbase",
"carlength",
"carwidth",
"curbweight",
"enginesize",
"boreratio",
"horsepower",
"citympg",
"highwaympg",
"price",
"CarsRange",
]
]
new_df.head()
# **2. Creating Dummy Variables for all the Categorical Features**
new_df = pd.get_dummies(
columns=[
"fueltype",
"aspiration",
"doornumber",
"carbody",
"drivewheel",
"enginetype",
"cylindernumber",
"fuelsystem",
"CarsRange",
],
data=new_df,
)
new_df
# **3. Feature Scaling of Numerical Data.**
scaler = StandardScaler()
num_cols = [
"wheelbase",
"carlength",
"carwidth",
"curbweight",
"enginesize",
"boreratio",
"horsepower",
"citympg",
"highwaympg",
]
new_df[num_cols] = scaler.fit_transform(new_df[num_cols])
new_df.head()
# **4. Selecting Features & Labels for Model Training & Testing.**
x = new_df.drop(columns=["price"])
y = new_df["price"]
x.shape
y.shape
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.2, random_state=42
)
print("x_train - > ", x_train.shape)
print("x_test - > ", x_test.shape)
print("y_train - > ", y_train.shape)
print("y_test - > ", y_test.shape)
training_score = []
testing_score = []
def model_prediction(model):
model.fit(x_train, y_train)
x_train_pred = model.predict(x_train)
x_test_pred = model.predict(x_test)
a = r2_score(y_train, x_train_pred) * 100
b = r2_score(y_test, x_test_pred) * 100
training_score.append(a)
testing_score.append(b)
print(f"r2_Score of {model} model on Training Data is:", a)
print(f"r2_Score of {model} model on Testing Data is:", b)
# # Linear Regression Model
model_prediction(LinearRegression())
|
[{"car-price-prediction/CarPrice_Assignment.csv": {"column_names": "[\"car_ID\", \"symboling\", \"CarName\", \"fueltype\", \"aspiration\", \"doornumber\", \"carbody\", \"drivewheel\", \"enginelocation\", \"wheelbase\", \"carlength\", \"carwidth\", \"carheight\", \"curbweight\", \"enginetype\", \"cylindernumber\", \"enginesize\", \"fuelsystem\", \"boreratio\", \"stroke\", \"compressionratio\", \"horsepower\", \"peakrpm\", \"citympg\", \"highwaympg\", \"price\"]", "column_data_types": "{\"car_ID\": \"int64\", \"symboling\": \"int64\", \"CarName\": \"object\", \"fueltype\": \"object\", \"aspiration\": \"object\", \"doornumber\": \"object\", \"carbody\": \"object\", \"drivewheel\": \"object\", \"enginelocation\": \"object\", \"wheelbase\": \"float64\", \"carlength\": \"float64\", \"carwidth\": \"float64\", \"carheight\": \"float64\", \"curbweight\": \"int64\", \"enginetype\": \"object\", \"cylindernumber\": \"object\", \"enginesize\": \"int64\", \"fuelsystem\": \"object\", \"boreratio\": \"float64\", \"stroke\": \"float64\", \"compressionratio\": \"float64\", \"horsepower\": \"int64\", \"peakrpm\": \"int64\", \"citympg\": \"int64\", \"highwaympg\": \"int64\", \"price\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 205 entries, 0 to 204\nData columns (total 26 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 car_ID 205 non-null int64 \n 1 symboling 205 non-null int64 \n 2 CarName 205 non-null object \n 3 fueltype 205 non-null object \n 4 aspiration 205 non-null object \n 5 doornumber 205 non-null object \n 6 carbody 205 non-null object \n 7 drivewheel 205 non-null object \n 8 enginelocation 205 non-null object \n 9 wheelbase 205 non-null float64\n 10 carlength 205 non-null float64\n 11 carwidth 205 non-null float64\n 12 carheight 205 non-null float64\n 13 curbweight 205 non-null int64 \n 14 enginetype 205 non-null object \n 15 cylindernumber 205 non-null object \n 16 enginesize 205 non-null int64 \n 17 fuelsystem 205 non-null object \n 18 boreratio 205 non-null float64\n 19 stroke 205 non-null float64\n 20 compressionratio 205 non-null float64\n 21 horsepower 205 non-null int64 \n 22 peakrpm 205 non-null int64 \n 23 citympg 205 non-null int64 \n 24 highwaympg 205 non-null int64 \n 25 price 205 non-null float64\ndtypes: float64(8), int64(8), object(10)\nmemory usage: 41.8+ KB\n", "summary": "{\"car_ID\": {\"count\": 205.0, \"mean\": 103.0, \"std\": 59.32256456582661, \"min\": 1.0, \"25%\": 52.0, \"50%\": 103.0, \"75%\": 154.0, \"max\": 205.0}, \"symboling\": {\"count\": 205.0, \"mean\": 0.8341463414634146, \"std\": 1.2453068281055297, \"min\": -2.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 2.0, \"max\": 3.0}, \"wheelbase\": {\"count\": 205.0, \"mean\": 98.75658536585367, \"std\": 6.021775685025571, \"min\": 86.6, \"25%\": 94.5, \"50%\": 97.0, \"75%\": 102.4, \"max\": 120.9}, \"carlength\": {\"count\": 205.0, \"mean\": 174.04926829268288, \"std\": 12.33728852655518, \"min\": 141.1, \"25%\": 166.3, \"50%\": 173.2, \"75%\": 183.1, \"max\": 208.1}, \"carwidth\": {\"count\": 205.0, \"mean\": 65.90780487804878, \"std\": 2.145203852687183, \"min\": 60.3, \"25%\": 64.1, \"50%\": 65.5, \"75%\": 66.9, \"max\": 72.3}, \"carheight\": {\"count\": 205.0, \"mean\": 53.72487804878049, \"std\": 2.4435219699049036, \"min\": 47.8, \"25%\": 52.0, \"50%\": 54.1, \"75%\": 55.5, \"max\": 59.8}, \"curbweight\": {\"count\": 205.0, \"mean\": 2555.5658536585365, \"std\": 520.6802035016387, \"min\": 1488.0, \"25%\": 2145.0, \"50%\": 2414.0, \"75%\": 2935.0, \"max\": 
4066.0}, \"enginesize\": {\"count\": 205.0, \"mean\": 126.90731707317073, \"std\": 41.64269343817984, \"min\": 61.0, \"25%\": 97.0, \"50%\": 120.0, \"75%\": 141.0, \"max\": 326.0}, \"boreratio\": {\"count\": 205.0, \"mean\": 3.329756097560975, \"std\": 0.27084370542622926, \"min\": 2.54, \"25%\": 3.15, \"50%\": 3.31, \"75%\": 3.58, \"max\": 3.94}, \"stroke\": {\"count\": 205.0, \"mean\": 3.255414634146341, \"std\": 0.31359701376080407, \"min\": 2.07, \"25%\": 3.11, \"50%\": 3.29, \"75%\": 3.41, \"max\": 4.17}, \"compressionratio\": {\"count\": 205.0, \"mean\": 10.142536585365855, \"std\": 3.972040321863298, \"min\": 7.0, \"25%\": 8.6, \"50%\": 9.0, \"75%\": 9.4, \"max\": 23.0}, \"horsepower\": {\"count\": 205.0, \"mean\": 104.1170731707317, \"std\": 39.54416680936116, \"min\": 48.0, \"25%\": 70.0, \"50%\": 95.0, \"75%\": 116.0, \"max\": 288.0}, \"peakrpm\": {\"count\": 205.0, \"mean\": 5125.121951219512, \"std\": 476.98564305694634, \"min\": 4150.0, \"25%\": 4800.0, \"50%\": 5200.0, \"75%\": 5500.0, \"max\": 6600.0}, \"citympg\": {\"count\": 205.0, \"mean\": 25.21951219512195, \"std\": 6.542141653001622, \"min\": 13.0, \"25%\": 19.0, \"50%\": 24.0, \"75%\": 30.0, \"max\": 49.0}, \"highwaympg\": {\"count\": 205.0, \"mean\": 30.75121951219512, \"std\": 6.886443130941824, \"min\": 16.0, \"25%\": 25.0, \"50%\": 30.0, \"75%\": 34.0, \"max\": 54.0}, \"price\": {\"count\": 205.0, \"mean\": 13276.710570731706, \"std\": 7988.85233174315, \"min\": 5118.0, \"25%\": 7788.0, \"50%\": 10295.0, \"75%\": 16503.0, \"max\": 45400.0}}", "examples": "{\"car_ID\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"symboling\":{\"0\":3,\"1\":3,\"2\":1,\"3\":2},\"CarName\":{\"0\":\"alfa-romero giulia\",\"1\":\"alfa-romero stelvio\",\"2\":\"alfa-romero Quadrifoglio\",\"3\":\"audi 100 ls\"},\"fueltype\":{\"0\":\"gas\",\"1\":\"gas\",\"2\":\"gas\",\"3\":\"gas\"},\"aspiration\":{\"0\":\"std\",\"1\":\"std\",\"2\":\"std\",\"3\":\"std\"},\"doornumber\":{\"0\":\"two\",\"1\":\"two\",\"2\":\"two\",\"3\":\"four\"},\"carbody\":{\"0\":\"convertible\",\"1\":\"convertible\",\"2\":\"hatchback\",\"3\":\"sedan\"},\"drivewheel\":{\"0\":\"rwd\",\"1\":\"rwd\",\"2\":\"rwd\",\"3\":\"fwd\"},\"enginelocation\":{\"0\":\"front\",\"1\":\"front\",\"2\":\"front\",\"3\":\"front\"},\"wheelbase\":{\"0\":88.6,\"1\":88.6,\"2\":94.5,\"3\":99.8},\"carlength\":{\"0\":168.8,\"1\":168.8,\"2\":171.2,\"3\":176.6},\"carwidth\":{\"0\":64.1,\"1\":64.1,\"2\":65.5,\"3\":66.2},\"carheight\":{\"0\":48.8,\"1\":48.8,\"2\":52.4,\"3\":54.3},\"curbweight\":{\"0\":2548,\"1\":2548,\"2\":2823,\"3\":2337},\"enginetype\":{\"0\":\"dohc\",\"1\":\"dohc\",\"2\":\"ohcv\",\"3\":\"ohc\"},\"cylindernumber\":{\"0\":\"four\",\"1\":\"four\",\"2\":\"six\",\"3\":\"four\"},\"enginesize\":{\"0\":130,\"1\":130,\"2\":152,\"3\":109},\"fuelsystem\":{\"0\":\"mpfi\",\"1\":\"mpfi\",\"2\":\"mpfi\",\"3\":\"mpfi\"},\"boreratio\":{\"0\":3.47,\"1\":3.47,\"2\":2.68,\"3\":3.19},\"stroke\":{\"0\":2.68,\"1\":2.68,\"2\":3.47,\"3\":3.4},\"compressionratio\":{\"0\":9.0,\"1\":9.0,\"2\":9.0,\"3\":10.0},\"horsepower\":{\"0\":111,\"1\":111,\"2\":154,\"3\":102},\"peakrpm\":{\"0\":5000,\"1\":5000,\"2\":5000,\"3\":5500},\"citympg\":{\"0\":21,\"1\":21,\"2\":19,\"3\":24},\"highwaympg\":{\"0\":27,\"1\":27,\"2\":26,\"3\":30},\"price\":{\"0\":13495.0,\"1\":16500.0,\"2\":16500.0,\"3\":13950.0}}"}}]
| true | 1 |
<start_data_description><data_path>car-price-prediction/CarPrice_Assignment.csv:
<column_names>
['car_ID', 'symboling', 'CarName', 'fueltype', 'aspiration', 'doornumber', 'carbody', 'drivewheel', 'enginelocation', 'wheelbase', 'carlength', 'carwidth', 'carheight', 'curbweight', 'enginetype', 'cylindernumber', 'enginesize', 'fuelsystem', 'boreratio', 'stroke', 'compressionratio', 'horsepower', 'peakrpm', 'citympg', 'highwaympg', 'price']
<column_types>
{'car_ID': 'int64', 'symboling': 'int64', 'CarName': 'object', 'fueltype': 'object', 'aspiration': 'object', 'doornumber': 'object', 'carbody': 'object', 'drivewheel': 'object', 'enginelocation': 'object', 'wheelbase': 'float64', 'carlength': 'float64', 'carwidth': 'float64', 'carheight': 'float64', 'curbweight': 'int64', 'enginetype': 'object', 'cylindernumber': 'object', 'enginesize': 'int64', 'fuelsystem': 'object', 'boreratio': 'float64', 'stroke': 'float64', 'compressionratio': 'float64', 'horsepower': 'int64', 'peakrpm': 'int64', 'citympg': 'int64', 'highwaympg': 'int64', 'price': 'float64'}
<dataframe_Summary>
{'car_ID': {'count': 205.0, 'mean': 103.0, 'std': 59.32256456582661, 'min': 1.0, '25%': 52.0, '50%': 103.0, '75%': 154.0, 'max': 205.0}, 'symboling': {'count': 205.0, 'mean': 0.8341463414634146, 'std': 1.2453068281055297, 'min': -2.0, '25%': 0.0, '50%': 1.0, '75%': 2.0, 'max': 3.0}, 'wheelbase': {'count': 205.0, 'mean': 98.75658536585367, 'std': 6.021775685025571, 'min': 86.6, '25%': 94.5, '50%': 97.0, '75%': 102.4, 'max': 120.9}, 'carlength': {'count': 205.0, 'mean': 174.04926829268288, 'std': 12.33728852655518, 'min': 141.1, '25%': 166.3, '50%': 173.2, '75%': 183.1, 'max': 208.1}, 'carwidth': {'count': 205.0, 'mean': 65.90780487804878, 'std': 2.145203852687183, 'min': 60.3, '25%': 64.1, '50%': 65.5, '75%': 66.9, 'max': 72.3}, 'carheight': {'count': 205.0, 'mean': 53.72487804878049, 'std': 2.4435219699049036, 'min': 47.8, '25%': 52.0, '50%': 54.1, '75%': 55.5, 'max': 59.8}, 'curbweight': {'count': 205.0, 'mean': 2555.5658536585365, 'std': 520.6802035016387, 'min': 1488.0, '25%': 2145.0, '50%': 2414.0, '75%': 2935.0, 'max': 4066.0}, 'enginesize': {'count': 205.0, 'mean': 126.90731707317073, 'std': 41.64269343817984, 'min': 61.0, '25%': 97.0, '50%': 120.0, '75%': 141.0, 'max': 326.0}, 'boreratio': {'count': 205.0, 'mean': 3.329756097560975, 'std': 0.27084370542622926, 'min': 2.54, '25%': 3.15, '50%': 3.31, '75%': 3.58, 'max': 3.94}, 'stroke': {'count': 205.0, 'mean': 3.255414634146341, 'std': 0.31359701376080407, 'min': 2.07, '25%': 3.11, '50%': 3.29, '75%': 3.41, 'max': 4.17}, 'compressionratio': {'count': 205.0, 'mean': 10.142536585365855, 'std': 3.972040321863298, 'min': 7.0, '25%': 8.6, '50%': 9.0, '75%': 9.4, 'max': 23.0}, 'horsepower': {'count': 205.0, 'mean': 104.1170731707317, 'std': 39.54416680936116, 'min': 48.0, '25%': 70.0, '50%': 95.0, '75%': 116.0, 'max': 288.0}, 'peakrpm': {'count': 205.0, 'mean': 5125.121951219512, 'std': 476.98564305694634, 'min': 4150.0, '25%': 4800.0, '50%': 5200.0, '75%': 5500.0, 'max': 6600.0}, 'citympg': {'count': 205.0, 'mean': 25.21951219512195, 'std': 6.542141653001622, 'min': 13.0, '25%': 19.0, '50%': 24.0, '75%': 30.0, 'max': 49.0}, 'highwaympg': {'count': 205.0, 'mean': 30.75121951219512, 'std': 6.886443130941824, 'min': 16.0, '25%': 25.0, '50%': 30.0, '75%': 34.0, 'max': 54.0}, 'price': {'count': 205.0, 'mean': 13276.710570731706, 'std': 7988.85233174315, 'min': 5118.0, '25%': 7788.0, '50%': 10295.0, '75%': 16503.0, 'max': 45400.0}}
<dataframe_info>
RangeIndex: 205 entries, 0 to 204
Data columns (total 26 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 car_ID 205 non-null int64
1 symboling 205 non-null int64
2 CarName 205 non-null object
3 fueltype 205 non-null object
4 aspiration 205 non-null object
5 doornumber 205 non-null object
6 carbody 205 non-null object
7 drivewheel 205 non-null object
8 enginelocation 205 non-null object
9 wheelbase 205 non-null float64
10 carlength 205 non-null float64
11 carwidth 205 non-null float64
12 carheight 205 non-null float64
13 curbweight 205 non-null int64
14 enginetype 205 non-null object
15 cylindernumber 205 non-null object
16 enginesize 205 non-null int64
17 fuelsystem 205 non-null object
18 boreratio 205 non-null float64
19 stroke 205 non-null float64
20 compressionratio 205 non-null float64
21 horsepower 205 non-null int64
22 peakrpm 205 non-null int64
23 citympg 205 non-null int64
24 highwaympg 205 non-null int64
25 price 205 non-null float64
dtypes: float64(8), int64(8), object(10)
memory usage: 41.8+ KB
<some_examples>
{'car_ID': {'0': 1, '1': 2, '2': 3, '3': 4}, 'symboling': {'0': 3, '1': 3, '2': 1, '3': 2}, 'CarName': {'0': 'alfa-romero giulia', '1': 'alfa-romero stelvio', '2': 'alfa-romero Quadrifoglio', '3': 'audi 100 ls'}, 'fueltype': {'0': 'gas', '1': 'gas', '2': 'gas', '3': 'gas'}, 'aspiration': {'0': 'std', '1': 'std', '2': 'std', '3': 'std'}, 'doornumber': {'0': 'two', '1': 'two', '2': 'two', '3': 'four'}, 'carbody': {'0': 'convertible', '1': 'convertible', '2': 'hatchback', '3': 'sedan'}, 'drivewheel': {'0': 'rwd', '1': 'rwd', '2': 'rwd', '3': 'fwd'}, 'enginelocation': {'0': 'front', '1': 'front', '2': 'front', '3': 'front'}, 'wheelbase': {'0': 88.6, '1': 88.6, '2': 94.5, '3': 99.8}, 'carlength': {'0': 168.8, '1': 168.8, '2': 171.2, '3': 176.6}, 'carwidth': {'0': 64.1, '1': 64.1, '2': 65.5, '3': 66.2}, 'carheight': {'0': 48.8, '1': 48.8, '2': 52.4, '3': 54.3}, 'curbweight': {'0': 2548, '1': 2548, '2': 2823, '3': 2337}, 'enginetype': {'0': 'dohc', '1': 'dohc', '2': 'ohcv', '3': 'ohc'}, 'cylindernumber': {'0': 'four', '1': 'four', '2': 'six', '3': 'four'}, 'enginesize': {'0': 130, '1': 130, '2': 152, '3': 109}, 'fuelsystem': {'0': 'mpfi', '1': 'mpfi', '2': 'mpfi', '3': 'mpfi'}, 'boreratio': {'0': 3.47, '1': 3.47, '2': 2.68, '3': 3.19}, 'stroke': {'0': 2.68, '1': 2.68, '2': 3.47, '3': 3.4}, 'compressionratio': {'0': 9.0, '1': 9.0, '2': 9.0, '3': 10.0}, 'horsepower': {'0': 111, '1': 111, '2': 154, '3': 102}, 'peakrpm': {'0': 5000, '1': 5000, '2': 5000, '3': 5500}, 'citympg': {'0': 21, '1': 21, '2': 19, '3': 24}, 'highwaympg': {'0': 27, '1': 27, '2': 26, '3': 30}, 'price': {'0': 13495.0, '1': 16500.0, '2': 16500.0, '3': 13950.0}}
<end_description>
| 3,013 | 0 | 4,720 | 3,013 |
129093082
|
# Stage 1: Data Preprocessing
import pandas as pd
from statsmodels.tsa.stattools import adfuller
from sklearn.preprocessing import MinMaxScaler
# Define the file path to the dataset
file_path = "/kaggle/input/chargingbehavior/ChargePoint Data CY20Q4.csv"
# Load the dataset into a Pandas DataFrame
df = pd.read_csv(file_path, low_memory=False)
# Convert relevant columns to appropriate datatypes
df["Transaction Date (Pacific Time)"] = pd.to_datetime(
df["Transaction Date (Pacific Time)"], yearfirst=True, errors="coerce"
)
df["Start Date"] = pd.to_datetime(df["Start Date"], yearfirst=True, errors="coerce")
df["End Date"] = pd.to_datetime(df["End Date"], yearfirst=True, errors="coerce")
# Handling missing values
df.dropna(
subset=[
"Transaction Date (Pacific Time)",
"Charging Time (hh:mm:ss)",
"Energy (kWh)",
],
inplace=True,
)
# Additional Data Preprocessing Steps
# Remove unnecessary column 'Start Time Zone'
df.drop("Start Time Zone", axis=1, inplace=True)
df.drop("End Time Zone", axis=1, inplace=True)
# Handling outliers
def handle_outliers(df, columns):
for column in columns:
q1 = df[column].quantile(0.25)
q3 = df[column].quantile(0.75)
iqr = q3 - q1
lower_bound = q1 - 1.5 * iqr
upper_bound = q3 + 1.5 * iqr
df[column] = df[column].apply(
lambda x: upper_bound
if x > upper_bound
else lower_bound
if x < lower_bound
else x
)
return df
# Specify columns to handle outliers
outlier_columns = ["Energy (kWh)", "GHG Savings (kg)", "Gasoline Savings (gallons)"]
# Apply outlier handling
df = handle_outliers(df, outlier_columns)
# Convert 'Energy (kWh)' to a stationary series
adf_result = adfuller(df["Energy (kWh)"])
p_value = adf_result[1]
if p_value > 0.05:
    df["Energy (kWh)"] = df["Energy (kWh)"].diff()
    df.dropna(subset=["Energy (kWh)"], inplace=True)  # drop the NaN row created by differencing
# Scaling
scaler = MinMaxScaler()
columns_to_scale = ["Energy (kWh)", "GHG Savings (kg)", "Gasoline Savings (gallons)"]
df[columns_to_scale] = scaler.fit_transform(df[columns_to_scale])
# Confirm the preprocessing is complete
preprocessed = True
df.columns
# Generate data types of all columns
data_types = df.dtypes
# Print the data types
print(data_types)
# Stage 2: Model Identification
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import acf, pacf
# Select the 'Energy (kWh)' column from the preprocessed DataFrame
energy_data = df["Energy (kWh)"]
# Calculate the ACF and PACF values
acf_vals, conf_int_acf = acf(energy_data, nlags=50, alpha=0.05)
pacf_vals, conf_int_pacf = pacf(energy_data, nlags=50, alpha=0.05)
# Print the ACF values
print("ACF values:")
for lag, acf_val in enumerate(acf_vals):
    print(f"Lag {lag}: {acf_val}")  # enumerate starts at lag 0, whose autocorrelation is always 1
# Print the PACF values
print("PACF values:")
for lag, pacf_val in enumerate(pacf_vals):
    print(f"Lag {lag}: {pacf_val}")  # lag 0 is trivially 1
# Plot the ACF
fig, ax = plt.subplots(figsize=(12, 6))
plt.stem(range(len(acf_vals)), acf_vals)  # use_line_collection is the default and deprecated as an explicit argument in recent Matplotlib
plt.xlabel("Lag")
plt.ylabel("Autocorrelation")
plt.title("ACF Plot")
plt.show()
# Plot the PACF
fig, ax = plt.subplots(figsize=(12, 6))
plt.stem(range(len(pacf_vals)), pacf_vals)  # use_line_collection is the default and deprecated as an explicit argument in recent Matplotlib
plt.xlabel("Lag")
plt.ylabel("Partial Autocorrelation")
plt.title("PACF Plot")
plt.show()
from statsmodels.tsa.arima.model import ARIMA
# Define the optimal parameter values
p = 2
d = 0
q = 1
# Fit the ARIMA model
model = ARIMA(df["Energy (kWh)"], order=(p, d, q), enforce_stationarity=False)
model_fit = model.fit()
# Print the summary of the model
print(model_fit.summary())
# The output of the ARIMA model suggests that the model has been successfully fitted to the data. Here are some key points from the model summary:
# The optimal parameter values used are ARIMA(2, 0, 1), indicating an autoregressive order of 2, a differencing order of 0, and a moving average order of 1.
# The estimated coefficients for the AR and MA terms are as follows:
# AR(1): 0.9582
# AR(2): -0.0018
# MA(1): -0.9177
# The constant term (const) is estimated to be 0.3551.
# The estimated variance of the residuals (sigma2) is 0.0595.
# Some additional information provided by the model summary includes the Ljung-Box test for autocorrelation (Q-value), the Jarque-Bera test for normality (JB-value), and the heteroskedasticity test (H-value).
# Overall, the model seems to fit the data well, as indicated by the low AIC and BIC values and the significant coefficients. However, it's important to further evaluate the model's performance through diagnostic checks, such as examining residual plots, conducting hypothesis tests, and assessing forecast accuracy.
# Residual Analysis: Plotting the residuals helps us assess if they exhibit any patterns or remaining structure that the model did not capture.
# Ljung-Box Test: This test helps determine if the residuals are autocorrelated or exhibit any significant residual patterns.
# Jarque-Bera Test: This test assesses if the residuals follow a normal distribution.
# Forecast Evaluation: Comparing the model's forecasts to the actual values can provide insights into the accuracy of the predictions.
train_size = int(len(df) * 0.8)
train_data = df[:train_size]
test_data = df[train_size:]
# Import necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.graphics.gofplots import qqplot
from statsmodels.stats.diagnostic import acorr_ljungbox
from scipy.stats import jarque_bera
# Get the residuals
residuals = model_fit.resid
# Plotting the residuals
plt.figure(figsize=(10, 6))
plt.plot(residuals)
plt.title("Residuals")
plt.xlabel("Time")
plt.ylabel("Residual Value")
plt.show()
# Q-Q plot of the residuals
plt.figure(figsize=(8, 4))
qqplot(residuals, line="s")
plt.title("Q-Q Plot of Residuals")
plt.show()
# Ljung-Box test for residual autocorrelation
lb_result = acorr_ljungbox(residuals, lags=20)  # recent statsmodels returns a DataFrame
print(f"Ljung-Box test p-values:\n{lb_result['lb_pvalue']}")
# Jarque-Bera test for normality of residuals
_, p_value = jarque_bera(residuals)
print(f"Jarque-Bera test p-value: {p_value}")
print(test_data.columns)
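# Forecast evaluation (a minimal sketch): the diagnostic list above mentions comparing forecasts
# with actual values, but the notebook stops after the train/test split. The sketch below refits
# the same ARIMA(p, d, q) specification on the training portion only and scores its out-of-sample
# forecasts against the held-out test values. Note that the errors are on the MinMax-scaled scale,
# not in raw kWh.
from sklearn.metrics import mean_absolute_error, mean_squared_error
train_series = train_data["Energy (kWh)"]
test_series = test_data["Energy (kWh)"]
eval_model = ARIMA(train_series, order=(p, d, q), enforce_stationarity=False).fit()
forecast = eval_model.forecast(steps=len(test_series))
mae = mean_absolute_error(test_series, forecast)
rmse = np.sqrt(mean_squared_error(test_series, forecast))
print(f"Test MAE: {mae:.4f}")
print(f"Test RMSE: {rmse:.4f}")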
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/093/129093082.ipynb
| null | null |
[{"Id": 129093082, "ScriptId": 38373994, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9516386, "CreationDate": "05/11/2023 00:53:15", "VersionNumber": 4.0, "Title": "Charging Behavior", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 186.0, "LinesInsertedFromPrevious": 9.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 177.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# Stage 1: Data Preprocessing
import pandas as pd
from statsmodels.tsa.stattools import adfuller
from sklearn.preprocessing import MinMaxScaler
# Define the file path to the dataset
file_path = "/kaggle/input/chargingbehavior/ChargePoint Data CY20Q4.csv"
# Load the dataset into a Pandas DataFrame
df = pd.read_csv(file_path, low_memory=False)
# Convert relevant columns to appropriate datatypes
df["Transaction Date (Pacific Time)"] = pd.to_datetime(
df["Transaction Date (Pacific Time)"], yearfirst=True, errors="coerce"
)
df["Start Date"] = pd.to_datetime(df["Start Date"], yearfirst=True, errors="coerce")
df["End Date"] = pd.to_datetime(df["End Date"], yearfirst=True, errors="coerce")
# Handling missing values
df.dropna(
subset=[
"Transaction Date (Pacific Time)",
"Charging Time (hh:mm:ss)",
"Energy (kWh)",
],
inplace=True,
)
# Additional Data Preprocessing Steps
# Remove unnecessary column 'Start Time Zone'
df.drop("Start Time Zone", axis=1, inplace=True)
df.drop("End Time Zone", axis=1, inplace=True)
# Handling outliers
def handle_outliers(df, columns):
for column in columns:
q1 = df[column].quantile(0.25)
q3 = df[column].quantile(0.75)
iqr = q3 - q1
lower_bound = q1 - 1.5 * iqr
upper_bound = q3 + 1.5 * iqr
df[column] = df[column].apply(
lambda x: upper_bound
if x > upper_bound
else lower_bound
if x < lower_bound
else x
)
return df
# Specify columns to handle outliers
outlier_columns = ["Energy (kWh)", "GHG Savings (kg)", "Gasoline Savings (gallons)"]
# Apply outlier handling
df = handle_outliers(df, outlier_columns)
# Convert 'Energy (kWh)' to a stationary series
adf_result = adfuller(df["Energy (kWh)"])
p_value = adf_result[1]
if p_value > 0.05:
    df["Energy (kWh)"] = df["Energy (kWh)"].diff()
    df.dropna(subset=["Energy (kWh)"], inplace=True)  # drop the NaN row created by differencing
# Scaling
scaler = MinMaxScaler()
columns_to_scale = ["Energy (kWh)", "GHG Savings (kg)", "Gasoline Savings (gallons)"]
df[columns_to_scale] = scaler.fit_transform(df[columns_to_scale])
# Confirm the preprocessing is complete
preprocessed = True
df.columns
# Generate data types of all columns
data_types = df.dtypes
# Print the data types
print(data_types)
# Stage 2: Model Identification
import matplotlib.pyplot as plt
from statsmodels.tsa.stattools import acf, pacf
# Select the 'Energy (kWh)' column from the preprocessed DataFrame
energy_data = df["Energy (kWh)"]
# Calculate the ACF and PACF values
acf_vals, conf_int_acf = acf(energy_data, nlags=50, alpha=0.05)
pacf_vals, conf_int_pacf = pacf(energy_data, nlags=50, alpha=0.05)
# Print the ACF values
print("ACF values:")
for lag, acf_val in enumerate(acf_vals):
    print(f"Lag {lag}: {acf_val}")  # enumerate starts at lag 0, whose autocorrelation is always 1
# Print the PACF values
print("PACF values:")
for lag, pacf_val in enumerate(pacf_vals):
    print(f"Lag {lag}: {pacf_val}")  # lag 0 is trivially 1
# Plot the ACF
fig, ax = plt.subplots(figsize=(12, 6))
plt.stem(range(len(acf_vals)), acf_vals, use_line_collection=True)
plt.xlabel("Lag")
plt.ylabel("Autocorrelation")
plt.title("ACF Plot")
plt.show()
# Plot the PACF
fig, ax = plt.subplots(figsize=(12, 6))
plt.stem(range(len(pacf_vals)), pacf_vals, use_line_collection=True)
plt.xlabel("Lag")
plt.ylabel("Partial Autocorrelation")
plt.title("PACF Plot")
plt.show()
from statsmodels.tsa.arima.model import ARIMA
# Define the optimal parameter values
p = 2
d = 0
q = 1
# Fit the ARIMA model
model = ARIMA(df["Energy (kWh)"], order=(p, d, q), enforce_stationarity=False)
model_fit = model.fit()
# Print the summary of the model
print(model_fit.summary())
# The output of the ARIMA model suggests that the model has been successfully fitted to the data. Here are some key points from the model summary:
# The optimal parameter values used are ARIMA(2, 0, 1), indicating an autoregressive order of 2, a differencing order of 0, and a moving average order of 1.
# The estimated coefficients for the AR and MA terms are as follows:
# AR(1): 0.9582
# AR(2): -0.0018
# MA(1): -0.9177
# The constant term (const) is estimated to be 0.3551.
# The estimated variance of the residuals (sigma2) is 0.0595.
# Some additional information provided by the model summary includes the Ljung-Box test for autocorrelation (Q-value), the Jarque-Bera test for normality (JB-value), and the heteroskedasticity test (H-value).
# Overall, the model seems to fit the data well, as indicated by the low AIC and BIC values and the significant coefficients. However, it's important to further evaluate the model's performance through diagnostic checks, such as examining residual plots, conducting hypothesis tests, and assessing forecast accuracy.
# Residual Analysis: Plotting the residuals helps us assess if they exhibit any patterns or remaining structure that the model did not capture.
# Ljung-Box Test: This test helps determine if the residuals are autocorrelated or exhibit any significant residual patterns.
# Jarque-Bera Test: This test assesses if the residuals follow a normal distribution.
# Forecast Evaluation: Comparing the model's forecasts to the actual values can provide insights into the accuracy of the predictions.
train_size = int(len(df) * 0.8)
train_data = df[:train_size]
test_data = df[train_size:]
# Import necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.graphics.gofplots import qqplot
from statsmodels.stats.diagnostic import acorr_ljungbox
from scipy.stats import jarque_bera
# Get the residuals
residuals = model_fit.resid
# Plotting the residuals
plt.figure(figsize=(10, 6))
plt.plot(residuals)
plt.title("Residuals")
plt.xlabel("Time")
plt.ylabel("Residual Value")
plt.show()
# Q-Q plot of the residuals
plt.figure(figsize=(8, 4))
qqplot(residuals, line="s")
plt.title("Q-Q Plot of Residuals")
plt.show()
# Ljung-Box test for residual autocorrelation
lb_result = acorr_ljungbox(residuals, lags=20)  # recent statsmodels returns a DataFrame
print(f"Ljung-Box test p-values:\n{lb_result['lb_pvalue']}")
# Jarque-Bera test for normality of residuals
_, p_value = jarque_bera(residuals)
print(f"Jarque-Bera test p-value: {p_value}")
print(test_data.columns)
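# Reporting on the original scale (a minimal sketch): "Energy (kWh)" was MinMax-scaled during
# preprocessing, so any statistics or forecasts above are on the [0, 1] scale. The scaler's
# inverse_transform undoes the scaling (it does not undo differencing, if that branch was taken);
# the columns must be passed in the same order used when the scaler was fitted.
unscaled = scaler.inverse_transform(
    df[["Energy (kWh)", "GHG Savings (kg)", "Gasoline Savings (gallons)"]]
)
energy_original_scale = unscaled[:, 0]  # first column corresponds to "Energy (kWh)"
print(energy_original_scale[:5])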
| false | 0 | 1,940 | 0 | 1,940 | 1,940 |
||
129170394
|
# ## **Introduction**
# This is my attempt at Case Study 1 for the Google Data Analytics Certificate. I will be applying the Ask, Prepare, Process, Analyze, and Share phases of the data analysis process, using what I learned through the course to provide recommendations for the current issues affecting Cyclistic.
# ## **About the Company - Cyclistic**
# A bike-share program that features more than 5,800 bicycles and 600 docking stations. Each bike is geotracked and locked into a network of 692 stations across Chicago (as of 2016). The bikes can be unlocked from one station and returned to any other station in the system at anytime. Cyclistic sets itself apart by also offering reclining bikes, hand tricycles, and cargo bikes, making bike-share more inclusive to people with
# disabilities and riders who can’t use a standard two-wheeled bike. The majority of riders opt for traditional bikes; about
# 8% of riders use the assistive options. Cyclistic users are more likely to ride for leisure, but about 30% use them to commute to work each day.
# ### Characters & Teams
# * **My Role:** I'm a junior data analyst working in the marketing analyst team at Cyclistic, tasked with designing marketing strategies that will grow the number of annual members and retain them.
# * **Lily Moreno:** The director of marketing. Moreno is responsible for the development of campaigns and initiatives to promote the bike-share program. These may include email, social media, and other channels.
# * **Cyclistic Marketing Analytics Team:** A team of data analysts who are responsible for collecting, analyzing, and reporting data that helps guide Cyclistic marketing strategy.
# * **Cyclistic Executive Team:** The detail-oriented executive team will decide whether to approve the recommended marketing program.
# ## **Case Study Scenario**
# Until now, Cyclistic’s marketing strategy relied on building general awareness and appealing to broad consumer segments.
# One approach that helped make these things possible was the flexibility of its pricing plans: single-ride passes, full-day passes,
# and annual memberships. Customers who purchase single-ride or full-day passes are referred to as casual riders. Customers
# who purchase annual memberships are Cyclistic members.
# Cyclistic’s finance analysts have concluded that annual members are much more profitable than casual riders. Although the
# pricing flexibility helps Cyclistic attract more customers, Moreno believes that maximizing the number of annual members will
# be key to future growth.
# Therefore, your team wants to understand how casual riders and annual members use Cyclistic bikes differently. From these insights, your team will design a new marketing strategy to convert casual riders into annual members. But first, Cyclistic executives
# must approve your recommendations, so they must be backed up with compelling data insights and professional data
# visualizations.
# # **Ask**
# **The Main Goal:**
# * Design marketing strategies aimed at converting casual riders into annual members.
# **Business Task, that will help provide insight on the Main Goal:**
# * How do annual members and casual riders use Cyclistic bikes differently?
# **What is the problem you are trying to solve?**
# * How do we make the Annual Membership more appealing to the casual riders using the Cyclistic bikes service?
# **How can your insights drive business decisions?**
# * If we can find the reasons why the majority of casual riders don't find the annual membership worth the cost, we can create solutions to make the membership more valuable to them.
# **Consider key stakeholders:**
# * They would not like anything that would show a decrease in profit at the price of more annual memberships.
# You will produce a report with the following deliverables:
# 1. A clear statement of the business task - **Completed**
# 2. A description of all data sources used -
# 3. Documentation of any cleaning or manipulation of data -
# 4. A summary of your analysis -
# 5. Supporting visualizations and key findings -
# 6. Your top three recommendations based on your analysis -
# # Prepare
# **Where is your data located?**
# * The dataset that I will be using for analysis is [cyclistic-tripdata-2022](https://www.kaggle.com/datasets/ryanalexander1532/cyclistic-tripdata-2022) that I added to Kaggle from the case study prompt.
# **How is the data organized?**
# * The data was collected in the form of 12 .csv files, one for each month of 2022.
# **Are there issues with bias or credibility in this data? Does your data ROCCC?**
# * This data was collected by Cyclistic itself, so it consists strictly of first-party data and is therefore highly credible. The data follows the ROCCC standard of being reliable, original, comprehensive, current, and cited.
# **How are you addressing licensing, privacy, security, and accessibility?**
# * The license was provided within the prompt and can be found [here](https://ride.divvybikes.com/data-license-agreement)
# * This data is all provided to us as open source by the company.
# * We were also informed that data-privacy issues prohibit us from using riders' personally identifiable information. This prevents us from connecting pass purchases to credit card numbers to determine whether casual riders live in the Cyclistic service area or have purchased multiple single passes.
# # Process
# **What tools are you choosing and why?**
# * Given the scale of the data I will be working with, spreadsheets would not be the best choice and would struggle to clean and manipulate the data in this exercise. So, I will be using R for my data cleaning and visualizations. R can handle large amounts of data and apply changes in a timely manner.
# **Have you ensured your data’s integrity?**
# * Upon examination of the data, the columns are consistent after some manipulation.
# **What steps have you taken to ensure that your data is clean?**
# * (The NA's and duplicates were removed, the time and dates were formatted.)
# **How can you verify that your data is clean and ready to analyze?**
# * (The steps shown in this section of the notebook show all cleaning steps and a summary of the data to ensure it is ready.)
# **Have you documented your cleaning process so you can review and share those results?**
# * (The cleaning process has been documented throughout)
# install(tidyverse)
# install(ggplot2)
# install(lubridate)
library(tidyverse)
library(lubridate)
library(ggplot2)
library(dplyr)
library(readr)
library(janitor)
library(data.table)
library(tidyr)
Jan01_2022 <- read_csv("/kaggle/input/cyclistic-tripdata-2022/202201-divvy-tripdata.csv")
Feb02_2022 <- read_csv("/kaggle/input/cyclistic-tripdata-2022/202202-divvy-tripdata.csv")
Mar03_2022 <- read_csv("/kaggle/input/cyclistic-tripdata-2022/202203-divvy-tripdata.csv")
Apr04_2022 <- read_csv("/kaggle/input/cyclistic-tripdata-2022/202204-divvy-tripdata.csv")
May05_2022 <- read_csv("/kaggle/input/cyclistic-tripdata-2022/202205-divvy-tripdata.csv")
June06_2022 <- read_csv("/kaggle/input/cyclistic-tripdata-2022/202206-divvy-tripdata.csv")
July07_2022 <- read_csv("/kaggle/input/cyclistic-tripdata-2022/202207-divvy-tripdata.csv")
Aug08_2022 <- read_csv("/kaggle/input/cyclistic-tripdata-2022/202208-divvy-tripdata.csv")
Sept09_2022 <- read_csv("/kaggle/input/cyclistic-tripdata-2022/202209-divvy-publictripdata.csv")
Oct10_2022 <- read_csv("/kaggle/input/cyclistic-tripdata-2022/202210-divvy-tripdata.csv")
Nov11_2022 <- read_csv("/kaggle/input/cyclistic-tripdata-2022/202211-divvy-tripdata.csv")
Dec12_2022 <- read_csv("/kaggle/input/cyclistic-tripdata-2022/202212-divvy-tripdata.csv")
spec(Jan01_2022)
spec(Feb02_2022)
spec(Mar03_2022)
spec(Apr04_2022)
spec(May05_2022)
spec(June06_2022)
spec(July07_2022)
spec(Aug08_2022)
spec(Sept09_2022)
spec(Oct10_2022)
spec(Nov11_2022)
spec(Dec12_2022)
colnames(Jan01_2022)
colnames(Feb02_2022)
colnames(Mar03_2022)
colnames(Apr04_2022)
colnames(May05_2022)
colnames(June06_2022)
colnames(July07_2022)
colnames(Aug08_2022)
colnames(Sept09_2022)
colnames(Oct10_2022)
colnames(Nov11_2022)
colnames(Dec12_2022)
str(Jan01_2022)
str(Feb02_2022)
str(Mar03_2022)
str(Apr04_2022)
str(May05_2022)
str(June06_2022)
str(July07_2022)
str(Aug08_2022)
str(Sept09_2022)
str(Oct10_2022)
str(Nov11_2022)
str(Dec12_2022)
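# Data cleaning (a minimal sketch): the Process section states that NAs and duplicates are removed
# and that dates/times are formatted. The steps below sketch how that could be done with the
# packages loaded above; the column names (ride_id, started_at, ended_at) are assumed to follow the
# 2022 Divvy trip-data schema.
all_trips <- bind_rows(
    Jan01_2022, Feb02_2022, Mar03_2022, Apr04_2022, May05_2022, June06_2022,
    July07_2022, Aug08_2022, Sept09_2022, Oct10_2022, Nov11_2022, Dec12_2022
)
all_trips <- all_trips %>%
    drop_na() %>% # remove rows with missing values
    distinct(ride_id, .keep_all = TRUE) %>% # remove duplicate rides
    mutate(
        ride_length = as.numeric(difftime(ended_at, started_at, units = "mins")),
        day_of_week = wday(started_at, label = TRUE)
    ) %>%
    filter(ride_length > 0) # drop zero- and negative-length rides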
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/170/129170394.ipynb
| null | null |
[{"Id": 129170394, "ScriptId": 38162177, "ParentScriptVersionId": NaN, "ScriptLanguageId": 12, "AuthorUserId": 14887340, "CreationDate": "05/11/2023 14:07:14", "VersionNumber": 2.0, "Title": "Google Data Analytics Capstone: Cyclistic Analysis", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 180.0, "LinesInsertedFromPrevious": 118.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 62.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# ## **Introduction**
# This is my attempt at Case Study 1 for the Google Data Analytics Certificate. I will be applying the Ask, Prepare, Process, Analyze, and Share phases of the data analysis process, using what I learned through the course to provide recommendations for the current issues affecting Cyclistic.
# ## **About the Company - Cyclistic**
# A bike-share program that features more than 5,800 bicycles and 600 docking stations. Each bike is geotracked and locked into a network of 692 stations across Chicago (as of 2016). The bikes can be unlocked from one station and returned to any other station in the system at anytime. Cyclistic sets itself apart by also offering reclining bikes, hand tricycles, and cargo bikes, making bike-share more inclusive to people with
# disabilities and riders who can’t use a standard two-wheeled bike. The majority of riders opt for traditional bikes; about
# 8% of riders use the assistive options. Cyclistic users are more likely to ride for leisure, but about 30% use them to commute to work each day.
# ### Characters & Teams
# * **My Role:** I'm a junior data analyst working in the marketing analyst team at Cyclistic, tasked with designing marketing strategies that will grow the number of annual members and retain them.
# * **Lily Moreno:** The director of marketing. Moreno is responsible for the development of campaigns and initiatives to promote the bike-share program. These may include email, social media, and other channels.
# * **Cyclistic Marketing Analytics Team:** A team of data analysts who are responsible for collecting, analyzing, and reporting data that helps guide Cyclistic marketing strategy.
# * **Cyclistic Executive Team:** The detail-oriented executive team will decide whether to approve the recommended marketing program.
# ## **Case Study Scenario**
# Until now, Cyclistic’s marketing strategy relied on building general awareness and appealing to broad consumer segments.
# One approach that helped make these things possible was the flexibility of its pricing plans: single-ride passes, full-day passes,
# and annual memberships. Customers who purchase single-ride or full-day passes are referred to as casual riders. Customers
# who purchase annual memberships are Cyclistic members.
# Cyclistic’s finance analysts have concluded that annual members are much more profitable than casual riders. Although the
# pricing flexibility helps Cyclistic attract more customers, Moreno believes that maximizing the number of annual members will
# be key to future growth.
# Therefore, your team wants to understand how casual riders and annual members use Cyclistic bikes differently. From these insights, your team will design a new marketing strategy to convert casual riders into annual members. But first, Cyclistic executives
# must approve your recommendations, so they must be backed up with compelling data insights and professional data
# visualizations.
# # **Ask**
# **The Main Goal:**
# * Design marketing strategies aimed at converting casual riders into annual members.
# **Business Task, that will help provide insight on the Main Goal:**
# * How do annual members and casual riders use Cyclistic bikes differently?
# **What is the problem you are trying to solve?**
# * How do we make the Annual Membership more appealing to the casual riders using the Cyclistic bikes service?
# **How can your insights drive business decisions?**
# * If we can find the reasons why the majority of casual riders don't find the annual membership worth the cost, we can create solutions to make the membership more valuable to them.
# **Consider key stakeholders:**
# * They would not like anything that would show a decrease in profit at the price of more annual memberships.
# You will produce a report with the following deliverables:
# 1. A clear statement of the business task - **Completed**
# 2. A description of all data sources used -
# 3. Documentation of any cleaning or manipulation of data -
# 4. A summary of your analysis -
# 5. Supporting visualizations and key findings -
# 6. Your top three recommendations based on your analysis -
# # Prepare
# **Where is your data located?**
# * The dataset that I will be using for analysis is [cyclistic-tripdata-2022](https://www.kaggle.com/datasets/ryanalexander1532/cyclistic-tripdata-2022) that I added to Kaggle from the case study prompt.
# **How is the data organized?**
# * The data was collected in the form of 12 .csv files, one for each month of 2022.
# **Are there issues with bias or credibility in this data? Does your data ROCCC?**
# * This data was collected by Cyclistic itself, so it consists strictly of first-party data and is therefore highly credible. The data follows the ROCCC standard of being reliable, original, comprehensive, current, and cited.
# **How are you addressing licensing, privacy, security, and accessibility?**
# * The license was provided within the prompt and can be found [here](https://ride.divvybikes.com/data-license-agreement)
# * This data is all provided to us as open source by the company.
# * We were also informed that data-privacy issues prohibit us from using riders' personally identifiable information. This prevents us from connecting pass purchases to credit card numbers to determine whether casual riders live in the Cyclistic service area or have purchased multiple single passes.
# # Process
# **What tools are you choosing and why?**
# * Given the scale of the data I will be working with, spreadsheets would not be the best choice and would struggle to clean and manipulate the data in this exercise. So, I will be using R for my data cleaning and visualizations. R can handle large amounts of data and apply changes in a timely manner.
# **Have you ensured your data’s integrity?**
# * Upon examination of the data, the columns are consistent after some manipulation.
# **What steps have you taken to ensure that your data is clean?**
# * (The NA's and duplicates were removed, the time and dates were formatted.)
# **How can you verify that your data is clean and ready to analyze?**
# * (The steps shown in this section of the notebook show all cleaning steps and a summary of the data to ensure it is ready.)
# **Have you documented your cleaning process so you can review and share those results?**
# * (The cleaning process has been documented throughout)
# install(tidyverse)
# install(ggplot2)
# install(lubridate)
library(tidyverse)
library(lubridate)
library(ggplot2)
library(dplyr)
library(readr)
library(janitor)
library(data.table)
library(tidyr)
Jan01_2022 <- read_csv("/kaggle/input/cyclistic-tripdata-2022/202201-divvy-tripdata.csv")
Feb02_2022 <- read_csv("/kaggle/input/cyclistic-tripdata-2022/202202-divvy-tripdata.csv")
Mar03_2022 <- read_csv("/kaggle/input/cyclistic-tripdata-2022/202203-divvy-tripdata.csv")
Apr04_2022 <- read_csv("/kaggle/input/cyclistic-tripdata-2022/202204-divvy-tripdata.csv")
May05_2022 <- read_csv("/kaggle/input/cyclistic-tripdata-2022/202205-divvy-tripdata.csv")
June06_2022 <- read_csv("/kaggle/input/cyclistic-tripdata-2022/202206-divvy-tripdata.csv")
July07_2022 <- read_csv("/kaggle/input/cyclistic-tripdata-2022/202207-divvy-tripdata.csv")
Aug08_2022 <- read_csv("/kaggle/input/cyclistic-tripdata-2022/202208-divvy-tripdata.csv")
Sept09_2022 <- read_csv("/kaggle/input/cyclistic-tripdata-2022/202209-divvy-publictripdata.csv")
Oct10_2022 <- read_csv("/kaggle/input/cyclistic-tripdata-2022/202210-divvy-tripdata.csv")
Nov11_2022 <- read_csv("/kaggle/input/cyclistic-tripdata-2022/202211-divvy-tripdata.csv")
Dec12_2022 <- read_csv("/kaggle/input/cyclistic-tripdata-2022/202212-divvy-tripdata.csv")
spec(Jan01_2022)
spec(Feb02_2022)
spec(Mar03_2022)
spec(Apr04_2022)
spec(May05_2022)
spec(June06_2022)
spec(July07_2022)
spec(Aug08_2022)
spec(Sept09_2022)
spec(Oct10_2022)
spec(Nov11_2022)
spec(Dec12_2022)
colnames(Jan01_2022)
colnames(Feb02_2022)
colnames(Mar03_2022)
colnames(Apr04_2022)
colnames(May05_2022)
colnames(June06_2022)
colnames(July07_2022)
colnames(Aug08_2022)
colnames(Sept09_2022)
colnames(Oct10_2022)
colnames(Nov11_2022)
colnames(Dec12_2022)
str(Jan01_2022)
str(Feb02_2022)
str(Mar03_2022)
str(Apr04_2022)
str(May05_2022)
str(June06_2022)
str(July07_2022)
str(Aug08_2022)
str(Sept09_2022)
str(Oct10_2022)
str(Nov11_2022)
str(Dec12_2022)
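# Analysis sketch for the business task ("How do annual members and casual riders use Cyclistic
# bikes differently?"): this assumes a combined, cleaned data frame all_trips with the columns
# member_casual, ride_length (in minutes) and day_of_week, prepared from the twelve monthly files
# with bind_rows and the cleaning steps described in the Process section.
all_trips %>%
    group_by(member_casual, day_of_week) %>%
    summarise(
        number_of_rides = n(),
        average_ride_length = mean(ride_length),
        .groups = "drop"
    ) %>%
    arrange(member_casual, day_of_week)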
| false | 0 | 2,704 | 0 | 2,704 | 2,704 |
||
129170067
|
# # Decision Tree on Iris dataset
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv("/kaggle/input/iris-dataset/iris.csv")
df
# # Without Grid Search CV
df.shape
df.isnull().sum()
cols = list(df.columns)
cols
for x in cols:
if df[x].dtypes == "int64" or df[x].dtypes == "float64":
plt.hist(df[x])
plt.xlabel(x)
plt.ylabel("count")
plt.show()
# # Convert to numeric using Label Encoder
from sklearn.preprocessing import LabelEncoder
labelencoder = LabelEncoder()
df["species"] = labelencoder.fit_transform(df["species"])
df
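# A small check of the encoding (assumes the fitted labelencoder above): classes_ lists the
# original species names in the order of their integer codes 0, 1, 2.
print(dict(zip(labelencoder.classes_, labelencoder.transform(labelencoder.classes_))))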
df.columns
# df['species'] = pd.factorize(df['species'])[0]
# Convert the 'species' column to an integer data type
# df['species'] = df['species'].astype(int)
df["species"].value_counts()
df.reset_index(drop=True, inplace=True)
df
x = df.iloc[:, :-1]
y = df.iloc[:, -1]
x
y
from sklearn.model_selection import train_test_split
xtrain, xtest, ytrain, ytest = train_test_split(x, y, test_size=0.25, random_state=20)
xtrain.shape, xtest.shape, ytrain.shape, ytest.shape
from sklearn.tree import DecisionTreeClassifier
dt = DecisionTreeClassifier(max_depth=4)
dt.fit(xtrain, ytrain)
pred = dt.predict(xtest)
pred
from sklearn.metrics import accuracy_score
accuracy_score(ytest, pred)
from sklearn.metrics import confusion_matrix
confusion_matrix(ytest, pred)
# roc_auc_score cannot be used on the hard class predictions here because the target is multi-class (three species rather than just 0 and 1); it needs per-class probabilities and a multi_class strategy, as shown in the sketch below
from sklearn.metrics import classification_report
print(classification_report(ytest, pred))
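# Multi-class ROC AUC (a minimal sketch): roc_auc_score does work with more than two classes when
# it is given per-class probabilities and a one-vs-rest strategy, so this reuses the fitted tree
# dt and the existing test split.
from sklearn.metrics import roc_auc_score
proba = dt.predict_proba(xtest)  # shape (n_samples, 3), one column per species
print(roc_auc_score(ytest, proba, multi_class="ovr", average="macro"))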
from sklearn import tree
tree.plot_tree(dt)
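# # With Grid Search CV
# A minimal sketch of the tuned counterpart to the "Without Grid Search CV" section above; the
# parameter grid values are illustrative assumptions, not tuned choices.
from sklearn.model_selection import GridSearchCV
param_grid = {"max_depth": [2, 3, 4, 5], "min_samples_leaf": [1, 2, 4]}
grid = GridSearchCV(
    DecisionTreeClassifier(random_state=1), param_grid, cv=5, scoring="accuracy"
)
grid.fit(xtrain, ytrain)
print(grid.best_params_, grid.best_score_)
print(accuracy_score(ytest, grid.predict(xtest)))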
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/170/129170067.ipynb
| null | null |
[{"Id": 129170067, "ScriptId": 38401083, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13314142, "CreationDate": "05/11/2023 14:04:31", "VersionNumber": 1.0, "Title": "Iris Dataset - Decision Tree Classifier", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 84.0, "LinesInsertedFromPrevious": 84.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# # Decision Tree on Iris dataset
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv("/kaggle/input/iris-dataset/iris.csv")
df
# # Without Grid Search CV
df.shape
df.isnull().sum()
cols = list(df.columns)
cols
for x in cols:
if df[x].dtypes == "int64" or df[x].dtypes == "float64":
plt.hist(df[x])
plt.xlabel(x)
plt.ylabel("count")
plt.show()
# # Convert to numeric using Label Encoder
from sklearn.preprocessing import LabelEncoder
labelencoder = LabelEncoder()
df["species"] = labelencoder.fit_transform(df["species"])
df
df.columns
# df['species'] = pd.factorize(df['species'])[0]
# Convert the 'species' column to an integer data type
# df['species'] = df['species'].astype(int)
df["species"].value_counts()
df.reset_index(drop=True, inplace=True)
df
x = df.iloc[:, :-1]
y = df.iloc[:, -1]
x
y
from sklearn.model_selection import train_test_split
xtrain, xtest, ytrain, ytest = train_test_split(x, y, test_size=0.25, random_state=20)
xtrain.shape, xtest.shape, ytrain.shape, ytest.shape
from sklearn.tree import DecisionTreeClassifier
dt = DecisionTreeClassifier(max_depth=4)
dt.fit(xtrain, ytrain)
pred = dt.predict(xtest)
pred
from sklearn.metrics import accuracy_score
accuracy_score(ytest, pred)
from sklearn.metrics import confusion_matrix
confusion_matrix(ytest, pred)
# roc_auc_score cannot be used on the hard class predictions here because the target is multi-class (three species rather than just 0 and 1); it would need per-class probabilities and a multi_class strategy
from sklearn.metrics import classification_report
print(classification_report(ytest, pred))
from sklearn import tree
tree.plot_tree(dt)
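# Feature importances (a minimal sketch): besides plotting the tree, the fitted classifier exposes
# feature_importances_, which shows how much each measurement contributes to the splits.
importances = pd.Series(dt.feature_importances_, index=x.columns).sort_values(
    ascending=False
)
print(importances)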
| false | 0 | 517 | 0 | 517 | 517 |
||
129170911
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
b = pd.read_csv("/kaggle/input/boston-dataset/Boston.csv")
b
b = b.drop("Unnamed: 0", axis=1)  # dropped unnecessary index column
b.info()
b.isnull().sum()
col = list(b.columns)
col
for x in col:
if b[x].dtypes == "int64" or b[x].dtypes == "float64":
plt.hist(b[x])
plt.xlabel(x)  # label each histogram with its own column name
plt.ylabel("count")
plt.show()
x = b.iloc[:, :-1]
x
y = b.iloc[:, -1]
from sklearn.model_selection import train_test_split
xtrain, xtest, ytrain, ytest = train_test_split(x, y, test_size=0.25, random_state=123)
# # Without Gridsearch
from sklearn.tree import DecisionTreeRegressor
dt = DecisionTreeRegressor(max_depth=4)
dt.fit(xtrain, ytrain)
pred = dt.predict(xtest)
from sklearn.metrics import r2_score
r2_score(ytest, pred)
from sklearn.metrics import mean_squared_error
mean_squared_error(ytest, pred)
import numpy as np
np.sqrt(mean_squared_error(ytest, pred))
residuals = ytest - pred
sns.distplot(residuals)
from sklearn import tree
tree.plot_tree(dt, fontsize=8)
# # With Grid Search
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import GridSearchCV
dtr = DecisionTreeRegressor(random_state=2)
params = {"max_depth": [1, 2], "min_samples_leaf": [1, 2, 3, 4]}
grid = GridSearchCV(param_grid=params, estimator=dtr, cv=3, scoring="r2")
grid.fit(xtrain, ytrain)
grid.best_score_
grid.best_estimator_
grid.best_params_
pred1 = grid.predict(xtest)
from sklearn.metrics import r2_score
r2_score(ytest, pred1)
from sklearn.metrics import mean_squared_error
print(np.sqrt(mean_squared_error(ytest, pred1)))  # RMSE for the grid-searched model's predictions
from sklearn import tree
tree.plot_tree(grid.best_estimator_, fontsize=9)
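# Predicted vs. actual (a minimal sketch): alongside R^2, RMSE and the residual distribution, a
# scatter of the grid-searched model's test predictions (pred1) against the true values makes
# over- and under-prediction visible.
plt.figure(figsize=(6, 6))
plt.scatter(ytest, pred1, alpha=0.6)
plt.plot(
    [ytest.min(), ytest.max()], [ytest.min(), ytest.max()], color="red", linestyle="--"
)
plt.xlabel("Actual value")
plt.ylabel("Predicted value")
plt.title("Decision tree regressor: predicted vs. actual")
plt.show()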
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/170/129170911.ipynb
| null | null |
[{"Id": 129170911, "ScriptId": 38401503, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13314142, "CreationDate": "05/11/2023 14:11:43", "VersionNumber": 1.0, "Title": "Boston Dataset - Decision Tree Regressor", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 98.0, "LinesInsertedFromPrevious": 98.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
b = pd.read_csv("/kaggle/input/boston-dataset/Boston.csv")
b
b = b.drop("Unnamed: 0", axis=1)  # dropped unnecessary index column
b.info()
b.isnull().sum()
col = list(b.columns)
col
for x in col:
if b[x].dtypes == "int64" or b[x].dtypes == "float64":
plt.hist(b[x])
plt.xlabel(x)  # label each histogram with its own column name
plt.ylabel("count")
plt.show()
x = b.iloc[:, :-1]
x
y = b.iloc[:, -1]
from sklearn.model_selection import train_test_split
xtrain, xtest, ytrain, ytest = train_test_split(x, y, test_size=0.25, random_state=123)
# # Without Gridsearch
from sklearn.tree import DecisionTreeRegressor
dt = DecisionTreeRegressor(max_depth=4)
dt.fit(xtrain, ytrain)
pred = dt.predict(xtest)
from sklearn.metrics import r2_score
r2_score(ytest, pred)
from sklearn.metrics import mean_squared_error
mean_squared_error(ytest, pred)
import numpy as np
np.sqrt(mean_squared_error(ytest, pred))
residuals = ytest - pred
sns.distplot(residuals)
from sklearn import tree
tree.plot_tree(dt, fontsize=8)
# # With Grid Search
from sklearn.tree import DecisionTreeRegressor
from sklearn.model_selection import GridSearchCV
dtr = DecisionTreeRegressor(random_state=2)
params = {"max_depth": [1, 2], "min_samples_leaf": [1, 2, 3, 4]}
grid = GridSearchCV(param_grid=params, estimator=dtr, cv=3, scoring="r2")
grid.fit(xtrain, ytrain)
grid.best_score_
grid.best_estimator_
grid.best_params_
pred1 = grid.predict(xtest)
from sklearn.metrics import r2_score
r2_score(ytest, pred1)
from sklearn.metrics import mean_squared_error
print(np.sqrt(mean_squared_error(ytest, pred1)))  # RMSE for the grid-searched model's predictions
from sklearn import tree
tree.plot_tree(grid.best_estimator_, fontsize=9)
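# Feature importances (a minimal sketch): the tuned tree exposes feature_importances_, which shows
# which features drive the splits; x is the feature DataFrame defined above.
importances = pd.Series(
    grid.best_estimator_.feature_importances_, index=x.columns
).sort_values(ascending=False)
print(importances)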
| false | 0 | 605 | 0 | 605 | 605 |
||
129077795
|
<jupyter_start><jupyter_text>Credit Card Fraud Detection
Context
---------
It is important that credit card companies are able to recognize fraudulent credit card transactions so that customers are not charged for items that they did not purchase.
Content
---------
The dataset contains transactions made by credit cards in September 2013 by European cardholders.
This dataset presents transactions that occurred in two days, where we have 492 frauds out of 284,807 transactions. The dataset is highly unbalanced, the positive class (frauds) account for 0.172% of all transactions.
It contains only numerical input variables which are the result of a PCA transformation. Unfortunately, due to confidentiality issues, we cannot provide the original features and more background information about the data. Features V1, V2, ... V28 are the principal components obtained with PCA, the only features which have not been transformed with PCA are 'Time' and 'Amount'. Feature 'Time' contains the seconds elapsed between each transaction and the first transaction in the dataset. The feature 'Amount' is the transaction Amount, this feature can be used for example-dependant cost-sensitive learning. Feature 'Class' is the response variable and it takes value 1 in case of fraud and 0 otherwise.
Given the class imbalance ratio, we recommend measuring the accuracy using the Area Under the Precision-Recall Curve (AUPRC). Confusion matrix accuracy is not meaningful for unbalanced classification.
Update (03/05/2021)
---------
A simulator for transaction data has been released as part of the practical handbook on Machine Learning for Credit Card Fraud Detection - https://fraud-detection-handbook.github.io/fraud-detection-handbook/Chapter_3_GettingStarted/SimulatedDataset.html. We invite all practitioners interested in fraud detection datasets to also check out this data simulator, and the methodologies for credit card fraud detection presented in the book.
Acknowledgements
---------
The dataset has been collected and analysed during a research collaboration of Worldline and the Machine Learning Group (http://mlg.ulb.ac.be) of ULB (Université Libre de Bruxelles) on big data mining and fraud detection.
More details on current and past projects on related topics are available on [https://www.researchgate.net/project/Fraud-detection-5][1] and the page of the [DefeatFraud][2] project
Please cite the following works:
Andrea Dal Pozzolo, Olivier Caelen, Reid A. Johnson and Gianluca Bontempi. [Calibrating Probability with Undersampling for Unbalanced Classification.][3] In Symposium on Computational Intelligence and Data Mining (CIDM), IEEE, 2015
Dal Pozzolo, Andrea; Caelen, Olivier; Le Borgne, Yann-Ael; Waterschoot, Serge; Bontempi, Gianluca. [Learned lessons in credit card fraud detection from a practitioner perspective][4], Expert systems with applications,41,10,4915-4928,2014, Pergamon
Dal Pozzolo, Andrea; Boracchi, Giacomo; Caelen, Olivier; Alippi, Cesare; Bontempi, Gianluca. [Credit card fraud detection: a realistic modeling and a novel learning strategy,][5] IEEE transactions on neural networks and learning systems,29,8,3784-3797,2018,IEEE
Dal Pozzolo, Andrea [Adaptive Machine learning for credit card fraud detection][6] ULB MLG PhD thesis (supervised by G. Bontempi)
Carcillo, Fabrizio; Dal Pozzolo, Andrea; Le Borgne, Yann-Aël; Caelen, Olivier; Mazzer, Yannis; Bontempi, Gianluca. [Scarff: a scalable framework for streaming credit card fraud detection with Spark][7], Information fusion,41, 182-194,2018,Elsevier
Carcillo, Fabrizio; Le Borgne, Yann-Aël; Caelen, Olivier; Bontempi, Gianluca. [Streaming active learning strategies for real-life credit card fraud detection: assessment and visualization,][8] International Journal of Data Science and Analytics, 5,4,285-300,2018,Springer International Publishing
Bertrand Lebichot, Yann-Aël Le Borgne, Liyun He, Frederic Oblé, Gianluca Bontempi [Deep-Learning Domain Adaptation Techniques for Credit Cards Fraud Detection](https://www.researchgate.net/publication/332180999_Deep-Learning_Domain_Adaptation_Techniques_for_Credit_Cards_Fraud_Detection), INNSBDDL 2019: Recent Advances in Big Data and Deep Learning, pp 78-88, 2019
Fabrizio Carcillo, Yann-Aël Le Borgne, Olivier Caelen, Frederic Oblé, Gianluca Bontempi [Combining Unsupervised and Supervised Learning in Credit Card Fraud Detection ](https://www.researchgate.net/publication/333143698_Combining_Unsupervised_and_Supervised_Learning_in_Credit_Card_Fraud_Detection) Information Sciences, 2019
Yann-Aël Le Borgne, Gianluca Bontempi [Reproducible machine Learning for Credit Card Fraud Detection - Practical Handbook ](https://www.researchgate.net/publication/351283764_Machine_Learning_for_Credit_Card_Fraud_Detection_-_Practical_Handbook)
Bertrand Lebichot, Gianmarco Paldino, Wissam Siblini, Liyun He, Frederic Oblé, Gianluca Bontempi [Incremental learning strategies for credit cards fraud detection](https://www.researchgate.net/publication/352275169_Incremental_learning_strategies_for_credit_cards_fraud_detection), IInternational Journal of Data Science and Analytics
[1]: https://www.researchgate.net/project/Fraud-detection-5
[2]: https://mlg.ulb.ac.be/wordpress/portfolio_page/defeatfraud-assessment-and-validation-of-deep-feature-engineering-and-learning-solutions-for-fraud-detection/
[3]: https://www.researchgate.net/publication/283349138_Calibrating_Probability_with_Undersampling_for_Unbalanced_Classification
[4]: https://www.researchgate.net/publication/260837261_Learned_lessons_in_credit_card_fraud_detection_from_a_practitioner_perspective
[5]: https://www.researchgate.net/publication/319867396_Credit_Card_Fraud_Detection_A_Realistic_Modeling_and_a_Novel_Learning_Strategy
[6]: http://di.ulb.ac.be/map/adalpozz/pdf/Dalpozzolo2015PhD.pdf
[7]: https://www.researchgate.net/publication/319616537_SCARFF_a_Scalable_Framework_for_Streaming_Credit_Card_Fraud_Detection_with_Spark
[8]: https://www.researchgate.net/publication/332180999_Deep-Learning_Domain_Adaptation_Techniques_for_Credit_Cards_Fraud_Detection
Kaggle dataset identifier: creditcardfraud
<jupyter_script># Credit Card Fraud Detection Using Semi-Supervised Learning
# # Content
# - Introduction
# - Load packages
# - Load the data
# - Check data
# - Go through the data
# - Check Null Values
# - Check data unbalance
# - Data exploration
# - Predictive models
# - RandomForestClassifier
# - AdaBoostClassifier
# - CatBoostClassifier
# - XGBoost
# - LightGBM
# - Conclusions
# - References
# # Introduction
# The dataset contains transactions made by credit cards in September 2013 by European cardholders.
# This dataset presents transactions that occurred in two days, where we have 492 frauds out of 284,807 transactions. The dataset is highly unbalanced, the positive class (frauds) account for 0.172% of all transactions.
# It contains only numerical input variables which are the result of a PCA transformation. Unfortunately, due to confidentiality issues, we cannot provide the original features and more background information about the data. Features V1, V2, … V28 are the principal components obtained with PCA, the only features which have not been transformed with PCA are 'Time' and 'Amount'. Feature 'Time' contains the seconds elapsed between each transaction and the first transaction in the dataset. The feature 'Amount' is the transaction Amount, this feature can be used for example-dependant cost-sensitive learning. Feature 'Class' is the response variable and it takes value 1 in case of fraud and 0 otherwise.
# Given the class imbalance ratio, we recommend measuring the accuracy using the Area Under the Precision-Recall Curve (AUPRC). Confusion matrix accuracy is not meaningful for unbalanced classification.
# ### Update (03/05/2021)
# A simulator for transaction data has been released as part of the practical handbook on Machine Learning for Credit Card Fraud Detection - https://fraud-detection-handbook.github.io/fraud-detection-handbook/Chapter_3_GettingStarted/SimulatedDataset.html. We invite all practitioners interested in fraud detection datasets to also check out this data simulator, and the methodologies for credit card fraud detection presented in the book.
# # Load packages
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.graph_objs as go
import plotly.figure_factory as ff
from plotly import tools
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
init_notebook_mode(connected=True)
import gc
from datetime import datetime
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.metrics import roc_auc_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from catboost import CatBoostClassifier
from sklearn import svm
import lightgbm as lgb
from lightgbm import LGBMClassifier
import xgboost as xgb
# # Load the data
df = pd.read_csv("../input/creditcardfraud/creditcard.csv")
# # Check data
print(df.shape[0], df.shape[1])
# ## Go through the data
df.head()
df.tail()
df.sample(10)
df.describe()
df.shape
# Total of 31 columns ... do we need all of them?
# ## Check Null Values
df.isnull()
df.isnull().sum()
# Great No Missing Values
total = df.isnull().sum()
percent = (df.isnull().sum() / df.isnull().count() * 100).sort_values(ascending=False)
pd.concat([total, percent], axis=1, keys=["Total", "Percent"]).transpose()
# ## Data unbalance
temp = df["Class"].value_counts()
class_df = pd.DataFrame({"Class": temp.index, "values": temp.values})  # keep df as the raw transactions
trace = go.Bar(
    x=class_df["Class"],
    y=class_df["values"],
    name="Credit Card Fraud Class - data unbalance (Not fraud = 0, Fraud = 1)",
    marker=dict(color="Red"),
    text=class_df["values"],
)
data = [trace]
layout = dict(
title="Credit Card Fraud Class - data unbalance (Not fraud = 0, Fraud = 1)",
xaxis=dict(title="Class", showticklabels=True),
yaxis=dict(title="Number of transactions"),
hovermode="closest",
width=600,
)
fig = dict(data=data, layout=layout)
iplot(fig, filename="class")
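# # AUPRC baseline (a minimal sketch)
# The introduction recommends measuring performance with the Area Under the Precision-Recall Curve
# (AUPRC) because of the heavy class imbalance. The predictive models listed in the Content section
# have not been built yet at this point, so the sketch below fits a small RandomForestClassifier on
# a stratified split purely to show how AUPRC (average_precision_score) would be computed; the
# hyperparameters are illustrative assumptions.
from sklearn.metrics import average_precision_score
X = df.drop("Class", axis=1)
y = df["Class"]
X_tr, X_te, y_tr, y_te = train_test_split(
    X, y, test_size=0.2, stratify=y, random_state=42
)
rf = RandomForestClassifier(n_estimators=100, n_jobs=-1, random_state=42)
rf.fit(X_tr, y_tr)
scores = rf.predict_proba(X_te)[:, 1]
print("AUPRC (average precision):", average_precision_score(y_te, scores))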
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/077/129077795.ipynb
|
creditcardfraud
| null |
[{"Id": 129077795, "ScriptId": 38370802, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10131870, "CreationDate": "05/10/2023 20:11:56", "VersionNumber": 1.0, "Title": "Fraud Detection Using Semi-Supervised Learning", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 121.0, "LinesInsertedFromPrevious": 121.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
|
[{"Id": 184812308, "KernelVersionId": 129077795, "SourceDatasetVersionId": 23498}]
|
[{"Id": 23498, "DatasetId": 310, "DatasourceVersionId": 23502, "CreatorUserId": 998023, "LicenseName": "Database: Open Database, Contents: Database Contents", "CreationDate": "03/23/2018 01:17:27", "VersionNumber": 3.0, "Title": "Credit Card Fraud Detection", "Slug": "creditcardfraud", "Subtitle": "Anonymized credit card transactions labeled as fraudulent or genuine", "Description": "Context\n---------\n\nIt is important that credit card companies are able to recognize fraudulent credit card transactions so that customers are not charged for items that they did not purchase.\n\nContent\n---------\n\nThe dataset contains transactions made by credit cards in September 2013 by European cardholders. \nThis dataset presents transactions that occurred in two days, where we have 492 frauds out of 284,807 transactions. The dataset is highly unbalanced, the positive class (frauds) account for 0.172% of all transactions.\n\nIt contains only numerical input variables which are the result of a PCA transformation. Unfortunately, due to confidentiality issues, we cannot provide the original features and more background information about the data. Features V1, V2, ... V28 are the principal components obtained with PCA, the only features which have not been transformed with PCA are 'Time' and 'Amount'. Feature 'Time' contains the seconds elapsed between each transaction and the first transaction in the dataset. The feature 'Amount' is the transaction Amount, this feature can be used for example-dependant cost-sensitive learning. Feature 'Class' is the response variable and it takes value 1 in case of fraud and 0 otherwise. \n\nGiven the class imbalance ratio, we recommend measuring the accuracy using the Area Under the Precision-Recall Curve (AUPRC). Confusion matrix accuracy is not meaningful for unbalanced classification.\n\nUpdate (03/05/2021)\n---------\n\nA simulator for transaction data has been released as part of the practical handbook on Machine Learning for Credit Card Fraud Detection - https://fraud-detection-handbook.github.io/fraud-detection-handbook/Chapter_3_GettingStarted/SimulatedDataset.html. We invite all practitioners interested in fraud detection datasets to also check out this data simulator, and the methodologies for credit card fraud detection presented in the book.\n\nAcknowledgements\n---------\n\nThe dataset has been collected and analysed during a research collaboration of Worldline and the Machine Learning Group (http://mlg.ulb.ac.be) of ULB (Universit\u00e9 Libre de Bruxelles) on big data mining and fraud detection.\nMore details on current and past projects on related topics are available on [https://www.researchgate.net/project/Fraud-detection-5][1] and the page of the [DefeatFraud][2] project\n\nPlease cite the following works: \n\nAndrea Dal Pozzolo, Olivier Caelen, Reid A. Johnson and Gianluca Bontempi. [Calibrating Probability with Undersampling for Unbalanced Classification.][3] In Symposium on Computational Intelligence and Data Mining (CIDM), IEEE, 2015\n\nDal Pozzolo, Andrea; Caelen, Olivier; Le Borgne, Yann-Ael; Waterschoot, Serge; Bontempi, Gianluca. [Learned lessons in credit card fraud detection from a practitioner perspective][4], Expert systems with applications,41,10,4915-4928,2014, Pergamon\n\nDal Pozzolo, Andrea; Boracchi, Giacomo; Caelen, Olivier; Alippi, Cesare; Bontempi, Gianluca. 
[Credit card fraud detection: a realistic modeling and a novel learning strategy,][5] IEEE transactions on neural networks and learning systems,29,8,3784-3797,2018,IEEE\n\nDal Pozzolo, Andrea [Adaptive Machine learning for credit card fraud detection][6] ULB MLG PhD thesis (supervised by G. Bontempi)\n\nCarcillo, Fabrizio; Dal Pozzolo, Andrea; Le Borgne, Yann-A\u00ebl; Caelen, Olivier; Mazzer, Yannis; Bontempi, Gianluca. [Scarff: a scalable framework for streaming credit card fraud detection with Spark][7], Information fusion,41, 182-194,2018,Elsevier\n\nCarcillo, Fabrizio; Le Borgne, Yann-A\u00ebl; Caelen, Olivier; Bontempi, Gianluca. [Streaming active learning strategies for real-life credit card fraud detection: assessment and visualization,][8] International Journal of Data Science and Analytics, 5,4,285-300,2018,Springer International Publishing\n\nBertrand Lebichot, Yann-A\u00ebl Le Borgne, Liyun He, Frederic Obl\u00e9, Gianluca Bontempi [Deep-Learning Domain Adaptation Techniques for Credit Cards Fraud Detection](https://www.researchgate.net/publication/332180999_Deep-Learning_Domain_Adaptation_Techniques_for_Credit_Cards_Fraud_Detection), INNSBDDL 2019: Recent Advances in Big Data and Deep Learning, pp 78-88, 2019\n\nFabrizio Carcillo, Yann-A\u00ebl Le Borgne, Olivier Caelen, Frederic Obl\u00e9, Gianluca Bontempi [Combining Unsupervised and Supervised Learning in Credit Card Fraud Detection ](https://www.researchgate.net/publication/333143698_Combining_Unsupervised_and_Supervised_Learning_in_Credit_Card_Fraud_Detection) Information Sciences, 2019\n\nYann-A\u00ebl Le Borgne, Gianluca Bontempi [Reproducible machine Learning for Credit Card Fraud Detection - Practical Handbook ](https://www.researchgate.net/publication/351283764_Machine_Learning_for_Credit_Card_Fraud_Detection_-_Practical_Handbook) \n\nBertrand Lebichot, Gianmarco Paldino, Wissam Siblini, Liyun He, Frederic Obl\u00e9, Gianluca Bontempi [Incremental learning strategies for credit cards fraud detection](https://www.researchgate.net/publication/352275169_Incremental_learning_strategies_for_credit_cards_fraud_detection), IInternational Journal of Data Science and Analytics\n\n [1]: https://www.researchgate.net/project/Fraud-detection-5\n [2]: https://mlg.ulb.ac.be/wordpress/portfolio_page/defeatfraud-assessment-and-validation-of-deep-feature-engineering-and-learning-solutions-for-fraud-detection/\n [3]: https://www.researchgate.net/publication/283349138_Calibrating_Probability_with_Undersampling_for_Unbalanced_Classification\n [4]: https://www.researchgate.net/publication/260837261_Learned_lessons_in_credit_card_fraud_detection_from_a_practitioner_perspective\n [5]: https://www.researchgate.net/publication/319867396_Credit_Card_Fraud_Detection_A_Realistic_Modeling_and_a_Novel_Learning_Strategy\n [6]: http://di.ulb.ac.be/map/adalpozz/pdf/Dalpozzolo2015PhD.pdf\n [7]: https://www.researchgate.net/publication/319616537_SCARFF_a_Scalable_Framework_for_Streaming_Credit_Card_Fraud_Detection_with_Spark\n \n[8]: https://www.researchgate.net/publication/332180999_Deep-Learning_Domain_Adaptation_Techniques_for_Credit_Cards_Fraud_Detection", "VersionNotes": "Fixed preview", "TotalCompressedBytes": 150828752.0, "TotalUncompressedBytes": 69155632.0}]
|
[{"Id": 310, "CreatorUserId": 14069, "OwnerUserId": NaN, "OwnerOrganizationId": 1160.0, "CurrentDatasetVersionId": 23498.0, "CurrentDatasourceVersionId": 23502.0, "ForumId": 1838, "Type": 2, "CreationDate": "11/03/2016 13:21:36", "LastActivityDate": "02/06/2018", "TotalViews": 10310781, "TotalDownloads": 564249, "TotalVotes": 10432, "TotalKernels": 4266}]
| null |
# Credit Card Fraud Detection Using Semi-Supervised Learning
# # Content
# - Introduction
# - Load packages
# - Load the data
# - Check data
# - Go through the data
# - Check Null Values
# - Check data unbalance
# - Data exploration
# - Predictive models
# - RandomForrestClassifier
# - AdaBoostClassifier
# - CatBoostClassifier
# - XGBoost
# - LightGBM
# - Conclusions
# - References
# # Introduction
# The dataset contains transactions made by credit cards in September 2013 by European cardholders.
# This dataset presents transactions that occurred in two days, where we have 492 frauds out of 284,807 transactions. The dataset is highly unbalanced, the positive class (frauds) account for 0.172% of all transactions.
# It contains only numerical input variables which are the result of a PCA transformation. Unfortunately, due to confidentiality issues, we cannot provide the original features and more background information about the data. Features V1, V2, … V28 are the principal components obtained with PCA, the only features which have not been transformed with PCA are 'Time' and 'Amount'. Feature 'Time' contains the seconds elapsed between each transaction and the first transaction in the dataset. The feature 'Amount' is the transaction Amount, this feature can be used for example-dependant cost-sensitive learning. Feature 'Class' is the response variable and it takes value 1 in case of fraud and 0 otherwise.
# Given the class imbalance ratio, we recommend measuring the accuracy using the Area Under the Precision-Recall Curve (AUPRC). Confusion matrix accuracy is not meaningful for unbalanced classification.
# ### Update (03/05/2021)
# A simulator for transaction data has been released as part of the practical handbook on Machine Learning for Credit Card Fraud Detection - https://fraud-detection-handbook.github.io/fraud-detection-handbook/Chapter_3_GettingStarted/SimulatedDataset.html. We invite all practitioners interested in fraud detection datasets to also check out this data simulator, and the methodologies for credit card fraud detection presented in the book.
# # Load packages
import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.graph_objs as go
import plotly.figure_factory as ff
from plotly import tools
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
init_notebook_mode(connected=True)
import gc
from datetime import datetime
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from sklearn.model_selection import KFold
from sklearn.metrics import roc_auc_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from catboost import CatBoostClassifier
from sklearn import svm
import lightgbm as lgb
from lightgbm import LGBMClassifier
import xgboost as xgb
# # Load the data
df = pd.read_csv("../input/creditcardfraud/creditcard.csv")
# # Check data
print(df.shape[0], df.shape[1])
# ## Go through the data
df.head()
df.tail()
df.sample(10)
df.describe()
df.shape
# Total 31 coloumns ... Do we need all of them ??
# ## Check Null Values
df.isnull()
df.isnull().sum()
# Great No Missing Values
total = df.isnull().sum()
percent = (df.isnull().sum() / df.isnull().count() * 100).sort_values(ascending=False)
pd.concat([total, percent], axis=1, keys=["Total", "Percent"]).transpose()
# ## Data unbalance
temp = df["Class"].value_counts()
df = pd.DataFrame({"Class": temp.index, "values": temp.values})
trace = go.Bar(
x=df["Class"],
y=df["values"],
name="Credit Card Fraud Class - data unbalance (Not fraud = 0, Fraud = 1)",
marker=dict(color="Red"),
text=df["values"],
)
data = [trace]
layout = dict(
title="Credit Card Fraud Class - data unbalance (Not fraud = 0, Fraud = 1)",
xaxis=dict(title="Class", showticklabels=True),
yaxis=dict(title="Number of transactions"),
hovermode="closest",
width=600,
)
fig = dict(data=data, layout=layout)
iplot(fig, filename="class")
| false | 0 | 1,089 | 3 | 2,962 | 1,089 |
||
129077191
|
<jupyter_start><jupyter_text>Mlebourne 2016-2017
Kaggle dataset identifier: mlebourne-20162017
<jupyter_script>import pandas as pd
import numpy as np
df = pd.read_csv("/kaggle/input/mlebourne-20162017/melb_data.csv")
df.head()
df.info()
df["Type"].value_counts()
# ## Les étapes de résolution d'un problème de prédiction :
# ### 1) Définir x et y :
y = df["Price"]
x = df[
[
"Rooms",
"Landsize",
"Bathroom",
"Bedroom2",
"Distance",
"Lattitude",
"Longtitude",
"Propertycount",
"Postcode",
]
]
# ### 2)Diviser les données en ensemble d'entrainement et ensemble de test :
# #### train_test_split divise les données de maniére aléatoire en deux ensemble distints (75% des données sont utilisées pour l'entraînement et 25% pour les tests).
from sklearn.model_selection import train_test_split
train_test_split(x, y)
# #### L'argument random_state=2 est utilisé pour spécifier une graine aléatoire afin que la division soit reproduisible.
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=2)
y_train.head()
# #### La méthode fit ajuste le modèle aux données d'entraînement en trouvant les paramètres optimaux pour minimiser l'erreur de prédiction.
# #### LinearRegression permet de modéliser la relation entre plusieurs variables d'entrée (x_train) et variable de sortie (y_train).
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(x_train, y_train)
# ## 4)Evaluer le modéle
# ##### On utilise le modèle entraîné pour faire des prédictions sur les cinq premières observations de l'ensemble de test (x_test[:5]).
model.predict(x_test[:5])
# #### mean_absolute_error est une fonction permet de calculer l'erreur absolue moyenne entre deux ensembles de valeurs.
#
from sklearn.metrics import mean_absolute_error, mean_squared_error
y_pred = model.predict(x_test)
score = mean_absolute_error(y_test, y_pred)
score
# ## Amélioration du modéle
# #### DecisionTreeRegressor cette classe permet de créer un modèle de régression par arbre de décision.
from sklearn.tree import DecisionTreeRegressor
model = DecisionTreeRegressor()
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
score = mean_absolute_error(y_test, y_pred)
score
# #### RandomForestRegressor cette classe permet de créer un modèle de régression par forêt aléatoire.
from sklearn.ensemble import RandomForestRegressor
forest_reg = RandomForestRegressor()
forest_reg.fit(x_train, y_train)
y_pred = forest_reg.predict(x_test)
score = mean_absolute_error(y_test, y_pred)
score
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/077/129077191.ipynb
|
mlebourne-20162017
|
yan08042
|
[{"Id": 129077191, "ScriptId": 38359203, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13835697, "CreationDate": "05/10/2023 20:02:51", "VersionNumber": 1.0, "Title": "R\u00e9vision_Pr\u00e9diction", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 67.0, "LinesInsertedFromPrevious": 67.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
|
[{"Id": 184811330, "KernelVersionId": 129077191, "SourceDatasetVersionId": 2616660}]
|
[{"Id": 2616660, "DatasetId": 1590461, "DatasourceVersionId": 2660319, "CreatorUserId": 1947218, "LicenseName": "Unknown", "CreationDate": "09/15/2021 06:42:33", "VersionNumber": 3.0, "Title": "Mlebourne 2016-2017", "Slug": "mlebourne-20162017", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Data Update 2021/09/15", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1590461, "CreatorUserId": 1947218, "OwnerUserId": 1947218.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2616660.0, "CurrentDatasourceVersionId": 2660319.0, "ForumId": 1610646, "Type": 2, "CreationDate": "09/15/2021 06:37:28", "LastActivityDate": "09/15/2021", "TotalViews": 1640, "TotalDownloads": 33, "TotalVotes": 1, "TotalKernels": 7}]
|
[{"Id": 1947218, "UserName": "yan08042", "DisplayName": "Cherry", "RegisterDate": "05/28/2018", "PerformanceTier": 0}]
|
import pandas as pd
import numpy as np
df = pd.read_csv("/kaggle/input/mlebourne-20162017/melb_data.csv")
df.head()
df.info()
df["Type"].value_counts()
# ## Les étapes de résolution d'un problème de prédiction :
# ### 1) Définir x et y :
y = df["Price"]
x = df[
[
"Rooms",
"Landsize",
"Bathroom",
"Bedroom2",
"Distance",
"Lattitude",
"Longtitude",
"Propertycount",
"Postcode",
]
]
# ### 2)Diviser les données en ensemble d'entrainement et ensemble de test :
# #### train_test_split divise les données de maniére aléatoire en deux ensemble distints (75% des données sont utilisées pour l'entraînement et 25% pour les tests).
from sklearn.model_selection import train_test_split
train_test_split(x, y)
# #### L'argument random_state=2 est utilisé pour spécifier une graine aléatoire afin que la division soit reproduisible.
x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=2)
y_train.head()
# #### La méthode fit ajuste le modèle aux données d'entraînement en trouvant les paramètres optimaux pour minimiser l'erreur de prédiction.
# #### LinearRegression permet de modéliser la relation entre plusieurs variables d'entrée (x_train) et variable de sortie (y_train).
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(x_train, y_train)
# ## 4)Evaluer le modéle
# ##### On utilise le modèle entraîné pour faire des prédictions sur les cinq premières observations de l'ensemble de test (x_test[:5]).
model.predict(x_test[:5])
# #### mean_absolute_error est une fonction permet de calculer l'erreur absolue moyenne entre deux ensembles de valeurs.
#
from sklearn.metrics import mean_absolute_error, mean_squared_error
y_pred = model.predict(x_test)
score = mean_absolute_error(y_test, y_pred)
score
# ## Amélioration du modéle
# #### DecisionTreeRegressor cette classe permet de créer un modèle de régression par arbre de décision.
from sklearn.tree import DecisionTreeRegressor
model = DecisionTreeRegressor()
model.fit(x_train, y_train)
y_pred = model.predict(x_test)
score = mean_absolute_error(y_test, y_pred)
score
# #### RandomForestRegressor cette classe permet de créer un modèle de régression par forêt aléatoire.
from sklearn.ensemble import RandomForestRegressor
forest_reg = RandomForestRegressor()
forest_reg.fit(x_train, y_train)
y_pred = forest_reg.predict(x_test)
score = mean_absolute_error(y_test, y_pred)
score
| false | 1 | 795 | 3 | 836 | 795 |
||
129077705
|
<jupyter_start><jupyter_text>Suicide Attempts in Shandong, China
```
Data on serious suicide attempts in Shandong, China
A data frame with 2571 observations on the following 11 variables.
```
| Column | Description |
| --- | --- |
| Person_ID | ID number of victims |
| Hospitalised | Hospitalized? (no or yes) |
| Died | Died? (no or yes) |
| Urban | Urban area? (no, unknown, or yes) |
| Year | Year (2009, 2010, or 2011) |
| Month | Month (1=Jan through 12=December) |
| Sex | Sex (female or male) |
| Age | Age (years) |
| Education | Education level (illiterate, primary, Secondary, Tertiary, or unknown) |
| Occupation | One of ten occupation categories |
| method | One of nine possible methods |
### Details
Data from a study of serious suicide attempts over three years in a predominantly rural population in Shandong, China.
## Source
Sun J, Guo X, Zhang J, Wang M, Jia C, Xu A (2015) "Incidence and fatality of serious suicide attempts in a predominantly rural population in Shandong, China: a public health surveillance study," BMJ Open 5(2): e006762. https://doi.org/10.1136/bmjopen-2014-006762
Kaggle dataset identifier: suicide-attempts-in-shandong-china
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
df = pd.read_csv("/kaggle/input/suicide-attempts-in-shandong-china/SuicideChina.csv")
import pandas_profiling
df.profile_report()
# ## Dataset overview:
# ### + Most have been hospitalised
# ### + Dataset covers the period from 2009-2011
# ### + Most of the suicide attempts are done by rurual farmers with secondary education
# ### + There are more females than males
# ### + Most suicide attempts happen in May,June
# ### + Age varies --> drill down
# ### + The most common method is pesticide --> correlation = 0.187
# # Interesting question: is pesticide the most common method because most cases are farmers and they have access to pesticides?
# ## Variable overview
# ### + Numeric 4
# ### + Boolean 2
# ### + Categorical 6
#
print(df["method"].value_counts())
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/077/129077705.ipynb
|
suicide-attempts-in-shandong-china
|
utkarshx27
|
[{"Id": 129077705, "ScriptId": 38361382, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6545977, "CreationDate": "05/10/2023 20:10:34", "VersionNumber": 1.0, "Title": "Suicide attempts in shandong china", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 33.0, "LinesInsertedFromPrevious": 33.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 184812114, "KernelVersionId": 129077705, "SourceDatasetVersionId": 5617993}]
|
[{"Id": 5617993, "DatasetId": 3230370, "DatasourceVersionId": 5693173, "CreatorUserId": 13364933, "LicenseName": "CC0: Public Domain", "CreationDate": "05/06/2023 11:54:22", "VersionNumber": 1.0, "Title": "Suicide Attempts in Shandong, China", "Slug": "suicide-attempts-in-shandong-china", "Subtitle": "Serious Suicide Attempts in Shandong, China: Three-Year Study", "Description": "```\nData on serious suicide attempts in Shandong, China\nA data frame with 2571 observations on the following 11 variables.\n```\n\n| Column | Description |\n| --- | --- |\n| Person_ID | ID number of victims |\n| Hospitalised | Hospitalized? (no or yes) |\n| Died | Died? (no or yes) |\n| Urban | Urban area? (no, unknown, or yes) |\n| Year | Year (2009, 2010, or 2011) |\n| Month | Month (1=Jan through 12=December) |\n| Sex | Sex (female or male) |\n| Age | Age (years) |\n| Education | Education level (illiterate, primary, Secondary, Tertiary, or unknown) |\n| Occupation | One of ten occupation categories |\n| method | One of nine possible methods |\n\n### Details \nData from a study of serious suicide attempts over three years in a predominantly rural population in Shandong, China.\n\n## Source\nSun J, Guo X, Zhang J, Wang M, Jia C, Xu A (2015) \"Incidence and fatality of serious suicide attempts in a predominantly rural population in Shandong, China: a public health surveillance study,\" BMJ Open 5(2): e006762. https://doi.org/10.1136/bmjopen-2014-006762", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3230370, "CreatorUserId": 13364933, "OwnerUserId": 13364933.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5617993.0, "CurrentDatasourceVersionId": 5693173.0, "ForumId": 3295509, "Type": 2, "CreationDate": "05/06/2023 11:54:22", "LastActivityDate": "05/06/2023", "TotalViews": 8885, "TotalDownloads": 1402, "TotalVotes": 42, "TotalKernels": 12}]
|
[{"Id": 13364933, "UserName": "utkarshx27", "DisplayName": "Utkarsh Singh", "RegisterDate": "01/21/2023", "PerformanceTier": 2}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
df = pd.read_csv("/kaggle/input/suicide-attempts-in-shandong-china/SuicideChina.csv")
import pandas_profiling
df.profile_report()
# ## Dataset overview:
# ### + Most have been hospitalised
# ### + Dataset covers the period from 2009-2011
# ### + Most of the suicide attempts are done by rurual farmers with secondary education
# ### + There are more females than males
# ### + Most suicide attempts happen in May,June
# ### + Age varies --> drill down
# ### + The most common method is pesticide --> correlation = 0.187
# # Interesting question: is pesticide the most common method because most cases are farmers and they have access to pesticides?
# ## Variable overview
# ### + Numeric 4
# ### + Boolean 2
# ### + Categorical 6
#
print(df["method"].value_counts())
| false | 1 | 307 | 0 | 720 | 307 |
||
129194083
|
# наша задача - обнаруживать эпизоды замирания при ходьбе для серий в датасетах tdcsfog и defog. Для этого нам нужно использовать данные о временных шагах, ускорениях по трем осям и типах событий (записанных в столбце Type) из этих двух датасетов.
# Данные из tdcsfog записываются со скоростью 128 Гц (128 временных шагов в секунду) в единицах m/s^2. Данные из defog записываются со скоростью 100 Гц (100 временных шагов в секунду) в единицах g. Для преобразования ускорения из g в m/s^2 мы можем использовать следующую формулу: 1 g = 9.81 m/s^2.
# В датасете defog есть два дополнительных столбца: Valid и Task. Мы должны использовать только аннотации событий, в которых серия помечена как true, а участки, помеченные как false, следует рассматривать как неаннотированные.
# Метаданные tdcsfog_metadata, defog_metadata и events содержат информацию о лабораторных визитах, выполненных тестах и принимаемых препаратах для каждого субъекта в исследовании. Мы можем использовать эту информацию для анализа результатов нашей модели в зависимости от разных факторов.
# Таким образом, для создания объединенного датасета нам нужно извлечь необходимые данные (временные шаги, ускорения по трем осям и типы событий) из tdcsfog и defog, преобразовать ускорение в единицы m/s^2 для defog, и использовать только аннотированные события из defog. Мы также можем использовать метаданные для анализа результатов.
import pandas as pd
import numpy as np
import os
# for dirname, _, filenames in os.walk('/kaggle/input'):
# for filename in filenames:
# print(os.path.join(dirname, filename))
# Считываем данные из tdcsfog_metadata.csv
tdcsfog = pd.read_csv(
"/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/tdcsfog_metadata.csv"
)
# Считываем данные из defog_metadata.csv
defog = pd.read_csv(
"/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/defog_metadata.csv"
)
# Считываем данные из events.csv
events = pd.read_csv(
"/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/events.csv"
)
# Считываем данные из папки train подпапки tdcsfog файла daily_metadata.csv
acc = pd.read_csv(
"/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/train/tdcsfog/08fbe142f9.csv"
)
# Считываем данные из subjects.csv
subject = pd.read_csv(
"/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/subjects.csv"
)
print("Shape of tdcsfog:", tdcsfog.shape)
print("Head of tdcsfog:\n", tdcsfog.head())
print("Info of tdcsfog:\n", tdcsfog.info())
print("Summary statistics of tdcsfog:\n", tdcsfog.describe())
print("Missing values in tdcsfog:\n", tdcsfog.isnull().sum())
print("Duplicate rows in tdcsfog:", tdcsfog.duplicated().sum())
# Данные в наборе выглядят полными и чистыми, то есть отсутствуют пропущенные значения и дубликаты.
# Набор данных состоит из 833 записей и 5 столбцов, которые описывают идентификатор пациента, номер визита, номер теста, медикаментозное лечение и результаты испытаний. Столбец "Test" имеет всего три возможных значения, что указывает на то, что в каждом тесте было произведено измерение только одного параметра.чны для анализа и построения модели.
print("Shape of defog:", defog.shape)
print("Head of defog:\n", defog.head())
print("Info of defog:\n", defog.info())
print("Summary statistics of defog:\n", defog.describe())
print("Missing values in defog:\n", defog.isnull().sum())
print("Duplicate rows in defog:", defog.duplicated().sum())
# Данный датасет содержит информацию о 137 пациентах, которые проходили лечение с различными медикаментами для улучшения функций мозга.
# Каждая строка содержит информацию об идентификаторе пациента, его идентификаторе FOG, факт приема медикоментов
# Датасет не содержит пропущенных значений или дубликатов.
# Среди пациентов присутствуют только два значения номера визита 1 и 2, причем большинство пациентов (75%) проходили лечение на первом визите.
print("Shape of events:", events.shape)
print("Head of events:\n", events.head())
print("Info of events:\n", events.info())
print("Summary statistics of events:\n", events.describe())
print("Missing values in events:\n", events.isnull().sum())
print("Duplicate rows in events:", events.duplicated().sum())
# Датасет состоит из 3544 событий, каждое из которых характеризуется пятию колонками: "Id", "Init", "Completion", "Type", "Kinetic". "Id" представляет собой уникальный идентификатор для каждого события. "Init" и "Completion" описывают время начала и завершения события соответственно. "Type" и "Kinetic" это категориальная и числовая переменные соответственно, которые описывают тип и кинетическую активность события.
# Количество непустых значений для всех колонок равно 3544, кроме колонок "Type" и "Kinetic", где есть 1042 пропущенных значений. В датасете нет дубликатов строк.
# Среднее значение для "Init", "Completion" и "Kinetic" составляет 956.30, 964.49 и 0.82 соответственно. Стандартное отклонение равно 946.36, 943.97 и 0.39 соответственно. Минимальное и максимальное значения для "Init" равны -30.67 и 4381.22 соответственно, а для "Completion" минимальное и максимальное значения равны -29.72 и 4392.75 соответственно.
print("Shape of acc:", acc.shape)
print("Head of acc:\n", acc.head())
print("Info of acc:\n", acc.info())
print("Summary statistics of acc:\n", acc.describe())
print("Missing values in acc:\n", acc.isnull().sum())
print("Duplicate rows in acc:", acc.duplicated().sum())
#
# Датасет содержит 4474 строк и 7 столбцов: Time, AccV, AccML, AccAP, StartHesitation, Turn и Walking.
# Столбец Time представляет метку времени данных акселерометра. AccV, AccML и AccAP представляют значения ускорения в вертикальном, медиально-латеральном и антеро-постериорном направлениях соответственно. StartHesitation, Turn и Walking - это бинарные переменные, указывающие, начинает ли испытуемый ходить, поворачивает или уже идет в данный момент времени.
# Сводные статистические данные показывают, что среднее значение вертикального ускорения составляет -9,61 м / с², среднее значение медиально-латерального ускорения - 0,14 м / с², а среднее значение антеро-постериорного ускорения - 1,14 м / с². В датасете нет пропущенных значений или дублирующихся строк.
print("Shape of subject:", subject.shape)
print("Head of subject:\n", subject.head())
print("Info of subject:\n", subject.info())
print("Summary statistics of subject:\n", subject.describe())
print("Missing values in subject:\n", subject.isnull().sum())
print("Duplicate rows in subject:", subject.duplicated().sum())
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/194/129194083.ipynb
| null | null |
[{"Id": 129194083, "ScriptId": 38396051, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12868381, "CreationDate": "05/11/2023 17:46:26", "VersionNumber": 1.0, "Title": "EDA Parkinson", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 103.0, "LinesInsertedFromPrevious": 103.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# наша задача - обнаруживать эпизоды замирания при ходьбе для серий в датасетах tdcsfog и defog. Для этого нам нужно использовать данные о временных шагах, ускорениях по трем осям и типах событий (записанных в столбце Type) из этих двух датасетов.
# Данные из tdcsfog записываются со скоростью 128 Гц (128 временных шагов в секунду) в единицах m/s^2. Данные из defog записываются со скоростью 100 Гц (100 временных шагов в секунду) в единицах g. Для преобразования ускорения из g в m/s^2 мы можем использовать следующую формулу: 1 g = 9.81 m/s^2.
# В датасете defog есть два дополнительных столбца: Valid и Task. Мы должны использовать только аннотации событий, в которых серия помечена как true, а участки, помеченные как false, следует рассматривать как неаннотированные.
# Метаданные tdcsfog_metadata, defog_metadata и events содержат информацию о лабораторных визитах, выполненных тестах и принимаемых препаратах для каждого субъекта в исследовании. Мы можем использовать эту информацию для анализа результатов нашей модели в зависимости от разных факторов.
# Таким образом, для создания объединенного датасета нам нужно извлечь необходимые данные (временные шаги, ускорения по трем осям и типы событий) из tdcsfog и defog, преобразовать ускорение в единицы m/s^2 для defog, и использовать только аннотированные события из defog. Мы также можем использовать метаданные для анализа результатов.
import pandas as pd
import numpy as np
import os
# for dirname, _, filenames in os.walk('/kaggle/input'):
# for filename in filenames:
# print(os.path.join(dirname, filename))
# Считываем данные из tdcsfog_metadata.csv
tdcsfog = pd.read_csv(
"/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/tdcsfog_metadata.csv"
)
# Считываем данные из defog_metadata.csv
defog = pd.read_csv(
"/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/defog_metadata.csv"
)
# Считываем данные из events.csv
events = pd.read_csv(
"/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/events.csv"
)
# Считываем данные из папки train подпапки tdcsfog файла daily_metadata.csv
acc = pd.read_csv(
"/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/train/tdcsfog/08fbe142f9.csv"
)
# Считываем данные из subjects.csv
subject = pd.read_csv(
"/kaggle/input/tlvmc-parkinsons-freezing-gait-prediction/subjects.csv"
)
print("Shape of tdcsfog:", tdcsfog.shape)
print("Head of tdcsfog:\n", tdcsfog.head())
print("Info of tdcsfog:\n", tdcsfog.info())
print("Summary statistics of tdcsfog:\n", tdcsfog.describe())
print("Missing values in tdcsfog:\n", tdcsfog.isnull().sum())
print("Duplicate rows in tdcsfog:", tdcsfog.duplicated().sum())
# Данные в наборе выглядят полными и чистыми, то есть отсутствуют пропущенные значения и дубликаты.
# Набор данных состоит из 833 записей и 5 столбцов, которые описывают идентификатор пациента, номер визита, номер теста, медикаментозное лечение и результаты испытаний. Столбец "Test" имеет всего три возможных значения, что указывает на то, что в каждом тесте было произведено измерение только одного параметра.чны для анализа и построения модели.
print("Shape of defog:", defog.shape)
print("Head of defog:\n", defog.head())
print("Info of defog:\n", defog.info())
print("Summary statistics of defog:\n", defog.describe())
print("Missing values in defog:\n", defog.isnull().sum())
print("Duplicate rows in defog:", defog.duplicated().sum())
# Данный датасет содержит информацию о 137 пациентах, которые проходили лечение с различными медикаментами для улучшения функций мозга.
# Каждая строка содержит информацию об идентификаторе пациента, его идентификаторе FOG, факт приема медикоментов
# Датасет не содержит пропущенных значений или дубликатов.
# Среди пациентов присутствуют только два значения номера визита 1 и 2, причем большинство пациентов (75%) проходили лечение на первом визите.
print("Shape of events:", events.shape)
print("Head of events:\n", events.head())
print("Info of events:\n", events.info())
print("Summary statistics of events:\n", events.describe())
print("Missing values in events:\n", events.isnull().sum())
print("Duplicate rows in events:", events.duplicated().sum())
# Датасет состоит из 3544 событий, каждое из которых характеризуется пятию колонками: "Id", "Init", "Completion", "Type", "Kinetic". "Id" представляет собой уникальный идентификатор для каждого события. "Init" и "Completion" описывают время начала и завершения события соответственно. "Type" и "Kinetic" это категориальная и числовая переменные соответственно, которые описывают тип и кинетическую активность события.
# Количество непустых значений для всех колонок равно 3544, кроме колонок "Type" и "Kinetic", где есть 1042 пропущенных значений. В датасете нет дубликатов строк.
# Среднее значение для "Init", "Completion" и "Kinetic" составляет 956.30, 964.49 и 0.82 соответственно. Стандартное отклонение равно 946.36, 943.97 и 0.39 соответственно. Минимальное и максимальное значения для "Init" равны -30.67 и 4381.22 соответственно, а для "Completion" минимальное и максимальное значения равны -29.72 и 4392.75 соответственно.
print("Shape of acc:", acc.shape)
print("Head of acc:\n", acc.head())
print("Info of acc:\n", acc.info())
print("Summary statistics of acc:\n", acc.describe())
print("Missing values in acc:\n", acc.isnull().sum())
print("Duplicate rows in acc:", acc.duplicated().sum())
#
# Датасет содержит 4474 строк и 7 столбцов: Time, AccV, AccML, AccAP, StartHesitation, Turn и Walking.
# Столбец Time представляет метку времени данных акселерометра. AccV, AccML и AccAP представляют значения ускорения в вертикальном, медиально-латеральном и антеро-постериорном направлениях соответственно. StartHesitation, Turn и Walking - это бинарные переменные, указывающие, начинает ли испытуемый ходить, поворачивает или уже идет в данный момент времени.
# Сводные статистические данные показывают, что среднее значение вертикального ускорения составляет -9,61 м / с², среднее значение медиально-латерального ускорения - 0,14 м / с², а среднее значение антеро-постериорного ускорения - 1,14 м / с². В датасете нет пропущенных значений или дублирующихся строк.
print("Shape of subject:", subject.shape)
print("Head of subject:\n", subject.head())
print("Info of subject:\n", subject.info())
print("Summary statistics of subject:\n", subject.describe())
print("Missing values in subject:\n", subject.isnull().sum())
print("Duplicate rows in subject:", subject.duplicated().sum())
| false | 0 | 2,586 | 0 | 2,586 | 2,586 |
||
129000049
|
<jupyter_start><jupyter_text>Historical Weather Data for Indian Cities
### Context
The dataset was created by keeping in mind the necessity of such historical weather data in the community. The datasets for top 8 Indian cities as per the population.
### Content
The dataset was used with the help of the worldweatheronline.com API and the wwo_hist package. The datasets contain hourly weather data from 01-01-2009 to 01-01-2020. The data of each city is for more than 10 years. This data can be used to visualize the change in data due to global warming or can be used to predict the weather for upcoming days, weeks, months, seasons, etc.
Note : The data was extracted with the help of worldweatheronline.com API and I can't guarantee about the accuracy of the data.
Kaggle dataset identifier: historical-weather-data-for-indian-cities
<jupyter_script># # Importing Packages
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LinearRegression
from sklearn import preprocessing
# # Reading CSV file as wdf and making date_time column as index of dataframe
wdf = pd.read_csv(
"/kaggle/input/historical-weather-data-for-indian-cities/jaipur.csv",
parse_dates=["date_time"],
index_col="date_time",
)
wdf.head(5)
# # Checking columns in our dataframe
wdf.columns
# ## Now shape
wdf.shape
wdf.describe()
# # Checking is there any null values in dataset
wdf.isnull().any()
# ### Now lets separate the feature (i.e. temperature) to be predicted from the rest of the featured. weather_x stores the rest of the dataset while weather_y has temperature column.
wdf_num = wdf.loc[:, ["mintempC", "tempC", "HeatIndexC", "pressure"]]
wdf_num.head()
# # Shape of new dataframe
wdf_num.shape
# # Columns in new dataframe
wdf_num.columns
weth = wdf_num["2019":"2020"]
weth.head()
weather_y = wdf_num.pop("tempC")
weather_x = wdf_num
# # split the dataset into training and testing.
train_X, test_X, train_y, test_y = train_test_split(
weather_x, weather_y, test_size=0.2, random_state=4
)
train_X.shape
train_y.shape
# ### train_x has all the features except temperature and train_y has the corresponding temperature for those features. in supervised machine learning we first feed the model with input and associated output and then we check with a new input.
train_y.head()
# # Linear Regression
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
# # plot the scatter plot for three features
#
plt.hexbin(weth.mintempC, weth.tempC, gridsize=20)
plt.xlabel("Minimum Temperature")
plt.ylabel("Temperature")
plt.show()
plt.hexbin(weth.HeatIndexC, weth.tempC, gridsize=20)
plt.xlabel("Heat Index")
plt.ylabel("Temperature")
plt.show()
plt.hexbin(weth.pressure, weth.tempC, gridsize=20)
plt.xlabel("Pressure")
plt.ylabel("Temperature")
plt.show()
# # train the model
#
model = LinearRegression()
model.fit(train_X, train_y)
# # make predictions
prediction = model.predict(test_X)
# calculate the mean absolute error
mae = np.mean(np.absolute(prediction - test_y))
print("Mean Absolute Error:", mae)
# calculate the variance score
variance_score = model.score(test_X, test_y)
print("Variance score:", variance_score)
# round the predictions to two decimal places
prediction = np.round(prediction, 2)
# display the results in a table
results = pd.DataFrame(
{"Actual": test_y, "Prediction": prediction, "Difference": test_y - prediction}
)
print(results)
# # Logistic Regression
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
# # train the model
model = LogisticRegression()
model.fit(train_X, train_y)
# # make predictions
#
prediction = model.predict(test_X)
# # calculate the mean absolute error
mae = np.mean(np.absolute(prediction - test_y))
print("Mean Absolute Error:", mae)
# # calculate the variance score
#
variance_score = model.score(test_X, test_y)
print("Variance score:", variance_score)
# display the results in a table
results = pd.DataFrame(
{"Actual": test_y, "Prediction": prediction, "Difference": test_y - prediction}
)
print(results)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/000/129000049.ipynb
|
historical-weather-data-for-indian-cities
|
hiteshsoneji
|
[{"Id": 129000049, "ScriptId": 37807324, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14833766, "CreationDate": "05/10/2023 08:12:21", "VersionNumber": 1.0, "Title": "Weather-forcasting using python", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 160.0, "LinesInsertedFromPrevious": 160.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 184671023, "KernelVersionId": 129000049, "SourceDatasetVersionId": 1129180}]
|
[{"Id": 1129180, "DatasetId": 635203, "DatasourceVersionId": 1159657, "CreatorUserId": 4267922, "LicenseName": "Other (specified in description)", "CreationDate": "05/04/2020 12:43:21", "VersionNumber": 1.0, "Title": "Historical Weather Data for Indian Cities", "Slug": "historical-weather-data-for-indian-cities", "Subtitle": "Historical weather data for top 8 indian cities per population", "Description": "### Context\n\nThe dataset was created by keeping in mind the necessity of such historical weather data in the community. The datasets for top 8 Indian cities as per the population. \n\n\n### Content\n\nThe dataset was used with the help of the worldweatheronline.com API and the wwo_hist package. The datasets contain hourly weather data from 01-01-2009 to 01-01-2020. The data of each city is for more than 10 years. This data can be used to visualize the change in data due to global warming or can be used to predict the weather for upcoming days, weeks, months, seasons, etc.\nNote : The data was extracted with the help of worldweatheronline.com API and I can't guarantee about the accuracy of the data.\n\n\n### Acknowledgements\n\nThe data is owned by worldweatheronline.com and is extracted with the help of their API. \n\n\n### Inspiration\n\nThe main target of this dataset can be used to predict weather for the next day or week with huge amounts of data provided in the dataset. Furthermore, this data can also be used to make visualization which would help to understand the impact of global warming over the various aspects of the weather like precipitation, humidity, temperature, etc.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 635203, "CreatorUserId": 4267922, "OwnerUserId": 4267922.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1129180.0, "CurrentDatasourceVersionId": 1159657.0, "ForumId": 649466, "Type": 2, "CreationDate": "05/04/2020 12:43:21", "LastActivityDate": "05/04/2020", "TotalViews": 21168, "TotalDownloads": 2732, "TotalVotes": 39, "TotalKernels": 3}]
|
[{"Id": 4267922, "UserName": "hiteshsoneji", "DisplayName": "Hitesh Soneji", "RegisterDate": "12/30/2019", "PerformanceTier": 1}]
|
# # Importing Packages
import warnings
warnings.filterwarnings("ignore")
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.linear_model import LinearRegression
from sklearn import preprocessing
# # Reading CSV file as wdf and making date_time column as index of dataframe
wdf = pd.read_csv(
"/kaggle/input/historical-weather-data-for-indian-cities/jaipur.csv",
parse_dates=["date_time"],
index_col="date_time",
)
wdf.head(5)
# # Checking columns in our dataframe
wdf.columns
# ## Now shape
wdf.shape
wdf.describe()
# # Checking is there any null values in dataset
wdf.isnull().any()
# ### Now lets separate the feature (i.e. temperature) to be predicted from the rest of the featured. weather_x stores the rest of the dataset while weather_y has temperature column.
wdf_num = wdf.loc[:, ["mintempC", "tempC", "HeatIndexC", "pressure"]]
wdf_num.head()
# # Shape of new dataframe
wdf_num.shape
# # Columns in new dataframe
wdf_num.columns
weth = wdf_num["2019":"2020"]
weth.head()
weather_y = wdf_num.pop("tempC")
weather_x = wdf_num
# # split the dataset into training and testing.
train_X, test_X, train_y, test_y = train_test_split(
weather_x, weather_y, test_size=0.2, random_state=4
)
train_X.shape
train_y.shape
# ### train_x has all the features except temperature and train_y has the corresponding temperature for those features. in supervised machine learning we first feed the model with input and associated output and then we check with a new input.
train_y.head()
# # Linear Regression
from sklearn.linear_model import LinearRegression
import matplotlib.pyplot as plt
# # plot the scatter plot for three features
#
plt.hexbin(weth.mintempC, weth.tempC, gridsize=20)
plt.xlabel("Minimum Temperature")
plt.ylabel("Temperature")
plt.show()
plt.hexbin(weth.HeatIndexC, weth.tempC, gridsize=20)
plt.xlabel("Heat Index")
plt.ylabel("Temperature")
plt.show()
plt.hexbin(weth.pressure, weth.tempC, gridsize=20)
plt.xlabel("Pressure")
plt.ylabel("Temperature")
plt.show()
# # train the model
#
model = LinearRegression()
model.fit(train_X, train_y)
# # make predictions
prediction = model.predict(test_X)
# calculate the mean absolute error
mae = np.mean(np.absolute(prediction - test_y))
print("Mean Absolute Error:", mae)
# calculate the variance score
variance_score = model.score(test_X, test_y)
print("Variance score:", variance_score)
# round the predictions to two decimal places
prediction = np.round(prediction, 2)
# display the results in a table
results = pd.DataFrame(
{"Actual": test_y, "Prediction": prediction, "Difference": test_y - prediction}
)
print(results)
# # Logistic Regression
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
# # train the model
model = LogisticRegression()
model.fit(train_X, train_y)
# # make predictions
#
prediction = model.predict(test_X)
# # calculate the mean absolute error
mae = np.mean(np.absolute(prediction - test_y))
print("Mean Absolute Error:", mae)
# # calculate the variance score
#
variance_score = model.score(test_X, test_y)
print("Variance score:", variance_score)
# display the results in a table
results = pd.DataFrame(
{"Actual": test_y, "Prediction": prediction, "Difference": test_y - prediction}
)
print(results)
| false | 1 | 1,031 | 0 | 1,245 | 1,031 |
||
129099527
|
<jupyter_start><jupyter_text>youtube_api_dataset_v2
Kaggle dataset identifier: youtube-api-dataset-v2
<jupyter_script>import pandas as pd
df = pd.read_csv("/kaggle/input/youtube-api-dataset-v2/data_set.csv")
df.head()
df.shape
cols = df.columns
cols
X = df[
["duration", "videoAge", "subscribers", "totalVideos", "totalViews", "channelAge"]
]
X
y = df["views"]
y
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
X_train.shape, X_test.shape, y_train.shape, y_test.shape
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# fit and transfrom
X_train = scaler.fit_transform(X_train)
X_test = scaler.fit_transform(X_test)
X_train
X_test
from keras.models import Sequential
from keras.layers import Dense
import keras.metrics
import keras.optimizers
import keras.losses
import keras.metrics
# from keras import backend as K
# def coeff_determination(y_true, y_pred):
# SS_res = K.sum(K.square( y_true-y_pred ))
# SS_tot = K.sum(K.square( y_true - K.mean(y_true) ) )
model = Sequential()
# input layer
model.add(keras.Input(6))
# hidden layers
model.add(Dense(100, activation="relu"))
model.add(Dense(1000, activation="relu"))
model.add(Dense(1000, activation="relu"))
model.add(Dense(100, activation="relu"))
# output layer
model.add(Dense(1))
model.compile(optimizer=keras.optimizers.Adam(), loss="mse")
hist = model.fit(
x=X_train, y=y_train, batch_size=32, epochs=450, validation_split=0.2, shuffle=False
)
model.summary()
import matplotlib.pyplot as plt
plt.plot(hist.history["loss"])
plt.plot(hist.history["val_loss"])
plt.xlabel("epochs")
plt.ylabel("Loss")
plt.title("Model Loss")
plt.legend(["training loss", "validation loss"])
plt.show()
import numpy as np
y_pred = model.predict(X_test)
y_test = np.float32(y_test)
y_test = y_test.reshape(-1, 1)
comp_res = np.concatenate((y_test, y_pred), axis=1)
comp_res
model.evaluate(x=X_test, y=y_test)
from sklearn.metrics import r2_score, mean_squared_error
r2 = r2_score(y_test, y_pred)
mse = mean_squared_error(y_true=y_test, y_pred=y_pred)
r2, mse
y_test.shape, y_pred.shape
# %matplotlib qt
# plt.figure()
# plt.plot(y_test)
# plt.plot(y_pred)
# plt.xlabel('Training sample')
# plt.ylabel('Views')
# plt.title('Actual and predicted output')
# plt.legend(['Actual Views', 'Predicted Views'])
# plt.show()
# plt.figure(figsize=(10,10))
# plt.scatter(y_test, y_pred, c='crimson')
# plt.yscale('log')
# plt.xscale('log')
# p1 = max(max(y_pred), max(y_test))
# p2 = min(min(y_pred), min(y_test))
# plt.plot([p1, p2], [p1, p2], 'b-')
# plt.xlabel('True Values', fontsize=15)
# plt.ylabel('Predictions', fontsize=15)
# plt.axis('equal')
# plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/099/129099527.ipynb
|
youtube-api-dataset-v2
|
amitsingh1555
|
[{"Id": 129099527, "ScriptId": 38378535, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13747440, "CreationDate": "05/11/2023 02:31:45", "VersionNumber": 1.0, "Title": "notebook0c94c39d10", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 123.0, "LinesInsertedFromPrevious": 123.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 184855777, "KernelVersionId": 129099527, "SourceDatasetVersionId": 5658096}]
|
[{"Id": 5658096, "DatasetId": 3251885, "DatasourceVersionId": 5733505, "CreatorUserId": 13747440, "LicenseName": "Unknown", "CreationDate": "05/10/2023 18:49:09", "VersionNumber": 1.0, "Title": "youtube_api_dataset_v2", "Slug": "youtube-api-dataset-v2", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3251885, "CreatorUserId": 13747440, "OwnerUserId": 13747440.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5658096.0, "CurrentDatasourceVersionId": 5733505.0, "ForumId": 3317293, "Type": 2, "CreationDate": "05/10/2023 18:49:09", "LastActivityDate": "05/10/2023", "TotalViews": 51, "TotalDownloads": 1, "TotalVotes": 0, "TotalKernels": 3}]
|
[{"Id": 13747440, "UserName": "amitsingh1555", "DisplayName": "Amit Singh 1555", "RegisterDate": "02/17/2023", "PerformanceTier": 0}]
|
import pandas as pd
df = pd.read_csv("/kaggle/input/youtube-api-dataset-v2/data_set.csv")
df.head()
df.shape
cols = df.columns
cols
X = df[
["duration", "videoAge", "subscribers", "totalVideos", "totalViews", "channelAge"]
]
X
y = df["views"]
y
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
X_train.shape, X_test.shape, y_train.shape, y_test.shape
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
# fit and transfrom
X_train = scaler.fit_transform(X_train)
X_test = scaler.fit_transform(X_test)
X_train
X_test
from keras.models import Sequential
from keras.layers import Dense
import keras.metrics
import keras.optimizers
import keras.losses
import keras.metrics
# from keras import backend as K
# def coeff_determination(y_true, y_pred):
# SS_res = K.sum(K.square( y_true-y_pred ))
# SS_tot = K.sum(K.square( y_true - K.mean(y_true) ) )
model = Sequential()
# input layer
model.add(keras.Input(6))
# hidden layers
model.add(Dense(100, activation="relu"))
model.add(Dense(1000, activation="relu"))
model.add(Dense(1000, activation="relu"))
model.add(Dense(100, activation="relu"))
# output layer
model.add(Dense(1))
model.compile(optimizer=keras.optimizers.Adam(), loss="mse")
hist = model.fit(
x=X_train, y=y_train, batch_size=32, epochs=450, validation_split=0.2, shuffle=False
)
model.summary()
import matplotlib.pyplot as plt
plt.plot(hist.history["loss"])
plt.plot(hist.history["val_loss"])
plt.xlabel("epochs")
plt.ylabel("Loss")
plt.title("Model Loss")
plt.legend(["training loss", "validation loss"])
plt.show()
import numpy as np
y_pred = model.predict(X_test)
y_test = np.float32(y_test)
y_test = y_test.reshape(-1, 1)
comp_res = np.concatenate((y_test, y_pred), axis=1)
comp_res
model.evaluate(x=X_test, y=y_test)
from sklearn.metrics import r2_score, mean_squared_error
r2 = r2_score(y_test, y_pred)
mse = mean_squared_error(y_true=y_test, y_pred=y_pred)
r2, mse
y_test.shape, y_pred.shape
# %matplotlib qt
# plt.figure()
# plt.plot(y_test)
# plt.plot(y_pred)
# plt.xlabel('Training sample')
# plt.ylabel('Views')
# plt.title('Actual and predicted output')
# plt.legend(['Actual Views', 'Predicted Views'])
# plt.show()
# plt.figure(figsize=(10,10))
# plt.scatter(y_test, y_pred, c='crimson')
# plt.yscale('log')
# plt.xscale('log')
# p1 = max(max(y_pred), max(y_test))
# p2 = min(min(y_pred), min(y_test))
# plt.plot([p1, p2], [p1, p2], 'b-')
# plt.xlabel('True Values', fontsize=15)
# plt.ylabel('Predictions', fontsize=15)
# plt.axis('equal')
# plt.show()
| false | 1 | 982 | 0 | 1,012 | 982 |
||
129099654
|
<jupyter_start><jupyter_text>Heart Disease Dataset
### Context
This data set dates from 1988 and consists of four databases: Cleveland, Hungary, Switzerland, and Long Beach V. It contains 76 attributes, including the predicted attribute, but all published experiments refer to using a subset of 14 of them. The "target" field refers to the presence of heart disease in the patient. It is integer valued 0 = no disease and 1 = disease.
### Content
Attribute Information:
> 1. age
> 2. sex
> 3. chest pain type (4 values)
> 4. resting blood pressure
> 5. serum cholesterol in mg/dl
> 6. fasting blood sugar > 120 mg/dl
> 7. resting electrocardiographic results (values 0,1,2)
> 8. maximum heart rate achieved
> 9. exercise induced angina
> 10. oldpeak = ST depression induced by exercise relative to rest
> 11. the slope of the peak exercise ST segment
> 12. number of major vessels (0-3) colored by fluoroscopy
> 13. thal: 0 = normal; 1 = fixed defect; 2 = reversible defect
The names and social security numbers of the patients were recently removed from the database, replaced with dummy values.
Kaggle dataset identifier: heart-disease-dataset
<jupyter_code>import pandas as pd
df = pd.read_csv('heart-disease-dataset/heart.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 1025 entries, 0 to 1024
Data columns (total 14 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 age 1025 non-null int64
1 sex 1025 non-null int64
2 cp 1025 non-null int64
3 trestbps 1025 non-null int64
4 chol 1025 non-null int64
5 fbs 1025 non-null int64
6 restecg 1025 non-null int64
7 thalach 1025 non-null int64
8 exang 1025 non-null int64
9 oldpeak 1025 non-null float64
10 slope 1025 non-null int64
11 ca 1025 non-null int64
12 thal 1025 non-null int64
13 target 1025 non-null int64
dtypes: float64(1), int64(13)
memory usage: 112.2 KB
<jupyter_text>Examples:
{
"age": 52.0,
"sex": 1.0,
"cp": 0.0,
"trestbps": 125.0,
"chol": 212.0,
"fbs": 0.0,
"restecg": 1.0,
"thalach": 168.0,
"exang": 0.0,
"oldpeak": 1.0,
"slope": 2.0,
"ca": 2.0,
"thal": 3.0,
"target": 0.0
}
{
"age": 53.0,
"sex": 1.0,
"cp": 0.0,
"trestbps": 140.0,
"chol": 203.0,
"fbs": 1.0,
"restecg": 0.0,
"thalach": 155.0,
"exang": 1.0,
"oldpeak": 3.1,
"slope": 0.0,
"ca": 0.0,
"thal": 3.0,
"target": 0.0
}
{
"age": 70.0,
"sex": 1.0,
"cp": 0.0,
"trestbps": 145.0,
"chol": 174.0,
"fbs": 0.0,
"restecg": 1.0,
"thalach": 125.0,
"exang": 1.0,
"oldpeak": 2.6,
"slope": 0.0,
"ca": 0.0,
"thal": 3.0,
"target": 0.0
}
{
"age": 61.0,
"sex": 1.0,
"cp": 0.0,
"trestbps": 148.0,
"chol": 203.0,
"fbs": 0.0,
"restecg": 1.0,
"thalach": 161.0,
"exang": 0.0,
"oldpeak": 0.0,
"slope": 2.0,
"ca": 1.0,
"thal": 3.0,
"target": 0.0
}
<jupyter_script># # **Business Problem**
# Today, the biggest challenge for the medical industry is to provide high-level facilities in its healthcare infrastructure so that diseases can be diagnosed on day one and treated on time, improving quality of life through quality of service. Around 31% of deaths worldwide are caused by heart disease. In fact, according to the WHO, cardiovascular disease (CVD) is the leading cause of death globally, claiming around 17.9 million lives each year. Therefore, identifying the factors that significantly influence heart disease is needed so that appropriate care can be provided.
# # **Metrics**
# This dataset is secondary data taken from [Kaggle](https://www.kaggle.com/datasets/johnsmith88/heart-disease-dataset). It dates from 1988 and consists of four databases: Cleveland, Hungary, Switzerland, and Long Beach V. It contains 76 attributes, including the one to be predicted, but all published experiments refer to using a subset of 14 of them. "Target" refers to the presence of heart disease in the patient, with 0 representing no disease and 1 representing disease. Detailed information on the 14 attributes used is given below:
# 
# # **Data Preprocessing**
# import library
import pandas as pd
import missingno as msno
import matplotlib.pyplot as plt
from pylab import rcParams
import seaborn as sns
import altair as alt
# load data
heart = pd.read_csv("/kaggle/input/heart-disease-dataset/heart.csv")
heart.head()
# info data
heart.info()
# Check for missing values
msno.matrix(heart, color=(0.27, 0.52, 1.0))
plt.figure(figsize=(15, 9))
plt.show()
# The visualization above shows that this dataset has no missing values.
# check for duplicates
heart.duplicated().sum()
# handle duplicates by dropping them
heart = heart.drop_duplicates()
# **Outliers**
# - The outlier check uses the IQR rule:
#   - a datum is a lower outlier if it falls below Q1 - 1.5 x IQR, and an upper outlier if it exceeds Q3 + 1.5 x IQR.
# - Next, the author checks whether these attributes contain extreme values using:
#   - a datum is a lower extreme value if it falls below Q1 - 3 x IQR, and an upper extreme value if it exceeds Q3 + 3 x IQR.
#
# Outlier Checking
def find_outliers_IQR(df):
q1 = df.quantile(0.25)
q3 = df.quantile(0.75)
IQR = q3 - q1
outliers = df[((df < (q1 - 1.5 * IQR)) | (df > (q3 + 1.5 * IQR)))]
return outliers
# Outlier Checking
num = ["age", "trestbps", "chol", "thalach", "oldpeak", "ca"]
outliers = find_outliers_IQR(heart[num])
outliers.notnull().sum()
# Extreme Value Checking
def find_ekstrem_IQR(df):
q1 = df.quantile(0.25)
q3 = df.quantile(0.75)
IQR = q3 - q1
ekstrem = df[((df < (q1 - 3 * IQR)) | (df > (q3 + 3 * IQR)))]
return ekstrem
# Extreme Value Checking
num = ["age", "trestbps", "chol", "thalach", "oldpeak", "ca"]
ekstrem = find_ekstrem_IQR(heart[num])
ekstrem.notnull().sum()
# Outlier checking is performed on the interval- or ratio-scaled columns, namely the **variables age, trestbps, chol, thalach, oldpeak, and ca**.
# - The age variable has no outliers
# - The trestbps variable has 9 outliers
# - The chol variable has 5 outliers
# - The thalach variable has 1 outlier
# - The oldpeak variable has 5 outliers
# - The ca variable has 24 outliers
# After the extreme-value check, there is one extreme value, found in the chol variable. Based on a journal article with the same case study and dataset, the author keeps these outliers and uses them in the subsequent steps.
#
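# The author keeps the flagged outliers. Purely as an illustration of an alternative treatment (not used in this
# notebook), the same IQR fences could be used to cap the numeric columns instead:
capped = heart.copy()
for col in num:
    q1, q3 = heart[col].quantile(0.25), heart[col].quantile(0.75)
    iqr = q3 - q1
    # clip values outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR] back to the fence values (winsorization-style capping)
    capped[col] = heart[col].clip(q1 - 1.5 * iqr, q3 + 1.5 * iqr)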
# Data Imbalance Check
(
alt.Chart(heart)
.mark_bar()
.encode(
x=alt.X("target:N", title="Heart Disease (0:tidak, 1:ya)"),
y=alt.Y("count(target):Q", title="Count of target"),
tooltip="count(target):Q",
)
.properties(height=300, width=400, title="Barplot of Heart Disease Status")
)
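# A short sketch to put exact numbers on the class balance shown in the chart above:
heart["target"].value_counts()  # 0 = no heart disease, 1 = heart disease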
# Descriptive Statistics
heart.describe()
# **Numeric Features**
# * `age` :
# Patients have a mean age of 54.42 years with a standard deviation of 9.05 years, meaning patient age is centered around 54.42 years with a spread of 9.05 years. The oldest patient is 77 years old and the youngest is 29 years old.
# * `trestbps` :
# Patients have a mean resting blood pressure of 131.6 mmHg with a standard deviation of 17.56 mmHg, meaning resting blood pressure is centered around 131.6 mmHg with a spread of 17.56 mmHg. The highest resting blood pressure is 200 mmHg and the lowest is 94 mmHg.
# * `chol` :
# Patients have a mean serum cholesterol of 246.5 mg/dl with a standard deviation of 51.75 mg/dl, meaning serum cholesterol is centered around 246.5 mg/dl with a spread of 51.75 mg/dl. The highest serum cholesterol is 564 mg/dl and the lowest is 126 mg/dl.
# * `thalach` :
# Patients have a mean maximum heart rate of 149.57 with a standard deviation of 22.9, meaning the maximum heart rate is centered around 149.57 with a spread of 22.9. The highest maximum heart rate is 202 and the lowest is 71.
# * `oldpeak` :
# Patients have a mean ST depression induced by exercise relative to rest of 1.04 with a standard deviation of 1.16, meaning the data are centered around 1.04 with a spread of 1.16. The highest ST depression induced by exercise relative to rest is 6.2, and some patients have no ST depression at all.
# * `ca` :
# Patients have a mean number of major vessels colored by fluoroscopy of 0.71 with a standard deviation of about 1, meaning the data are centered around 0.71 with a spread of about 1. The highest number of major vessels colored by fluoroscopy is 4, and some patients have none.
# **Categorical Features**
# This dataset has categorical features, namely `sex`, `cp`, `fbs`, `restecg`, `exang`, `slope`, and `thal`; the distribution and characteristics of each feature are shown with count plots in the sections below.
# **Check the correlation between features**
# The correlation between the numeric features is shown with the Spearman method. The author chose this method because Spearman correlation captures relationships between variables whether they are linear or not, as long as they are monotonic.
heart[num].corr(method="spearman")
corr_matrix = heart[num].corr(method="spearman")
fig, ax = plt.subplots(figsize=(15, 10))
ax = sns.heatmap(corr_matrix, annot=True, linewidths=0.5, cmap="RdBu", fmt=".2f")
# The output above shows that there is no strong multicollinearity among the numeric variables; no pair of features has a high correlation coefficient.
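# A minimal sketch to support the statement above: the largest absolute off-diagonal Spearman correlation.
# (Assumption: importing numpy here is acceptable even though the notebook has not used it so far.)
import numpy as np

abs_corr = corr_matrix.abs()
np.fill_diagonal(abs_corr.values, 0)  # ignore the trivial self-correlations on the diagonal
print(abs_corr.max().max())  # a value well below 0.8 is consistent with "no multicollinearity"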
# # **Risk Factor of Heart Disease**
# ## **1. Age**
# Histogram Age
base = alt.Chart(heart)
bar = base.mark_bar().encode(
x=alt.X("age:Q", bin=True, title="Age"),
y=alt.Y("count()", title="Count"),
tooltip=["age"],
)
rule = base.mark_rule(color="red").encode(
x="mean(age):Q", tooltip=["mean(age)"], size=alt.value(5)
)
bar + rule
# The histogram above shows the distribution of patient age, with the red line marking the mean age. Most patients in this dataset are around 50-60 years old. Overall, the mean patient age is 54.42 years. Patient age also appears to be approximately normally distributed.
# ## **2. Sex**
(
alt.Chart(heart)
.mark_bar()
.encode(
x=alt.X("sex:N", title="Sex (0:female, 1:male)"),
y=alt.Y("count(sex):Q", title="Count of Sex"),
color=alt.Color("target:N", title="heart disease"),
tooltip="count(sex):Q",
)
.properties(height=300, width=400, title="Barplot of Sex")
)
# The barplot above shows the distribution of the Sex variable with categories 0 (female) and 1 (male).
# - Most patients in this dataset are male, with a male-to-female ratio of about 2:1.
# - Next, splitting by the target variable, where 0 means the patient does not have heart disease and 1 means the patient has heart disease:
# - Of the 96 female patients, 72 have heart disease, i.e. a disease to non-disease ratio of 3:1 for women.
# - Of the 206 male patients, 92 have heart disease (92 with disease versus 114 without), i.e. a disease to non-disease ratio of roughly 1:1.2 for men.
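# The counts quoted above can be reproduced with a crosstab (a small sketch; any categorical feature can be
# swapped in for "sex"):
pd.crosstab(heart["sex"], heart["target"], margins=True)  # rows: sex (0 = female, 1 = male), columns: target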
# ## **3. Cp (Chest Pain Type)**
(
alt.Chart(heart)
.mark_bar()
.encode(
x=alt.X("cp:N", title="Chest-pain type"),
y=alt.Y("count(cp):Q", title="Count"),
color=alt.Color("target:N", title="heart disease"),
tooltip="count(cp):Q",
)
.properties(height=300, width=400, title="Barplot of Chest-pain type")
)
# The barplot above shows the distribution of the cp (chest-pain type) variable, with categories 0 (typical angina), 1 (atypical angina), 2 (non-anginal pain), and 3 (asymptomatic).
# - Most patients in this dataset have typical angina, and asymptomatic chest pain is very rare.
# - Next, splitting by the target variable, where 0 means no heart disease and 1 means heart disease:
# - There are 143 patients with typical angina; 39 of them have heart disease, a disease to non-disease ratio of about 1:3 for typical angina.
# - There are 50 patients with atypical angina; 41 of them have heart disease, a ratio of about 5:1 for atypical angina.
# - There are 86 patients with non-anginal pain; 68 of them have heart disease, a ratio of about 4:1 for non-anginal pain.
# - There are 23 patients with asymptomatic chest pain; 16 of them have heart disease, a ratio of about 2:1 for asymptomatic chest pain.
# ## **4. trestbps (Resting Blood Pressure)**
base = alt.Chart(heart)
bar = base.mark_bar().encode(
x=alt.X("trestbps:Q", bin=True, title="Resting Blood Pressure"),
y=alt.Y("count()", title="Count"),
tooltip=["trestbps"],
)
rule = base.mark_rule(color="red").encode(
x="mean(trestbps):Q", tooltip=["mean(trestbps)"], size=alt.value(5)
)
bar + rule
# Most patients have a resting blood pressure of about 120-160 mmHg. Overall, the mean resting blood pressure is 131.6 mmHg. The resting blood pressure variable is also close to normally distributed.
# ## **5. Chol (Serum Cholesterol)**
base = alt.Chart(heart)
bar = base.mark_bar().encode(
x=alt.X("chol:Q", bin=True, title="Serum Cholesterol"),
y=alt.Y("count()", title="Count"),
tooltip=["chol"],
)
rule = base.mark_rule(color="red").encode(
x="mean(chol):Q", tooltip=["mean(chol)"], size=alt.value(5)
)
bar + rule
# Most patients have serum cholesterol of about 150-250 mg/dl, and overall the mean serum cholesterol is 246.5 mg/dl. A few patients have very high serum cholesterol, around 550-600 mg/dl. The distribution of this variable is right-skewed.
# ## **6. fbs (Fasting Blood Sugar)**
(
alt.Chart(heart)
.mark_bar()
.encode(
x=alt.X("fbs:N", title="Fasting Blood Sugar (0:<=120 mg/dl, 1:>120 mg/dl) "),
y=alt.Y("count(fbs):Q", title="Count of fbs"),
color=alt.Color("target:N", title="heart disease"),
tooltip="count(fbs):Q",
)
.properties(height=300, width=400, title="Barplot of Fasting Blood Sugar")
)
# The barplot above shows the distribution of the Fasting Blood Sugar variable with categories 0 (<= 120 mg/dl) and 1 (> 120 mg/dl).
# - Most patients in this dataset have fasting blood sugar <= 120 mg/dl; the ratio of <= 120 mg/dl to > 120 mg/dl is about 6:1.
# - Next, splitting by the target variable, where 0 means no heart disease and 1 means heart disease:
# - Of the 257 patients with fasting blood sugar <= 120 mg/dl, 141 have heart disease, so the numbers of disease and non-disease patients are almost equal.
# - Of the 45 patients with fasting blood sugar > 120 mg/dl, 23 have heart disease, so the numbers of disease and non-disease patients are almost equal.
# ## **7. Restecg (resting electrocardiographic results)**
(
alt.Chart(heart)
.mark_bar()
.encode(
x=alt.X("restecg:N", title="restecg"),
y=alt.Y("count(restecg):Q", title="Count of restecg"),
color=alt.Color("target:N", title="heart disease"),
tooltip="count(restecg):Q",
)
.properties(
height=300, width=400, title="Barplot of Resting Electrocardiographic Results"
)
)
# The barplot above shows the distribution of the Resting Electrocardiographic Results variable with categories 0 (normal), 1 (having ST-T wave abnormality), and 2 (left ventricular hypertrophy).
# - Most patients in this dataset have either a normal resting electrocardiographic result or an ST-T wave abnormality; a left ventricular hypertrophy result is very rare.
# - Next, splitting by the target variable, where 0 means no heart disease and 1 means heart disease:
# - Of the 147 patients with a normal resting electrocardiographic result, 68 have heart disease, so the numbers of disease and non-disease patients are almost equal.
# - Of the 151 patients with an ST-T wave abnormality, 95 have heart disease, a disease to non-disease ratio of about 2:1.
# - Of the 4 patients with a left ventricular hypertrophy result, 1 has heart disease.
# ## **8. thalach (Max Heart Rate Achieved)**
base = alt.Chart(heart)
bar = base.mark_bar().encode(
x=alt.X("thalach:Q", bin=True, title="Max Heart Rate Achieved"),
y=alt.Y("count()", title="Count"),
tooltip=["thalach"],
)
rule = base.mark_rule(color="red").encode(
x="mean(thalach):Q", tooltip=["mean(thalach)"], size=alt.value(5)
)
bar + rule
# The histogram above shows the distribution of the Max Heart Rate Achieved variable, with the red line marking the mean. Most patients in this dataset have a max heart rate achieved of about 160-180. Overall, the mean max heart rate achieved is 149.6. This variable also appears to be left-skewed.
# ## **9. Exang (Exercise-induced angina)**
(
alt.Chart(heart)
.mark_bar()
.encode(
x=alt.X("exang:N", title="Angina included by exercise (0:no. 1:yes)"),
y=alt.Y("count(exang):Q", title="Count of exang"),
color=alt.Color("target:N", title="heart disease"),
tooltip="count(exang):Q",
)
.properties(height=300, width=400, title="Barplot of Angina included by exercise")
)
# The barplot above shows the distribution of the exercise-induced angina variable with categories 0 (no) and 1 (yes).
# - Most patients in this dataset do not have exercise-induced angina; the ratio of patients with to without exercise-induced angina is about 1:2.
# - Splitting by the target variable, where 0 means no heart disease and 1 means heart disease:
# - There are 203 patients without exercise-induced angina; 141 of them have heart disease, a disease to non-disease ratio of about 2:1.
# - There are 99 patients with exercise-induced angina; 23 of them have heart disease, a ratio of about 1:3.
# ## **10. Oldpeak (ST depression induced by exercise relative to rest)**
base = alt.Chart(heart)
bar = base.mark_bar().encode(
x=alt.X(
"oldpeak:Q",
bin=True,
title="ST depression induced by exercise relative to rest",
),
y=alt.Y("count()", title="Count"),
tooltip=["oldpeak"],
)
rule = base.mark_rule(color="red").encode(
x="mean(oldpeak):Q", tooltip=["mean(oldpeak)"], size=alt.value(5)
)
bar + rule
# Most patients have an ST depression induced by exercise relative to rest of less than 1, and many have none at all. Overall, the mean ST depression induced by exercise relative to rest is 1.04. A few patients have high values, around 6-7. The distribution of this variable is right-skewed.
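# The skewness statements (chol and oldpeak right-skewed, thalach left-skewed) can be checked numerically with a
# short sketch: a positive skew() value means right-skewed, a negative value means left-skewed.
heart[["chol", "thalach", "oldpeak"]].skew()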
# ## **11. slope (Peak exercise ST segment)**
(
alt.Chart(heart)
.mark_bar()
.encode(
x=alt.X(
"slope:N",
title="Peak exercise ST segment (0:upsloping, 1:flat, 2:downsloping)",
),
y=alt.Y("count(slope):Q", title="Count of slope"),
color=alt.Color("target:N", title="heart disease"),
tooltip="count(slope):Q",
)
.properties(height=300, width=400, title="Barplot of Peak exercise ST segment")
)
# The barplot above shows the distribution of the peak exercise ST segment variable with categories 0 (upsloping), 1 (flat), and 2 (downsloping).
# - Most patients in this dataset have a flat or downsloping peak exercise ST segment; an upsloping segment is rare.
# - Splitting by the target variable, where 0 means no heart disease and 1 means heart disease:
# - There are 21 patients with an upsloping peak exercise ST segment; 9 of them have heart disease, so the numbers of disease and non-disease patients are almost equal.
# - There are 140 patients with a flat segment; 49 of them have heart disease, a disease to non-disease ratio of about 1:2.
# - There are 141 patients with a downsloping segment; 106 of them have heart disease, a ratio of about 3:1.
# ## **12. ca (Number of major vessels colored by fluoroscopy)**
# Number of major vessels colored by fluoroscopy is a numeric variable with a range of 0-4. That range is small enough that visualizing it with a barplot is reasonable.
(
alt.Chart(heart)
.mark_bar()
.encode(
x=alt.X("ca:N", title="Number of major vessels colored by fluoroscopy"),
y=alt.Y("count(ca):Q", title="Count of ca"),
color=alt.Color("target:N", title="heart disease"),
tooltip="count(ca):Q",
)
.properties(
height=300,
width=400,
title="Barplot of Number of major vessels colored by fluoroscopy",
)
)
# Most patients have no major vessels colored by fluoroscopy, and most of those patients have heart disease. Very few patients have as many as 4 major vessels colored by fluoroscopy.
# ## **13. thal (thalassemia)**
(
alt.Chart(heart)
.mark_bar()
.encode(
x=alt.X("thal:N", title="Thalassemia"),
y=alt.Y("count(thal):Q", title="Count of thal"),
color=alt.Color("target:N", title="heart disease"),
tooltip="count(thal):Q",
)
.properties(height=300, width=400, title="Barplot of Thalassemia")
)
# The barplot above shows the distribution of the thalassemia variable. Most patients in this dataset fall in thalassemia category 2, while category 0 is rare. Looking at the target variable, where 0 means no heart disease and 1 means heart disease, most patients with thalassemia type 2 have heart disease.
#
# Pairplot
sns.pairplot(
data=heart[["age", "trestbps", "chol", "thalach", "oldpeak", "ca", "target"]],
hue="target",
diag_kind="kde",
height=2,
)
plt.show()
# The output above is a pairplot; the off-diagonal panels are scatterplots. The scatterplot of each feature against the target tends to form a sigmoid-like shape, while the diagonal panels show the distribution of each feature.
# - For the age variable, patients with and without heart disease have a fairly similar spread of ages.
# - For trestbps, patients without heart disease tend to have slightly higher resting blood pressure than patients with heart disease.
# - For chol, patients with heart disease tend to have higher serum cholesterol (mg/dl) than patients without heart disease.
# - For thalach, patients with heart disease tend to have a higher maximum heart rate achieved than patients without heart disease.
# - For oldpeak, patients without heart disease tend to have somewhat higher ST depression induced by exercise relative to rest than patients with heart disease.
# - For ca, patients with and without heart disease have a fairly similar number of major vessels colored by fluoroscopy.
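# A compact numeric companion to the pairplot reading above (an optional sketch): per-class means of the numeric
# features, with row 0 = no heart disease and row 1 = heart disease.
heart.groupby("target")[num].mean()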
# # **Automated EDA**
# ## **dataprep package**
from dataprep.eda import create_report
create_report(heart).show()
# # **Feature Analysis**
# ## **Variabel Numerik vs Variabel Target**
# **Point-biserial correlation coefficient:** This is a measure specifically designed for assessing the correlation between a binary (dichotomous) nominal variable and a numeric variable. It calculates the correlation between the binary variable (e.g., presence or absence of a characteristic) and the numeric variable (e.g., continuous or discrete variable).
from scipy.stats import pointbiserialr
numeric_vars = heart[
["age", "trestbps", "chol", "thalach", "oldpeak", "ca"]
] # Numeric independent variables
# Loop through each numeric variable and calculate point-biserial correlation coefficient
for col in numeric_vars:
correlation_coefficient, p_value = pointbiserialr(
heart["target"], numeric_vars[col]
)
print(
f"Point-Biserial Correlation Coefficient for {col}: {correlation_coefficient:.3f}"
)
# The features chol, trestbps, and age stand out because each has only a weak correlation with the target variable: -0.081, -0.146, and -0.221, respectively.
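# pointbiserialr also returns a p-value, which the loop above discards. A small optional sketch that reports it,
# so the strength of each correlation can be paired with its significance:
for col in numeric_vars:
    r, p = pointbiserialr(heart["target"], numeric_vars[col])
    print(f"{col}: r = {r:.3f}, p-value = {p:.4f}")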
# ## **Chi Square Test**
# The chi-square test can be used to test the association between a nominal dependent variable and a categorical independent variable, whether it is nominal or ordinal.
from sklearn.feature_selection import chi2
X = heart.drop(
["target", "age", "trestbps", "chol", "thalach", "oldpeak", "ca"], axis=1
)
y = heart["target"]
chi_scores = chi2(X, y)
p_values = pd.Series(chi_scores[1], index=X.columns)
p_values.sort_values(ascending=False, inplace=True)
p_values.plot.bar()
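# A follow-up sketch (assuming the conventional alpha = 0.05): list the categorical features whose chi-square
# p-value indicates an association with the target.
alpha = 0.05
significant = p_values[p_values < alpha].index.tolist()
print("Categorical features associated with target at alpha = 0.05:", significant)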
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/099/129099654.ipynb
|
heart-disease-dataset
|
johnsmith88
|
[{"Id": 129099654, "ScriptId": 38358527, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10032783, "CreationDate": "05/11/2023 02:33:41", "VersionNumber": 1.0, "Title": "Risk Factor of Heart Disease_Assignment 2", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 459.0, "LinesInsertedFromPrevious": 459.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 184856052, "KernelVersionId": 129099654, "SourceDatasetVersionId": 477177}]
|
[{"Id": 477177, "DatasetId": 216167, "DatasourceVersionId": 493143, "CreatorUserId": 3308439, "LicenseName": "Unknown", "CreationDate": "06/06/2019 15:33:55", "VersionNumber": 2.0, "Title": "Heart Disease Dataset", "Slug": "heart-disease-dataset", "Subtitle": "Public Health Dataset", "Description": "### Context\n\nThis data set dates from 1988 and consists of four databases: Cleveland, Hungary, Switzerland, and Long Beach V. It contains 76 attributes, including the predicted attribute, but all published experiments refer to using a subset of 14 of them. The \"target\" field refers to the presence of heart disease in the patient. It is integer valued 0 = no disease and 1 = disease.\n\n\n### Content\n\nAttribute Information: \n> 1. age \n> 2. sex \n> 3. chest pain type (4 values) \n> 4. resting blood pressure \n> 5. serum cholestoral in mg/dl \n> 6. fasting blood sugar > 120 mg/dl\n> 7. resting electrocardiographic results (values 0,1,2)\n> 8. maximum heart rate achieved \n> 9. exercise induced angina \n> 10. oldpeak = ST depression induced by exercise relative to rest \n> 11. the slope of the peak exercise ST segment \n> 12. number of major vessels (0-3) colored by flourosopy \n> 13. thal: 0 = normal; 1 = fixed defect; 2 = reversable defect\nThe names and social security numbers of the patients were recently removed from the database, replaced with dummy values.", "VersionNotes": "Update data", "TotalCompressedBytes": 38114.0, "TotalUncompressedBytes": 38114.0}]
|
[{"Id": 216167, "CreatorUserId": 3308439, "OwnerUserId": 3308439.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 477177.0, "CurrentDatasourceVersionId": 493143.0, "ForumId": 227259, "Type": 2, "CreationDate": "06/04/2019 02:58:45", "LastActivityDate": "06/04/2019", "TotalViews": 578180, "TotalDownloads": 97361, "TotalVotes": 747, "TotalKernels": 308}]
|
[{"Id": 3308439, "UserName": "johnsmith88", "DisplayName": "David Lapp", "RegisterDate": "06/04/2019", "PerformanceTier": 0}]
|
|
[{"heart-disease-dataset/heart.csv": {"column_names": "[\"age\", \"sex\", \"cp\", \"trestbps\", \"chol\", \"fbs\", \"restecg\", \"thalach\", \"exang\", \"oldpeak\", \"slope\", \"ca\", \"thal\", \"target\"]", "column_data_types": "{\"age\": \"int64\", \"sex\": \"int64\", \"cp\": \"int64\", \"trestbps\": \"int64\", \"chol\": \"int64\", \"fbs\": \"int64\", \"restecg\": \"int64\", \"thalach\": \"int64\", \"exang\": \"int64\", \"oldpeak\": \"float64\", \"slope\": \"int64\", \"ca\": \"int64\", \"thal\": \"int64\", \"target\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1025 entries, 0 to 1024\nData columns (total 14 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 age 1025 non-null int64 \n 1 sex 1025 non-null int64 \n 2 cp 1025 non-null int64 \n 3 trestbps 1025 non-null int64 \n 4 chol 1025 non-null int64 \n 5 fbs 1025 non-null int64 \n 6 restecg 1025 non-null int64 \n 7 thalach 1025 non-null int64 \n 8 exang 1025 non-null int64 \n 9 oldpeak 1025 non-null float64\n 10 slope 1025 non-null int64 \n 11 ca 1025 non-null int64 \n 12 thal 1025 non-null int64 \n 13 target 1025 non-null int64 \ndtypes: float64(1), int64(13)\nmemory usage: 112.2 KB\n", "summary": "{\"age\": {\"count\": 1025.0, \"mean\": 54.43414634146342, \"std\": 9.072290233244278, \"min\": 29.0, \"25%\": 48.0, \"50%\": 56.0, \"75%\": 61.0, \"max\": 77.0}, \"sex\": {\"count\": 1025.0, \"mean\": 0.6956097560975609, \"std\": 0.4603733241196493, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 1.0}, \"cp\": {\"count\": 1025.0, \"mean\": 0.9424390243902439, \"std\": 1.029640743645865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 2.0, \"max\": 3.0}, \"trestbps\": {\"count\": 1025.0, \"mean\": 131.61170731707318, \"std\": 17.516718005376408, \"min\": 94.0, \"25%\": 120.0, \"50%\": 130.0, \"75%\": 140.0, \"max\": 200.0}, \"chol\": {\"count\": 1025.0, \"mean\": 246.0, \"std\": 51.59251020618206, \"min\": 126.0, \"25%\": 211.0, \"50%\": 240.0, \"75%\": 275.0, \"max\": 564.0}, \"fbs\": {\"count\": 1025.0, \"mean\": 0.14926829268292682, \"std\": 0.3565266897271575, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"restecg\": {\"count\": 1025.0, \"mean\": 0.5297560975609756, \"std\": 0.5278775668748921, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 2.0}, \"thalach\": {\"count\": 1025.0, \"mean\": 149.11414634146342, \"std\": 23.005723745977207, \"min\": 71.0, \"25%\": 132.0, \"50%\": 152.0, \"75%\": 166.0, \"max\": 202.0}, \"exang\": {\"count\": 1025.0, \"mean\": 0.33658536585365856, \"std\": 0.47277237600371186, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 1.0}, \"oldpeak\": {\"count\": 1025.0, \"mean\": 1.0715121951219515, \"std\": 1.175053255150176, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.8, \"75%\": 1.8, \"max\": 6.2}, \"slope\": {\"count\": 1025.0, \"mean\": 1.3853658536585365, \"std\": 0.6177552671745918, \"min\": 0.0, \"25%\": 1.0, \"50%\": 1.0, \"75%\": 2.0, \"max\": 2.0}, \"ca\": {\"count\": 1025.0, \"mean\": 0.7541463414634146, \"std\": 1.0307976650242823, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 4.0}, \"thal\": {\"count\": 1025.0, \"mean\": 2.32390243902439, \"std\": 0.6206602380510298, \"min\": 0.0, \"25%\": 2.0, \"50%\": 2.0, \"75%\": 3.0, \"max\": 3.0}, \"target\": {\"count\": 1025.0, \"mean\": 0.5131707317073171, \"std\": 0.5000704980788014, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 1.0}}", "examples": 
"{\"age\":{\"0\":52,\"1\":53,\"2\":70,\"3\":61},\"sex\":{\"0\":1,\"1\":1,\"2\":1,\"3\":1},\"cp\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"trestbps\":{\"0\":125,\"1\":140,\"2\":145,\"3\":148},\"chol\":{\"0\":212,\"1\":203,\"2\":174,\"3\":203},\"fbs\":{\"0\":0,\"1\":1,\"2\":0,\"3\":0},\"restecg\":{\"0\":1,\"1\":0,\"2\":1,\"3\":1},\"thalach\":{\"0\":168,\"1\":155,\"2\":125,\"3\":161},\"exang\":{\"0\":0,\"1\":1,\"2\":1,\"3\":0},\"oldpeak\":{\"0\":1.0,\"1\":3.1,\"2\":2.6,\"3\":0.0},\"slope\":{\"0\":2,\"1\":0,\"2\":0,\"3\":2},\"ca\":{\"0\":2,\"1\":0,\"2\":0,\"3\":1},\"thal\":{\"0\":3,\"1\":3,\"2\":3,\"3\":3},\"target\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0}}"}}]
| true | 1 |
<start_data_description><data_path>heart-disease-dataset/heart.csv:
<column_names>
['age', 'sex', 'cp', 'trestbps', 'chol', 'fbs', 'restecg', 'thalach', 'exang', 'oldpeak', 'slope', 'ca', 'thal', 'target']
<column_types>
{'age': 'int64', 'sex': 'int64', 'cp': 'int64', 'trestbps': 'int64', 'chol': 'int64', 'fbs': 'int64', 'restecg': 'int64', 'thalach': 'int64', 'exang': 'int64', 'oldpeak': 'float64', 'slope': 'int64', 'ca': 'int64', 'thal': 'int64', 'target': 'int64'}
<dataframe_Summary>
{'age': {'count': 1025.0, 'mean': 54.43414634146342, 'std': 9.072290233244278, 'min': 29.0, '25%': 48.0, '50%': 56.0, '75%': 61.0, 'max': 77.0}, 'sex': {'count': 1025.0, 'mean': 0.6956097560975609, 'std': 0.4603733241196493, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 1.0, 'max': 1.0}, 'cp': {'count': 1025.0, 'mean': 0.9424390243902439, 'std': 1.029640743645865, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 2.0, 'max': 3.0}, 'trestbps': {'count': 1025.0, 'mean': 131.61170731707318, 'std': 17.516718005376408, 'min': 94.0, '25%': 120.0, '50%': 130.0, '75%': 140.0, 'max': 200.0}, 'chol': {'count': 1025.0, 'mean': 246.0, 'std': 51.59251020618206, 'min': 126.0, '25%': 211.0, '50%': 240.0, '75%': 275.0, 'max': 564.0}, 'fbs': {'count': 1025.0, 'mean': 0.14926829268292682, 'std': 0.3565266897271575, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'restecg': {'count': 1025.0, 'mean': 0.5297560975609756, 'std': 0.5278775668748921, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 1.0, 'max': 2.0}, 'thalach': {'count': 1025.0, 'mean': 149.11414634146342, 'std': 23.005723745977207, 'min': 71.0, '25%': 132.0, '50%': 152.0, '75%': 166.0, 'max': 202.0}, 'exang': {'count': 1025.0, 'mean': 0.33658536585365856, 'std': 0.47277237600371186, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 1.0}, 'oldpeak': {'count': 1025.0, 'mean': 1.0715121951219515, 'std': 1.175053255150176, 'min': 0.0, '25%': 0.0, '50%': 0.8, '75%': 1.8, 'max': 6.2}, 'slope': {'count': 1025.0, 'mean': 1.3853658536585365, 'std': 0.6177552671745918, 'min': 0.0, '25%': 1.0, '50%': 1.0, '75%': 2.0, 'max': 2.0}, 'ca': {'count': 1025.0, 'mean': 0.7541463414634146, 'std': 1.0307976650242823, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 4.0}, 'thal': {'count': 1025.0, 'mean': 2.32390243902439, 'std': 0.6206602380510298, 'min': 0.0, '25%': 2.0, '50%': 2.0, '75%': 3.0, 'max': 3.0}, 'target': {'count': 1025.0, 'mean': 0.5131707317073171, 'std': 0.5000704980788014, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 1.0, 'max': 1.0}}
<dataframe_info>
RangeIndex: 1025 entries, 0 to 1024
Data columns (total 14 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 age 1025 non-null int64
1 sex 1025 non-null int64
2 cp 1025 non-null int64
3 trestbps 1025 non-null int64
4 chol 1025 non-null int64
5 fbs 1025 non-null int64
6 restecg 1025 non-null int64
7 thalach 1025 non-null int64
8 exang 1025 non-null int64
9 oldpeak 1025 non-null float64
10 slope 1025 non-null int64
11 ca 1025 non-null int64
12 thal 1025 non-null int64
13 target 1025 non-null int64
dtypes: float64(1), int64(13)
memory usage: 112.2 KB
<some_examples>
{'age': {'0': 52, '1': 53, '2': 70, '3': 61}, 'sex': {'0': 1, '1': 1, '2': 1, '3': 1}, 'cp': {'0': 0, '1': 0, '2': 0, '3': 0}, 'trestbps': {'0': 125, '1': 140, '2': 145, '3': 148}, 'chol': {'0': 212, '1': 203, '2': 174, '3': 203}, 'fbs': {'0': 0, '1': 1, '2': 0, '3': 0}, 'restecg': {'0': 1, '1': 0, '2': 1, '3': 1}, 'thalach': {'0': 168, '1': 155, '2': 125, '3': 161}, 'exang': {'0': 0, '1': 1, '2': 1, '3': 0}, 'oldpeak': {'0': 1.0, '1': 3.1, '2': 2.6, '3': 0.0}, 'slope': {'0': 2, '1': 0, '2': 0, '3': 2}, 'ca': {'0': 2, '1': 0, '2': 0, '3': 1}, 'thal': {'0': 3, '1': 3, '2': 3, '3': 3}, 'target': {'0': 0, '1': 0, '2': 0, '3': 0}}
<end_description>
| 8,294 | 0 | 9,631 | 8,294 |
129099600
|
# ## Image recognition project with a database of malaria blood images
# ## Importing libraries
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import string
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras import regularizers
from tensorflow.keras.callbacks import Callback
# Callback class to stop training once accuracy reaches 95%
class TrainingCallback(Callback):
def on_epoch_end(self, epoch, logs={}):
if logs.get("accuracy") > 0.95:
print("Lo logramos, nuestro modelo llego a 95%, detenemos el entrenamiento")
self.model.stop_training = True
import keras_tuner as kt
from tensorflow import keras
from tensorflow.keras.callbacks import ModelCheckpoint
# ## Dataset directory paths
train_dir = "../input/malariadataset/Train"
test_dir = "../input/malariadataset/Test"
# ## Using Data Generators
train_datagen = ImageDataGenerator(rescale=1 / 255)
test_datagen = ImageDataGenerator(rescale=1 / 255, validation_split=0.2)
train_generator = train_datagen.flow_from_directory(
train_dir,
target_size=(142, 148),
batch_size=128,
class_mode="binary",
color_mode="grayscale",
subset="training",
)
validation_generator = test_datagen.flow_from_directory(
test_dir,
target_size=(142, 148),
batch_size=128,
class_mode="binary",
color_mode="grayscale",
subset="validation",
)
test_generator = test_datagen.flow_from_directory(
test_dir,
target_size=(142, 148),
batch_size=128,
class_mode="binary",
color_mode="grayscale",
)
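# Quick sanity check on the generators (an optional sketch): confirm the label-to-index mapping and the number of
# batches per epoch. The actual class names depend on the folder names inside the dataset.
print(train_generator.class_indices)
print(len(train_generator), len(validation_generator), len(test_generator))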
# ## Visualizing the dataset
def plotImages(images_arr):
fig, axes = plt.subplots(1, 5, figsize=(10, 10))
axes = axes.flatten()
for img, ax in zip(images_arr, axes):
ax.imshow(img[:, :, 0])
ax.axis("off")
plt.tight_layout()
plt.show()
sample_training_images, _ = next(train_generator)
plotImages(sample_training_images[:5])
# ## First model
model_base = tf.keras.models.Sequential(
[
tf.keras.layers.Flatten(input_shape=(142, 148, 1)),
tf.keras.layers.Dense(256, activation="relu"),
tf.keras.layers.Dense(128, activation="relu"),
tf.keras.layers.Dense(1, activation="sigmoid"),
]
)
# ## First training run
model_base.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
history = model_base.fit(
train_generator, epochs=20, validation_data=validation_generator
)
# ## Model evaluation
results = model_base.evaluate(test_generator)
# ## Visualizing the results
def visualizacion_resultados(history):
    train_acc = history.history["accuracy"]
    train_loss = history.history["loss"]
    val_acc = history.history["val_accuracy"]
    val_loss = history.history["val_loss"]
    epochs = [i for i in range(len(train_acc))]  # use the real number of epochs trained
    fig, ax = plt.subplots(1, 2)
    fig.set_size_inches(16, 9)
    ax[0].plot(epochs, train_acc, "go-", label="Training accuracy")
    ax[0].plot(epochs, val_acc, "ro-", label="Validation accuracy")
    ax[0].set_title("Training & validation accuracy")
    ax[0].legend()
    ax[0].set_xlabel("Epochs")
    ax[0].set_ylabel("Accuracy")
    ax[1].plot(epochs, train_loss, "go-", label="Training loss")
    ax[1].plot(epochs, val_loss, "ro-", label="Validation loss")
    ax[1].set_title("Training & validation loss")
    ax[1].legend()
    ax[1].set_xlabel("Epochs")
    ax[1].set_ylabel("Loss")
    plt.show()
visualizacion_resultados(history)
# ## Model with regularizers
model_optimizer = tf.keras.models.Sequential(
[
tf.keras.layers.Flatten(input_shape=(142, 148, 1)),
tf.keras.layers.Dense(
256, kernel_regularizer=regularizers.l2(1e-5), activation="relu"
),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(
128, kernel_regularizer=regularizers.l2(1e-5), activation="relu"
),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(1, activation="sigmoid"),
]
)
model_optimizer.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
history_optimizer = model_optimizer.fit(
train_generator, epochs=20, validation_data=validation_generator
)
results = model_optimizer.evaluate(test_generator)
visualizacion_resultados(history_optimizer)
# ## Adding convolutional layers
model_convolutional = tf.keras.models.Sequential(
[
tf.keras.layers.Conv2D(
25, (3, 3), activation="relu", input_shape=(142, 148, 1)
),
tf.keras.layers.MaxPool2D((2, 2)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(
256, kernel_regularizer=regularizers.l2(1e-5), activation="relu"
),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(
128, kernel_regularizer=regularizers.l2(1e-5), activation="relu"
),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(1, activation="sigmoid"),
]
)
model_convolutional.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
history_convolutional = model_convolutional.fit(
train_generator, epochs=20, validation_data=validation_generator
)
results = model_convolutional.evaluate(test_generator)
visualizacion_resultados(history_convolutional)
# ## Model with callbacks
model_callback = tf.keras.models.Sequential(
[
tf.keras.layers.Conv2D(
25, (3, 3), activation="relu", input_shape=(142, 148, 1)
),
tf.keras.layers.MaxPool2D((2, 2)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(
256, kernel_regularizer=regularizers.l2(1e-5), activation="relu"
),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(
128, kernel_regularizer=regularizers.l2(1e-5), activation="relu"
),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(1, activation="sigmoid"),
]
)
model_callback.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
callback = TrainingCallback()
history_callback = model_callback.fit(
train_generator,
epochs=20,
callbacks=[callback],
validation_data=validation_generator,
)
# ## Helper function that returns the base model
def get_model():
model = tf.keras.models.Sequential(
[
tf.keras.layers.Conv2D(
25, (3, 3), activation="relu", input_shape=(142, 148, 1)
),
tf.keras.layers.MaxPool2D((2, 2)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(
256, kernel_regularizer=regularizers.l2(1e-5), activation="relu"
),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(
128, kernel_regularizer=regularizers.l2(1e-5), activation="relu"
),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(1, activation="sigmoid"),
]
)
return model
# ## Model with early stopping
model_early = get_model()
model_early.summary()
model_early.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
callback_early = tf.keras.callbacks.EarlyStopping(
monitor="loss", patience=3, mode="auto"
)
history_early = model_early.fit(
train_generator,
epochs=20,
callbacks=[callback_early],
validation_data=validation_generator,
)
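# Possible variant (an assumption, not part of the original notebook): monitoring the
# validation loss and restoring the best weights is usually more robust than stopping
# on the training loss alone.
callback_early_val = tf.keras.callbacks.EarlyStopping(
    monitor="val_loss", patience=3, restore_best_weights=True
)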
# ## Model tuned with Keras Tuner (Hyperband)
def constructor_modelos(hp):
model = tf.keras.models.Sequential()
model.add(
tf.keras.layers.Conv2D(25, (3, 3), activation="relu", input_shape=(142, 148, 1))
)
model.add(tf.keras.layers.MaxPooling2D((2, 2)))
model.add(tf.keras.layers.Flatten())
hp_units = hp.Int("units", min_value=32, max_value=512, step=32)
model.add(
tf.keras.layers.Dense(
units=hp_units, activation="relu", kernel_regularizer=regularizers.l2(1e-5)
)
)
model.add(tf.keras.layers.Dropout(0.2))
model.add(
tf.keras.layers.Dense(
128, activation="relu", kernel_regularizer=regularizers.l2(1e-5)
)
)
model.add(tf.keras.layers.Dropout(0.2))
model.add(tf.keras.layers.Dense(1, activation="sigmoid"))
hp_learning_rate = hp.Choice("learning_rate", values=[1e-2, 1e-3, 1e-4])
model.compile(
optimizer=keras.optimizers.Adam(learning_rate=hp_learning_rate),
loss="binary_crossentropy",
metrics=["accuracy"],
)
return model
tuner = kt.Hyperband(
constructor_modelos,
objective="val_accuracy",
max_epochs=20,
factor=3,
directory="models/",
project_name="platzi-tunner",
)
tuner.search(train_generator, epochs=20, validation_data=validation_generator)
best_hps = tuner.get_best_hyperparameters(num_trials=1)[0]
print(best_hps.get("units"))
print(best_hps.get("learning_rate"))
hypermodel = tuner.hypermodel.build(best_hps)
history_hypermodel = hypermodel.fit(
train_generator,
epochs=20,
callbacks=[callback_early],
validation_data=validation_generator,
)
# ## Saving only the model architecture
config_dict = hypermodel.get_config()
print(config_dict)
# ## Creating a new model from the saved architecture
model_same_config = tf.keras.Sequential.from_config(config_dict)
model_same_config.summary()
# ## Saving and loading only the weights
model_weight = get_model()
model_weight.summary()
model_weight.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
checkpoint_path = "model_checkpoints/checkpoint"
checkpoint_weighs = ModelCheckpoint(
    filepath=checkpoint_path, save_freq="epoch", save_weights_only=True, verbose=1
)
history_weight = model_weight.fit(
train_generator,
epochs=20,
callbacks=[checkpoint_weighs],
validation_data=validation_generator,
)
# ## Saving the weights manually
model_weight.save_weights("model_manual/my_model")
# ## Rebuilding the architecture and loading the weights
model_weights2 = get_model()
model_weights2.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
model_weights2.load_weights(checkpoint_path)
model_weight.evaluate(test_generator)
model_weights2.evaluate(test_generator)
# ## Saving architecture and weights together
checkpoint_path = "model_checkpoints_complete"
checkpoint_weighs = ModelCheckpoint(
filepath=checkpoint_path,
    save_freq="epoch",
save_weights_only=False,
monitor="val_accuracy",
save_best_only=True,
verbose=1,
)
model_complete = get_model()
model_complete.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
)
history_complete = model_complete.fit(
train_generator,
epochs=20,
callbacks=[checkpoint_weighs],
validation_data=validation_generator,
)
# ## Saving architecture and weights manually
model_complete.save("saved_model_complete/mymodel")
# ## Loading the complete model
model_complete3 = tf.keras.models.load_model("saved_model_complete/mymodel")
model_complete3.evaluate(test_generator)
model_complete.evaluate(test_generator)
# ## Using the .h5 format to save models
model_complete3.save("my_model.h5")
model_complete4 = tf.keras.models.load_model("my_model.h5")
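# Hypothetical sanity check (not in the original notebook): a model reloaded from the
# .h5 file should produce exactly the same predictions as the model it was saved from.
sample_batch, _ = next(test_generator)
assert np.allclose(
    model_complete3.predict(sample_batch, verbose=0),
    model_complete4.predict(sample_batch, verbose=0),
)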
# ## Building a pre-trained model from Keras applications
train_generator_resize = train_datagen.flow_from_directory(
train_dir,
target_size=(150, 150),
batch_size=128,
class_mode="binary",
color_mode="rgb",
subset="training",
)
validation_generator_resize = test_datagen.flow_from_directory(
test_dir,
target_size=(150, 150),
batch_size=128,
class_mode="binary",
color_mode="rgb",
subset="validation",
)
test_generator_resize = test_datagen.flow_from_directory(
test_dir,
target_size=(150, 150),
batch_size=128,
class_mode="binary",
color_mode="rgb",
)
from tensorflow.keras.applications.inception_v3 import InceptionV3
pre_trained_model = InceptionV3(
include_top=False, input_tensor=tf.keras.layers.Input(shape=(150, 150, 3))
)
for layer in pre_trained_model.layers:
layer.trainable = False
pre_trained_model.summary()
# ## Adding a new classification head on top of the frozen base
last_layers = pre_trained_model.get_layer("mixed7")
last_output = last_layers.output
x = tf.keras.layers.Flatten()(last_output)
x = tf.keras.layers.Dense(128, activation="relu")(x)
x = tf.keras.layers.Dropout(0.2)(x)
x = tf.keras.layers.Dense(1, activation="sigmoid")(x)
model_keras = tf.keras.Model(pre_trained_model.input, x)
model_keras.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
model_keras.summary()
history_keras = model_keras.fit(
train_generator_resize, epochs=5, validation_data=validation_generator_resize
)
model_keras.evaluate(test_generator_resize)
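# Optional fine-tuning sketch (an assumption, not part of the original notebook): once
# the new head has converged, the top of the frozen InceptionV3 base can be unfrozen
# and trained with a much lower learning rate.
for layer in pre_trained_model.layers[-30:]:
    layer.trainable = True
model_keras.compile(
    optimizer=tf.keras.optimizers.Adam(learning_rate=1e-5),
    loss="binary_crossentropy",
    metrics=["accuracy"],
)
# model_keras.fit(train_generator_resize, epochs=2, validation_data=validation_generator_resize)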
# ## Building a pre-trained model from TensorFlow Hub
import tensorflow_hub as hub
module_url = "https://tfhub.dev/google/imagenet/mobilenet_v1_050_160/classification/4"
model_hub = tf.keras.Sequential(
[
tf.keras.layers.InputLayer(input_shape=(150, 150, 3)),
hub.KerasLayer(module_url, trainable=False),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation="relu"),
tf.keras.layers.Dropout(rate=0.2),
tf.keras.layers.Dense(1, activation="sigmoid"),
]
)
model_hub.build((None, 150, 150, 3))
model_hub.summary()
model_hub.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
history_hub = model_hub.fit(
train_generator_resize, epochs=5, validation_data=validation_generator_resize
)
model_hub.evaluate(test_generator_resize)
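# Minimal inference sketch (assumed usage, not in the original notebook): the sigmoid
# output is a probability, so a 0.5 threshold turns it into a hard class label.
images, labels = next(test_generator_resize)
probs = model_hub.predict(images, verbose=0)
preds = (probs > 0.5).astype(int).ravel()
print(preds[:10], labels[:10].astype(int))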
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/099/129099600.ipynb
| null | null |
[{"Id": 129099600, "ScriptId": 36948362, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1281150, "CreationDate": "05/11/2023 02:32:53", "VersionNumber": 1.0, "Title": "Malaria recognition with machine learning", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 455.0, "LinesInsertedFromPrevious": 455.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 4,272 | 0 | 4,272 | 4,272 |
||
129989296
|
<jupyter_start><jupyter_text>Smoking Dataset from UK
```
Survey data on smoking habits from the United Kingdom. The data set can be used for analyzing the demographic characteristics of smokers and types of tobacco consumed. A data frame with 1691 observations on the following 12 variables.
```
| Column | Description |
| --- | --- |
| gender | Gender with levels Female and Male. |
| age | Age. |
| marital_status | Marital status with levels Divorced, Married, Separated, Single and Widowed. |
| highest_qualification | Highest education level with levels A Levels, Degree, GCSE/CSE, GCSE/O Level, Higher/Sub Degree, No Qualification, ONC/BTEC and Other/Sub Degree |
| nationality | Nationality with levels British, English, Irish, Scottish, Welsh, Other, Refused and Unknown. |
| ethnicity | Ethnicity with levels Asian, Black, Chinese, Mixed, White and Refused Unknown. |
| gross_income | Gross income with levels Under 2,600, 2,600 to 5,200, 5,200 to 10,400, 10,400 to 15,600, 15,600 to 20,800, 20,800 to 28,600, 28,600 to 36,400, Above 36,400, Refused and Unknown. |
| region | Region with levels London, Midlands & East Anglia, Scotland, South East, South West, The North and Wales |
| smoke | Smoking status with levels No and Yes |
| amt_weekends | Number of cigarettes smoked per day on weekends. |
| amt_weekdays | Number of cigarettes smoked per day on weekdays. |
| type | Type of cigarettes smoked with levels Packets, Hand-Rolled, Both/Mainly Packets and Both/Mainly Hand-Rolled
|
# Source
National STEM Centre, Large Datasets from stats4schools, https://www.stem.org.uk/resources/elibrary/resource/28452/large-datasets-stats4schools.
Kaggle dataset identifier: smoking-dataset-from-uk
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv("/kaggle/input/smoking-dataset-from-uk/smoking.csv")
df.head()
df.drop(columns="Unnamed: 0", inplace=True)
df.info()
df.describe()
sns.set_theme()
box = sns.boxplot(x=df["amt_weekdays"])
sns.scatterplot(data=df, x="age", y="amt_weekdays")
sns.histplot(df, x="age", bins=81)
plt.title("Smokers by Age")
sns.histplot(df, x="amt_weekdays", bins=25)
df2 = (
df[df["smoke"] == "Yes"]
.groupby("highest_qualification")
.size()
.reset_index(name="total_smokers")
)
df2.head()
sns.barplot(df2, x="highest_qualification", y="total_smokers")
plt.xticks(rotation=45)
plt.title("Smokers by Highest Qualification")
df3 = df[
(df["smoke"] == "Yes")
& (df["gross_income"] != "Unknown")
& (df["gross_income"] != "Refused")
].reset_index()
income_levels = [
"Under 2,600",
"2,600 to 5,200",
"5,200 to 10,400",
"10,400 to 15,600",
"15,600 to 20,800",
"20,800 to 28,600",
"28,600 to 36,400",
"Above 36,400",
]
df3["gross_income"] = pd.Categorical(df3["gross_income"], categories=income_levels)
# df3 = df3.sort_values(by='gross_income')
df3 = df3.groupby("gross_income").size().reset_index(name="total_smokers")
df3.head(10)
sns.barplot(df3, x="gross_income", y="total_smokers")
plt.xticks(rotation=45)
plt.title("Smokers by Gross Income")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/989/129989296.ipynb
|
smoking-dataset-from-uk
|
utkarshx27
|
[{"Id": 129989296, "ScriptId": 38367428, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12374955, "CreationDate": "05/18/2023 01:09:21", "VersionNumber": 1.0, "Title": "EDA - Smokers in the UK", "EvaluationDate": "05/18/2023", "IsChange": true, "TotalLines": 57.0, "LinesInsertedFromPrevious": 57.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 186437333, "KernelVersionId": 129989296, "SourceDatasetVersionId": 5651804}]
|
[{"Id": 5651804, "DatasetId": 3248690, "DatasourceVersionId": 5727175, "CreatorUserId": 13364933, "LicenseName": "CC0: Public Domain", "CreationDate": "05/10/2023 05:41:12", "VersionNumber": 1.0, "Title": "Smoking Dataset from UK", "Slug": "smoking-dataset-from-uk", "Subtitle": "Demographic Characteristics & Tobacco Consumption Habits: UK Smoking Survey Data", "Description": "``` \nSurvey data on smoking habits from the United Kingdom. The data set can be used for analyzing the demographic characteristics of smokers and types of tobacco consumed. A data frame with 1691 observations on the following 12 variables.\n```\n| Column | Description |\n| --- | --- |\n| gender | Gender with levels Female and Male. |\n| age | Age. |\n| marital_status | Marital status with levels Divorced, Married, Separated, Single and Widowed. |\n| highest_qualification | Highest education level with levels A Levels, Degree, GCSE/CSE, GCSE/O Level, Higher/Sub Degree, No Qualification, ONC/BTEC and Other/Sub Degree |\n| nationality | Nationality with levels British, English, Irish, Scottish, Welsh, Other, Refused and Unknown. |\n| ethnicity | Ethnicity with levels Asian, Black, Chinese, Mixed, White and Refused Unknown. |\n| gross_income | Gross income with levels Under 2,600, 2,600 to 5,200, 5,200 to 10,400, 10,400 to 15,600, 15,600 to 20,800, 20,800 to 28,600, 28,600 to 36,400, Above 36,400, Refused and Unknown. |\n| region | Region with levels London, Midlands & East Anglia, Scotland, South East, South West, The North and Wales |\n| smoke | Smoking status with levels No and Yes |\n| amt_weekends | Number of cigarettes smoked per day on weekends. |\n| amt_weekdays | Number of cigarettes smoked per day on weekdays. |\n| type | Type of cigarettes smoked with levels Packets, Hand-Rolled, Both/Mainly Packets and Both/Mainly Hand-Rolled\n |\n\n# Source\nNational STEM Centre, Large Datasets from stats4schools, https://www.stem.org.uk/resources/elibrary/resource/28452/large-datasets-stats4schools.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3248690, "CreatorUserId": 13364933, "OwnerUserId": 13364933.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5651804.0, "CurrentDatasourceVersionId": 5727175.0, "ForumId": 3314043, "Type": 2, "CreationDate": "05/10/2023 05:41:12", "LastActivityDate": "05/10/2023", "TotalViews": 14838, "TotalDownloads": 2967, "TotalVotes": 58, "TotalKernels": 10}]
|
[{"Id": 13364933, "UserName": "utkarshx27", "DisplayName": "Utkarsh Singh", "RegisterDate": "01/21/2023", "PerformanceTier": 2}]
|
| false | 1 | 752 | 0 | 1,340 | 752 |
||
129989746
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objs as go
import plotly
plotly.offline.init_notebook_mode(connected=True)
# Ignore warnings
import warnings
warnings.filterwarnings("ignore")
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
from plotly.subplots import make_subplots
from plotly import graph_objects as go
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import classification_report
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from xgboost import XGBClassifier, XGBRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
# from sklearn.neighbors import KNeighborsClassifier
# from sklearn.naive_bayes import GaussianNB
from sklearn.experimental import enable_iterative_imputer
from sklearn.impute import IterativeImputer
from xgboost import XGBClassifier, XGBRegressor
import shap
# #Extract zip files
import zipfile
training = zipfile.ZipFile("../input/billion-word-imputation/train_v2.txt.zip")
training.extractall()
test = zipfile.ZipFile("../input/billion-word-imputation/test_v2.txt.zip")
test.extractall()
Data = pd.read_csv(
"../input/billion-word-imputation/train_v2.txt", error_bad_lines=False
)
# Data_itemid = Data[['itemid']]#Original Data_Event_ID = Data[['EventId']]
# Data_Label = Data[['Label']]
import os
import glob
filepath = "/kaggle/input/billion-word-imputation/"
for f in glob.glob(filepath + "*.txt"):
os.system("cat " + f + " >> test_v2.txt")
import csv
# Code by Shirsh Mall https://www.kaggle.com/code/shirshmall/search-for-higgs-boson-decay-modes
Data = pd.read_csv(
"../input/billion-word-imputation/train_v2.txt",
sep="\t",
error_bad_lines=False,
quoting=csv.QUOTE_NONE,
)
# Data_itemid = Data[['itemid']]#Original Data_Event_ID = Data[['EventId']]
# Data_Label = Data[['Label']]
Data_Test = pd.read_csv(
"../input/billion-word-imputation/test_v2.txt",
sep="\t",
error_bad_lines=False,
quoting=csv.QUOTE_NONE,
)
# Data_Test_itemid = Data_Test[['itemid']]
Data.head()
Data = pd.read_csv(
"../input/billion-word-imputation/train_v2.txt", error_bad_lines=False
)
# Data_itemid = Data[['itemid']]#Original Data_Event_ID = Data[['EventId']]
# Data_Label = Data[['Label']]
imp = open("../input/billion-word-imputation/train_v2.txt", "r").read()
print(imp[:53])
text = imp
# Since train_v2 is huge and kept trying to allocate more memory (crashing the session),
# I work with test_v2 instead and only preview the first 53 characters.
# Load Dataset
filename = "test_v2.txt"
text = open(filename, encoding="utf-8").read()
text = text.lower()
print("corpus length:", len(text))
# Find all the unique characters
chars = sorted(list(set(text)))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))
vocab_size = len(chars)
print("List of unique characters : \n", chars)
print("Number of unique characters : \n", vocab_size)
print("Character to integer mapping : \n", char_indices)
print(text[:53])
# All that work just for this? How is anyone supposed to impute words in a corpus this size? I could barely open it!
# Preprocessing Dataset
# Original below was 64, though it rendered: dic instead of dickens
max_seq_len = 68 # cut text in semi-redundant sequences of max_seq_len characters
step = 3
sentences = [] # list_X
next_chars = [] # list_Y
for i in range(0, len(text) - max_seq_len, step):
sentences.append(text[i : i + max_seq_len])
next_chars.append(text[i + max_seq_len])
print("nb sequences:", len(sentences))
num_sequences = len(sentences)
print("Number of sequences: ", num_sequences)
print(sentences[0])
# #Finally, one line: "He added that people should not mess with mother
# #Vectorization
print("Vectorization...")
train_X = np.zeros((len(sentences), max_seq_len, len(chars)), dtype=bool)
train_Y = np.zeros((len(sentences), len(chars)), dtype=bool)
# NOTE: the encoding loop below is left commented out (it needs a lot of RAM), so
# train_X / train_Y stay all-zero and the fit further down only exercises the pipeline.
# for i, sentence in enumerate(sentences):
#     for t, char in enumerate(sentence):
#         train_X[i, t, char_indices[char]] = 1
#         train_Y[i, char_indices[next_chars[i]]] = 1
print(train_X.shape)
print(train_Y.shape)
input_shape = (max_seq_len, vocab_size)
print(input_shape)
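# Memory-saving sketch (an assumption, not part of the original notebook): instead of
# materialising the full one-hot tensors, batches can be encoded on the fly with a
# plain Python generator and fed to model.fit via steps_per_epoch.
def one_hot_batches(sentences, next_chars, batch_size=128):
    while True:
        for start in range(0, len(sentences), batch_size):
            batch_s = sentences[start : start + batch_size]
            batch_c = next_chars[start : start + batch_size]
            x = np.zeros((len(batch_s), max_seq_len, vocab_size), dtype=np.float32)
            y = np.zeros((len(batch_s), vocab_size), dtype=np.float32)
            for i, sentence in enumerate(batch_s):
                for t, char in enumerate(sentence):
                    x[i, t, char_indices[char]] = 1.0
                y[i, char_indices[batch_c[i]]] = 1.0
            yield x, y
# Usage: model.fit(one_hot_batches(sentences, next_chars), steps_per_epoch=len(sentences) // 128, ...)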
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (
Dense,
Dropout,
LSTM,
Input,
Activation,
concatenate,
Bidirectional,
)
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint
# Build Model
model = Sequential()
model.add(Bidirectional(LSTM(64, return_sequences=True), input_shape=input_shape))
model.add(Bidirectional(LSTM(64)))
model.add(Dense(len(chars), activation="softmax"))
model.compile(loss="categorical_crossentropy", optimizer="rmsprop")
model.summary()
# Train Model
num_epochs = 3 # Original is 30
batch_size = 128
# model_path = "BiLSTM-DonaldTrumpRallies .h5"
# checkpoint = ModelCheckpoint(model_path, monitor='loss', save_best_only=True, verbose=1, mode='min')
# callbacks_list = [checkpoint]
model.fit(
train_X, train_Y, epochs=num_epochs, batch_size=batch_size, verbose=1
) # , callbacks=callbacks_list)
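# Generation sketch (assumed usage, not part of the original notebook): sample the next
# character from the trained model; the temperature controls how adventurous the
# sampling is. Assumes every character in `seed_text` appears in char_indices.
def sample_next_char(seed_text, temperature=1.0):
    x = np.zeros((1, max_seq_len, vocab_size), dtype=np.float32)
    for t, char in enumerate(seed_text[-max_seq_len:]):
        x[0, t, char_indices[char]] = 1.0
    preds = model.predict(x, verbose=0)[0]
    logits = np.log(np.maximum(preds, 1e-8)) / temperature
    probs = np.exp(logits) / np.sum(np.exp(logits))
    return indices_char[np.random.choice(len(probs), p=probs)]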
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/989/129989746.ipynb
| null | null |
[{"Id": 129989746, "ScriptId": 38616724, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3012786, "CreationDate": "05/18/2023 01:16:18", "VersionNumber": 1.0, "Title": "Billion Word Imputation Zip file", "EvaluationDate": "05/18/2023", "IsChange": true, "TotalLines": 193.0, "LinesInsertedFromPrevious": 193.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 10}]
| null | null | null | null |
| false | 0 | 1,845 | 10 | 1,845 | 1,845 |
||
129989976
|
<jupyter_start><jupyter_text>Uber Fares Dataset

### Description:
The project is about on world's largest taxi company Uber inc. In this project, we're looking to predict the fare for their future transactional cases. Uber delivers service to lakhs of customers daily. Now it becomes really important to manage their data properly to come up with new business ideas to get best results. Eventually, it becomes really important to estimate the fare prices accurately.
### The datset contains the following fields:
* key - a unique identifier for each trip
* fare_amount - the cost of each trip in usd
* pickup_datetime - date and time when the meter was engaged
* passenger_count - the number of passengers in the vehicle (driver entered value)
* pickup_longitude - the longitude where the meter was engaged
* pickup_latitude - the latitude where the meter was engaged
* dropoff_longitude - the longitude where the meter was disengaged
* dropoff_latitude - the latitude where the meter was disengaged
### Acknowledgement:
The dataset is referred from Kaggle.
### Objective:
- Understand the Dataset & cleanup (if required).
- Build Regression models to predict the fare price of uber ride.
- Also evaluate the models & compare their respective scores like R2, RMSE, etc.
Kaggle dataset identifier: uber-fares-dataset
<jupyter_script># # Import Libraries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
import warnings
warnings.filterwarnings("ignore")
# # Data Importing
data = pd.read_csv("../input/uber-fares-dataset/uber.csv")
data
# # Data Describe
data.head(5)
data.info()
data.describe()
# # Data Preprocessing & Cleaning
df = data.copy()
df
df = df.drop(["Unnamed: 0"], axis=1)
df.columns
df.isnull().sum()
df.dropna(subset=["dropoff_longitude"], inplace=True)
df.dropna(subset=["dropoff_latitude"], inplace=True)
df.isnull().sum()
sns.heatmap(df.isnull(), cmap=sns.cubehelix_palette(as_cmap=True))
# now we don't have null values
# # Handling the String Columns
df.describe(include=object)
df.drop(columns=["key", "pickup_datetime"], inplace=True)
plt.figure(figsize=(20, 10))
sns.heatmap(df.corr(), annot=True, cmap="coolwarm")
from sklearn.model_selection import train_test_split
x = df.drop("fare_amount", axis=1).values
y = df["fare_amount"].values
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.8, random_state=42
)
# # modeling
# # Random Forest Regressor
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor(max_depth=7, max_features=3, n_estimators=100)
rf.fit(x_train, y_train)
rf.score(x_train, y_train)
rf.score(x_test, y_test)
# # Model Evaluation
y_pred = rf.predict(x_test)
y_pred
df3 = pd.DataFrame({"Y_test": y_test, "Y_pred": y_pred})
df3.head(20)
plt.figure(figsize=(20, 6))
plt.plot(df3[:500])
plt.legend(["Actual", "Predicted"])
from sklearn.metrics import r2_score
rf_score = r2_score(y_test, y_pred)
rf_score
from sklearn.metrics import (
mean_absolute_error,
mean_absolute_percentage_error,
mean_squared_error,
)
mse = mean_squared_error(y_test, y_pred)
print(mse)
mae = mean_absolute_error(y_test, y_pred)
print(mae)
mape = mean_absolute_percentage_error(y_test, y_pred)
print(mape)
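# Possible feature-engineering extension (an assumption, not part of the original
# notebook): the four raw coordinates can be condensed into a single haversine trip
# distance, which is usually far more predictive of the fare.
def haversine_km(lat1, lon1, lat2, lon2):
    lat1, lon1, lat2, lon2 = map(np.radians, [lat1, lon1, lat2, lon2])
    a = (
        np.sin((lat2 - lat1) / 2) ** 2
        + np.cos(lat1) * np.cos(lat2) * np.sin((lon2 - lon1) / 2) ** 2
    )
    return 2 * 6371 * np.arcsin(np.sqrt(a))

df["distance_km"] = haversine_km(
    df["pickup_latitude"],
    df["pickup_longitude"],
    df["dropoff_latitude"],
    df["dropoff_longitude"],
)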
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/989/129989976.ipynb
|
uber-fares-dataset
|
yasserh
|
[{"Id": 129989976, "ScriptId": 38617671, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10938564, "CreationDate": "05/18/2023 01:20:18", "VersionNumber": 3.0, "Title": "Uber fare prediction", "EvaluationDate": "05/18/2023", "IsChange": false, "TotalLines": 104.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 104.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
|
[{"Id": 186438421, "KernelVersionId": 129989976, "SourceDatasetVersionId": 2994100}]
|
[{"Id": 2994100, "DatasetId": 1834623, "DatasourceVersionId": 3041889, "CreatorUserId": 8833583, "LicenseName": "CC0: Public Domain", "CreationDate": "01/01/2022 15:54:14", "VersionNumber": 1.0, "Title": "Uber Fares Dataset", "Slug": "uber-fares-dataset", "Subtitle": "Can you predict the fare for Uber Rides - Regression Problem", "Description": "\n\n### Description:\n\nThe project is about on world's largest taxi company Uber inc. In this project, we're looking to predict the fare for their future transactional cases. Uber delivers service to lakhs of customers daily. Now it becomes really important to manage their data properly to come up with new business ideas to get best results. Eventually, it becomes really important to estimate the fare prices accurately.\n\n### The datset contains the following fields:\n* key - a unique identifier for each trip\n* fare_amount - the cost of each trip in usd\n* pickup_datetime - date and time when the meter was engaged\n* passenger_count - the number of passengers in the vehicle (driver entered value)\n* pickup_longitude - the longitude where the meter was engaged\n* pickup_latitude - the latitude where the meter was engaged\n* dropoff_longitude - the longitude where the meter was disengaged\n* dropoff_latitude - the latitude where the meter was disengaged\n\n### Acknowledgement: \nThe dataset is referred from Kaggle.\n\n### Objective:\n- Understand the Dataset & cleanup (if required).\n- Build Regression models to predict the fare price of uber ride.\n- Also evaluate the models & compare thier respective scores like R2, RMSE, etc.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1834623, "CreatorUserId": 8833583, "OwnerUserId": 8833583.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2994100.0, "CurrentDatasourceVersionId": 3041889.0, "ForumId": 1857414, "Type": 2, "CreationDate": "01/01/2022 15:54:14", "LastActivityDate": "01/01/2022", "TotalViews": 80016, "TotalDownloads": 14647, "TotalVotes": 97, "TotalKernels": 39}]
|
[{"Id": 8833583, "UserName": "yasserh", "DisplayName": "M Yasser H", "RegisterDate": "11/09/2021", "PerformanceTier": 3}]
|
| false | 1 | 701 | 2 | 1,059 | 701 |
||
129989058
|
<jupyter_start><jupyter_text>Economics Journal Subscription Data
# Description
Subscriptions to economics journals at US libraries, for the year 2000.
# Usage
data("Journals")
# Format
A data frame containing 180 observations on 10 variables.
### title
Journal title.
### publisher
factor with publisher name.
### society
factor. Is the journal published by a scholarly society?
### price
Library subscription price.
### pages
Number of pages.
### charpp
Characters per page.
### citations
Total number of citations.
### foundingyear
Year journal was founded.
### subs
Number of library subscriptions.
### field
factor with field description.
# Details
Data on 180 economic journals, collected in particular for analyzing journal pricing. See also https://econ.ucsb.edu/~tedb/Journals/jpricing.html for general information on this topic as well as a more up-to-date version of the data set. This version is taken from Stock and Watson (2007).
The data as obtained from the online complements for Stock and Watson (2007) contained two journals with title “World Development”. One of these (observation 80) seemed to be an error and was changed to “The World Economy”.
# Source
Online complements to Stock and Watson (2007).
# References
Bergstrom, T. (2001). Free Labor for Costly Journals? Journal of Economic Perspectives, 15, 183–198.
Stock, J.H. and Watson, M.W. (2007). Introduction to Econometrics, 2nd ed. Boston: Addison Wesley.
# Examples
## data and transformed variables
data("Journals")
journals <- Journals[, c("subs", "price")]
journals$citeprice <- Journals$price/Journals$citations
journals$age <- 2000 - Journals$foundingyear
journals$chars <- Journals$charpp*Journals$pages/10^6
## Stock and Watson (2007)
## Figure 8.9 (a) and (b)
plot(subs ~ citeprice, data = journals, pch = 19)
plot(log(subs) ~ log(citeprice), data = journals, pch = 19)
fm1 <- lm(log(subs) ~ log(citeprice), data = journals)
abline(fm1)
## Table 8.2, use HC1 for comparability with Stata
fm2 <- lm(subs ~ citeprice + age + chars, data = log(journals))
fm3 <- lm(subs ~ citeprice + I(citeprice^2) + I(citeprice^3) +
age + I(age * citeprice) + chars, data = log(journals))
fm4 <- lm(subs ~ citeprice + age + I(age * citeprice) + chars, data = log(journals))
coeftest(fm1, vcov = vcovHC(fm1, type = "HC1"))
coeftest(fm2, vcov = vcovHC(fm2, type = "HC1"))
coeftest(fm3, vcov = vcovHC(fm3, type = "HC1"))
coeftest(fm4, vcov = vcovHC(fm4, type = "HC1"))
waldtest(fm3, fm4, vcov = vcovHC(fm3, type = "HC1"))
## changes with respect to age
library("strucchange")
## Nyblom-Hansen test
scus <- gefp(subs ~ citeprice, data = log(journals), fit = lm, order.by = ~ age)
plot(scus, functional = meanL2BB)
## estimate breakpoint(s)
journals <- journals[order(journals$age),]
bp <- breakpoints(subs ~ citeprice, data = log(journals), h = 20)
plot(bp)
bp.age <- journals$age[bp$breakpoints]
## visualization
plot(subs ~ citeprice, data = log(journals), pch = 19, col = (age > log(bp.age)) + 1)
abline(coef(bp)[1,], col = 1)
abline(coef(bp)[2,], col = 2)
legend("bottomleft", legend = c("age > 18", "age < 18"), lty = 1, col = 2:1, bty = "n")
Kaggle dataset identifier: economics-journal-subscription-data
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
df = pd.read_csv("/kaggle/input/economics-journal-subscription-data/Journals.csv")
df.head()
df.info()
df.isnull().sum()
df.drop(columns=["Unnamed: 0"], inplace=True)
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(20, 10))
sns.scatterplot(ax=axes[0, 0], x="pages", y="price", data=df, color="red")
sns.scatterplot(ax=axes[0, 1], x="foundingyear", y="price", data=df, color="green")
sns.barplot(ax=axes[1, 0], x="society", y="price", data=df, color="Fuchsia")
sns.scatterplot(ax=axes[1, 1], x="subs", y="price", data=df, color="orange")
axes[0, 0].title.set_text("Relationship between price and number of pages")
axes[0, 1].title.set_text("Corrlation between founding year and price")
axes[1, 0].title.set_text("Seeing a difference between accrediation and price")
axes[1, 1].title.set_text("Number of subscribers compared to price")
plt.figure(figsize=(15, 10))
plt.xticks(rotation=70)
sns.countplot(x="field", data=df, palette="rocket")
plt.title("Most popular topics")
plt.figure(figsize=(15, 10))
sns.barplot(
data=df.sort_values(by="subs", ascending=False),
x="subs",
y="field",
orient="h",
errwidth=1,
palette="rocket",
)
plt.title("Number of total subscribers for each topic")
df.groupby("publisher").agg({"price": "median"}).sort_values(
by="price", ascending=False
).head(10).plot(
kind="barh", figsize=(10, 5), title="Cost per magazine for each publisher"
)
df["society"] = df["society"].replace({"no": 0, "yes": 1})
df["society"].dtype
corr_data = df.loc[
:, ["price", "pages", "charpp", "citations", "subs", "society"]
].corr()
plt.figure(figsize=(15, 10))
sns.heatmap(data=corr_data, annot=True)
plt.title("Correlation")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/989/129989058.ipynb
|
economics-journal-subscription-data
|
utkarshx27
|
[{"Id": 129989058, "ScriptId": 38669154, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15029794, "CreationDate": "05/18/2023 01:05:20", "VersionNumber": 1.0, "Title": "Exploratory analysis of Journals", "EvaluationDate": "05/18/2023", "IsChange": true, "TotalLines": 69.0, "LinesInsertedFromPrevious": 69.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 5}]
|
[{"Id": 186436875, "KernelVersionId": 129989058, "SourceDatasetVersionId": 5625635}]
|
[{"Id": 5625635, "DatasetId": 3234443, "DatasourceVersionId": 5700853, "CreatorUserId": 13364933, "LicenseName": "CC0: Public Domain", "CreationDate": "05/07/2023 12:34:14", "VersionNumber": 1.0, "Title": "Economics Journal Subscription Data", "Slug": "economics-journal-subscription-data", "Subtitle": "Economics Journal Subscription Dataset", "Description": "# Description\nSubscriptions to economics journals at US libraries, for the year 2000.\n\n# Usage\ndata(\"Journals\")\n\n# Format\nA data frame containing 180 observations on 10 variables.\n\n### title\nJournal title.\n\n### publisher\nfactor with publisher name.\n\n### society\nfactor. Is the journal published by a scholarly society?\n\n### price\nLibrary subscription price.\n\n### pages\nNumber of pages.\n\n### charpp\nCharacters per page.\n\n### citations\nTotal number of citations.\n\n### foundingyear\nYear journal was founded.\n\n### subs\nNumber of library subscriptions.\n\n### field\nfactor with field description.\n\n# Details\nData on 180 economic journals, collected in particular for analyzing journal pricing. See also https://econ.ucsb.edu/~tedb/Journals/jpricing.html for general information on this topic as well as a more up-to-date version of the data set. This version is taken from Stock and Watson (2007).\n\nThe data as obtained from the online complements for Stock and Watson (2007) contained two journals with title \u201cWorld Development\u201d. One of these (observation 80) seemed to be an error and was changed to \u201cThe World Economy\u201d.\n\n# Source\nOnline complements to Stock and Watson (2007).\n\n# References\nBergstrom, T. (2001). Free Labor for Costly Journals? Journal of Economic Perspectives, 15, 183\u2013198.\n\nStock, J.H. and Watson, M.W. (2007). Introduction to Econometrics, 2nd ed. 
Boston: Addison Wesley.\n\n\n# Examples\n\n## data and transformed variables\ndata(\"Journals\")\njournals <- Journals[, c(\"subs\", \"price\")]\njournals$citeprice <- Journals$price/Journals$citations\njournals$age <- 2000 - Journals$foundingyear\njournals$chars <- Journals$charpp*Journals$pages/10^6\n\n## Stock and Watson (2007)\n## Figure 8.9 (a) and (b)\nplot(subs ~ citeprice, data = journals, pch = 19)\nplot(log(subs) ~ log(citeprice), data = journals, pch = 19)\nfm1 <- lm(log(subs) ~ log(citeprice), data = journals)\nabline(fm1)\n\n## Table 8.2, use HC1 for comparability with Stata \nfm2 <- lm(subs ~ citeprice + age + chars, data = log(journals))\nfm3 <- lm(subs ~ citeprice + I(citeprice^2) + I(citeprice^3) +\n age + I(age * citeprice) + chars, data = log(journals))\nfm4 <- lm(subs ~ citeprice + age + I(age * citeprice) + chars, data = log(journals))\ncoeftest(fm1, vcov = vcovHC(fm1, type = \"HC1\"))\ncoeftest(fm2, vcov = vcovHC(fm2, type = \"HC1\"))\ncoeftest(fm3, vcov = vcovHC(fm3, type = \"HC1\"))\ncoeftest(fm4, vcov = vcovHC(fm4, type = \"HC1\"))\nwaldtest(fm3, fm4, vcov = vcovHC(fm3, type = \"HC1\"))\n\n## changes with respect to age\nlibrary(\"strucchange\")\n## Nyblom-Hansen test\nscus <- gefp(subs ~ citeprice, data = log(journals), fit = lm, order.by = ~ age)\nplot(scus, functional = meanL2BB)\n## estimate breakpoint(s)\njournals <- journals[order(journals$age),]\nbp <- breakpoints(subs ~ citeprice, data = log(journals), h = 20)\nplot(bp)\nbp.age <- journals$age[bp$breakpoints]\n## visualization\nplot(subs ~ citeprice, data = log(journals), pch = 19, col = (age > log(bp.age)) + 1)\nabline(coef(bp)[1,], col = 1)\nabline(coef(bp)[2,], col = 2)\nlegend(\"bottomleft\", legend = c(\"age > 18\", \"age < 18\"), lty = 1, col = 2:1, bty = \"n\")", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3234443, "CreatorUserId": 13364933, "OwnerUserId": 13364933.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5625635.0, "CurrentDatasourceVersionId": 5700853.0, "ForumId": 3299601, "Type": 2, "CreationDate": "05/07/2023 12:34:14", "LastActivityDate": "05/07/2023", "TotalViews": 3087, "TotalDownloads": 442, "TotalVotes": 27, "TotalKernels": 3}]
|
[{"Id": 13364933, "UserName": "utkarshx27", "DisplayName": "Utkarsh Singh", "RegisterDate": "01/21/2023", "PerformanceTier": 2}]
|
||
129989354
|
<jupyter_start><jupyter_text>Nobel Laureates, 1901-Present
# Context
Between 1901 and 2016, the Nobel Prizes and the Prize in Economic Sciences were awarded 579 times to 911 people and organizations. The Nobel Prize is an international award administered by the Nobel Foundation in Stockholm, Sweden, and based on the fortune of Alfred Nobel, Swedish inventor and entrepreneur. In 1968, Sveriges Riksbank established The Sveriges Riksbank Prize in Economic Sciences in Memory of Alfred Nobel, founder of the Nobel Prize. Each Prize consists of a medal, a personal diploma, and a cash award.
A person or organization awarded the Nobel Prize is called Nobel Laureate. The word "laureate" refers to being signified by the laurel wreath. In ancient Greece, laurel wreaths were awarded to victors as a sign of honor.
# Content
This dataset includes a record for every individual or organization that was awarded the Nobel Prize since 1901.
# Acknowledgements
The Nobel laureate data was acquired from the Nobel Prize API.
# Inspiration
Which country has won the most prizes in each category? What words are most frequently written in the prize motivation? Can you predict the age, gender, and nationality of next year's Nobel laureates?
Kaggle dataset identifier: nobel-laureates
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Introduction
# The nobel prizes are awarded annually to individuals and organizations who make outstanding contributions in the fields of chemistry, physics, literature, peace, and physiology or medicine. The first Prize was awarded in in 1901.
# In this project we will find out more about nobel prozes, like what are the winners characteristics? How many winners awarded in a year? Which country gets the prize most often? Is there any winners gotten it twice? and more. The data set is used from kaggle
# We will try to answer these questions during our analysis:
# 1. The most Nobel of Prizes
# 2. who gets the Nobel Prize?
# 3. USA dominance
# 4. USA dominance, visualized
# 5. What is the gender of a typical Nobel Prize winner?
# 6. The first woman to win the Nobel Prize
# 7. Repeat laureates
# 8. How old are you when you get the prize?
# 9. Age differences between prize categories
# 10. Oldest and youngest winners
# First we need to import all nessary libraries
import pandas as pd
import requests
import json
from bs4 import BeautifulSoup
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import html5lib
# for visualizations
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
import plotly.express as px
# read the csv file and save it in dataframe
data = pd.read_csv("/kaggle/input/nobel-laureates/archive.csv")
data.head(3)
# # Exploratory Analysis
# check the number of data
data.shape
data.size
# check the data types
data.info()
# We can find out about some quick statistics about the data with
data.describe()
# Find if there is any duplicates
data.duplicated().sum()
# Check for non values
data.isnull().sum()
# # Let is find the most Nobel of Prizes
#
data["Category"].value_counts()
# From this we can say that the most prizes are awarded in Medicine field.
# # Who gets the Nobel Prize?
data["Laureate Type"].value_counts()
# This is shows that the Nobel Prize is awarded either to individuals or an organizations .
# # USA dominance
data["Birth Country"].value_counts().head(10)
# ***** The majority of Prize winners are born in the USA
# # What is the gender of a typical Nobel Prize winner?
Gender_count = data["Sex"].value_counts()
Gender_count
# Its not suprised that the most prizes awarded to male than female, as the first Nobel Prize was handed out in 1901, and at that time the prize was male-focused.
# Create a bar chart
plt.bar(Gender_count.index, Gender_count.values)
# add chart title and axis labels
plt.title("Gender Distribution of Nobel Prize Winners")
plt.xlabel("Gender")
plt.ylabel("Number of Winners")
# show the chart
plt.show()
# # The first woman to win the Nobel Prize
#
# Find all female winners
data["female_winner"] = data["Sex"] == "Female"
data[data["female_winner"]].head(5)
# Find the first woman won the price
data[data["female_winner"]].nsmallest(1, "Year")
# The fist female won the Nobel Prize in 1903, she was from Poland.
# # The number of women Nobel Prize winners from each country
# filter the data to include only women Nobel Prize winners
df_women = data[data["Sex"] == "Female"]
print(len(df_women))
# create a countplot of women Nobel Prize winners by country
plt.figure(figsize=(12, 6))
sns.countplot(x="Birth Country", data=df_women)
plt.title("Women Nobel Prize Winners by Country")
plt.xlabel("Country")
plt.ylabel("Count")
# rotate x-axis labels for better readability
plt.xticks(rotation=90)
# show the plot
plt.show()
# filter the data to include only women Nobel Prize winners
df_women = data[data["Sex"] == "Female"]
# group by birth country and count the number of women Nobel Prize winners
df_women_count = df_women.groupby("Birth Country").size().reset_index(name="count")
# create a choropleth map of women Nobel Prize winners by country
fig = px.choropleth(
df_women_count,
locations="Birth Country",
locationmode="country names",
color="count",
title="Women Nobel Prize Winners by Country",
)
# show the map
fig.show()
# Filter the data to include only women Nobel Prize winners
df_women = data[data["Sex"] == "Female"]
# Retrieve the unique prize categories for women Nobel Prize winners
women_prize_categories = df_women["Category"].unique()
# Print the women winners with their prize categories
for category in women_prize_categories:
women_winners = df_women[df_women["Category"] == category]["Full Name"].tolist()
print(f"Prize Category: {category}")
print("Women Winners:")
for winner in women_winners:
print(winner)
print()
# Count the number of women winners in each prize category
category_counts = df_women["Category"].value_counts()
# Create a bar plot
plt.figure(figsize=(10, 6))
sns.barplot(x=category_counts.index, y=category_counts.values)
plt.title("Women Nobel Prize Winners by Category")
plt.xlabel("Prize Category")
plt.ylabel("Count")
plt.xticks(rotation=45)
plt.show()
# Count the number of women winners in each prize category
category_counts = df_women["Category"].value_counts()
# Create a pie chart
plt.figure(figsize=(8, 8))
plt.pie(category_counts.values, labels=category_counts.index, autopct="%1.1f%%")
plt.title("Women Nobel Prize Winners by Category")
plt.show()
# The majority of Women Nobel Prize winners got the prize in Peace
# # Repeat laureates
# Selecting the laureates that have received 2 or more prizes.
data.groupby("Full Name").filter(lambda num: len(num) >= 2)
# This is shows that there are 19 lucky winners have got the prize twice.
# # How old are they when they get the prize?
from datetime import datetime
# calculate the age of each winner at the time of the award
# handle invalid birth_date values
data.loc[data["Birth Date"] == "1898-00-00", "Birth Date"] = "1898-01-01"
# handle invalid birth_date values
data.loc[data["Birth Date"] == "1943-00-00", "Birth Date"] = "1943-01-01"
ages = []
for index, row in data.iterrows():
if not pd.isna(row["Birth Date"]):
birth_date = datetime.strptime(row["Birth Date"], "%Y-%m-%d")
award_date = datetime.strptime(str(row["Year"]), "%Y")
if not pd.isna(row["Death Date"]):
death_date = datetime.strptime(row["Death Date"], "%Y-%m-%d")
age = (death_date - birth_date).days / 365.25
else:
age = (award_date - birth_date).days / 365.25
ages.append(age)
# print out the average age of winners
print("Average age of Nobel Prize winners: {:.2f}".format(sum(ages) / len(ages)))
# Let is visualize the age distribution of Nobel Prize winners using seaborn, by creating a histogram using the sns.histplot() function.
# create a histogram of age distribution using seaborn
sns.histplot(data=ages, bins=20, kde=True)
# set the title and x-label for the plot
plt.title("Age Distribution of Nobel Prize Winners")
plt.xlabel("Age at the time of the award")
# show the plot
plt.show()
# # How long Nobel Prize winners lived?
# We can subtract their birth date from their death date (if it exists) or from the current date (if they are still alive) to get their lifespan.
# calculate the lifespan of each winner
lifespans = []
for index, row in data.iterrows():
birth_date_str = str(row["Birth Date"]).replace("nan", "1900-01-01")
birth_date = datetime.strptime(birth_date_str, "%Y-%m-%d")
if pd.notnull(row["Death Date"]):
death_date = datetime.strptime(str(row["Death Date"]), "%Y-%m-%d")
lifespan = (death_date - birth_date).days / 365.25
else:
lifespan = (datetime.now() - birth_date).days / 365.25
lifespans.append(lifespan)
# add the lifespan column to the dataframe
data["lifespan"] = lifespans
# print the lifespan statistics
print(data["lifespan"].describe())
# check the lifespan column added to the dataframe using df['lifespan'] = lifespans.
data
# create a histogram of lifespan distribution using matplotlib
plt.hist(lifespans, bins=20)
# set the title and x-label for the plot
plt.title("Lifespan Distribution of Nobel Prize Winners")
plt.xlabel("Lifespan (years)")
# show the plot
plt.show()
# This histogram shows the lifespan of Nobel Prize winners, majority of the winners between 72 to 82 years.
# # Age of Nobel winners over year
# Let is see the relationship between the age of Nobel Prize winners and the year they won the prize.
import matplotlib.pyplot as plt
import seaborn as sns
# Converting birth_date from String to datetime
data["Birth Date"] = pd.to_datetime(
data["Birth Date"], format="%Y-%m-%d", errors="coerce"
)
# Calculating the age of winners
data["Age"] = data["Year"] - (data["Birth Date"].dt.year)
# Plotting the age of Nobel Prize winners
plt.figure(figsize=(10, 6))
sns.set_style("darkgrid")
sns.lmplot(x="Year", y="Age", data=data, aspect=2, line_kws={"color": "red"})
plt.title("Age of Nobel Prize Winners over Time", fontsize=16)
plt.xlabel("Year", fontsize=14)
plt.ylabel("Age", fontsize=14)
# remove any unnecessary whitespace in the plot.
plt.tight_layout()
# show the result
plt.show()
# The age of Nobel Prize winners has undergone some changes over the years. In the early 19th century, winners were generally younger than they are now. However, in recent times, the trend has shifted, with winners now receiving the prize at an older age, typically in their mid-60s.
# # Age differences between prize categories
# convert birth_date column to datetime object
data["Birth Date"] = pd.to_datetime(data["Birth Date"])
# extract the year from the birth_date column and store it as a new column
data["Birth_year"] = data["Birth Date"].dt.year
# calculate the age of each winner at the time they received the prize
data["age_at_award"] = data["Year"] - data["Birth_year"]
# create a box plot of age differences by prize category
sns.boxplot(x="Category", y="age_at_award", data=data)
# The box plot shows the age differences across different prize categories of each Nobel Prize winners
# # Oldest and youngest winners
#
# The oldest winner of a Nobel Prize
display(data.nlargest(1, "Age"))
# The oldest winner of Nobel was in Economics at 90, when she recieved the Prize. That is why Leonid Hurwicz is the oldest winner among 969.
# The youngest winner of a Nobel Prize as of 2016
display(data.nsmallest(1, "Age"))
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/989/129989354.ipynb
|
nobel-laureates
| null |
[{"Id": 129989354, "ScriptId": 38532602, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9144137, "CreationDate": "05/18/2023 01:10:19", "VersionNumber": 3.0, "Title": "Nobel Prize Analysis", "EvaluationDate": "05/18/2023", "IsChange": true, "TotalLines": 328.0, "LinesInsertedFromPrevious": 76.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 252.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
|
[{"Id": 186437423, "KernelVersionId": 129989354, "SourceDatasetVersionId": 1538}]
|
[{"Id": 1538, "DatasetId": 839, "DatasourceVersionId": 1538, "CreatorUserId": 797864, "LicenseName": "Unknown", "CreationDate": "02/16/2017 00:31:00", "VersionNumber": 1.0, "Title": "Nobel Laureates, 1901-Present", "Slug": "nobel-laureates", "Subtitle": "Which country has won the most prizes in each category?", "Description": "# Context \n\nBetween 1901 and 2016, the Nobel Prizes and the Prize in Economic Sciences were awarded 579 times to 911 people and organizations. The Nobel Prize is an international award administered by the Nobel Foundation in Stockholm, Sweden, and based on the fortune of Alfred Nobel, Swedish inventor and entrepreneur. In 1968, Sveriges Riksbank established The Sveriges Riksbank Prize in Economic Sciences in Memory of Alfred Nobel, founder of the Nobel Prize. Each Prize consists of a medal, a personal diploma, and a cash award.\n\nA person or organization awarded the Nobel Prize is called Nobel Laureate. The word \"laureate\" refers to being signified by the laurel wreath. In ancient Greece, laurel wreaths were awarded to victors as a sign of honor.\n\n\n# Content\n\nThis dataset includes a record for every individual or organization that was awarded the Nobel Prize since 1901.\n\n\n# Acknowledgements\n\nThe Nobel laureate data was acquired from the Nobel Prize API.\n\n\n# Inspiration\n\nWhich country has won the most prizes in each category? What words are most frequently written in the prize motivation? Can you predict the age, gender, and nationality of next year's Nobel laureates?", "VersionNotes": "Initial release", "TotalCompressedBytes": 289963.0, "TotalUncompressedBytes": 289963.0}]
|
[{"Id": 839, "CreatorUserId": 797864, "OwnerUserId": NaN, "OwnerOrganizationId": 461.0, "CurrentDatasetVersionId": 1538.0, "CurrentDatasourceVersionId": 1538.0, "ForumId": 2645, "Type": 2, "CreationDate": "02/16/2017 00:31:00", "LastActivityDate": "02/05/2018", "TotalViews": 48851, "TotalDownloads": 5647, "TotalVotes": 110, "TotalKernels": 59}]
| null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Introduction
# The nobel prizes are awarded annually to individuals and organizations who make outstanding contributions in the fields of chemistry, physics, literature, peace, and physiology or medicine. The first Prize was awarded in in 1901.
# In this project we will find out more about nobel prozes, like what are the winners characteristics? How many winners awarded in a year? Which country gets the prize most often? Is there any winners gotten it twice? and more. The data set is used from kaggle
# We will try to answer these questions during our analysis:
# 1. The most Nobel of Prizes
# 2. who gets the Nobel Prize?
# 3. USA dominance
# 4. USA dominance, visualized
# 5. What is the gender of a typical Nobel Prize winner?
# 6. The first woman to win the Nobel Prize
# 7. Repeat laureates
# 8. How old are you when you get the prize?
# 9. Age differences between prize categories
# 10. Oldest and youngest winners
# First we need to import all nessary libraries
import pandas as pd
import requests
import json
from bs4 import BeautifulSoup
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import html5lib
# for visualizations
import matplotlib.pyplot as plt
import seaborn as sns
import missingno as msno
import plotly.express as px
# read the csv file and save it in dataframe
data = pd.read_csv("/kaggle/input/nobel-laureates/archive.csv")
data.head(3)
# # Exploratory Analysis
# check the number of data
data.shape
data.size
# check the data types
data.info()
# We can find out about some quick statistics about the data with
data.describe()
# Find if there is any duplicates
data.duplicated().sum()
# Check for non values
data.isnull().sum()
# # Let is find the most Nobel of Prizes
#
data["Category"].value_counts()
# From this we can say that the most prizes are awarded in Medicine field.
# # Who gets the Nobel Prize?
data["Laureate Type"].value_counts()
# This is shows that the Nobel Prize is awarded either to individuals or an organizations .
# # USA dominance
data["Birth Country"].value_counts().head(10)
# ***** The majority of Prize winners are born in the USA
# # What is the gender of a typical Nobel Prize winner?
Gender_count = data["Sex"].value_counts()
Gender_count
# Its not suprised that the most prizes awarded to male than female, as the first Nobel Prize was handed out in 1901, and at that time the prize was male-focused.
# Create a bar chart
plt.bar(Gender_count.index, Gender_count.values)
# add chart title and axis labels
plt.title("Gender Distribution of Nobel Prize Winners")
plt.xlabel("Gender")
plt.ylabel("Number of Winners")
# show the chart
plt.show()
# # The first woman to win the Nobel Prize
#
# Find all female winners
data["female_winner"] = data["Sex"] == "Female"
data[data["female_winner"]].head(5)
# Find the first woman won the price
data[data["female_winner"]].nsmallest(1, "Year")
# The fist female won the Nobel Prize in 1903, she was from Poland.
# # The number of women Nobel Prize winners from each country
# filter the data to include only women Nobel Prize winners
df_women = data[data["Sex"] == "Female"]
print(len(df_women))
# create a countplot of women Nobel Prize winners by country
plt.figure(figsize=(12, 6))
sns.countplot(x="Birth Country", data=df_women)
plt.title("Women Nobel Prize Winners by Country")
plt.xlabel("Country")
plt.ylabel("Count")
# rotate x-axis labels for better readability
plt.xticks(rotation=90)
# show the plot
plt.show()
# filter the data to include only women Nobel Prize winners
df_women = data[data["Sex"] == "Female"]
# group by birth country and count the number of women Nobel Prize winners
df_women_count = df_women.groupby("Birth Country").size().reset_index(name="count")
# create a choropleth map of women Nobel Prize winners by country
fig = px.choropleth(
df_women_count,
locations="Birth Country",
locationmode="country names",
color="count",
title="Women Nobel Prize Winners by Country",
)
# show the map
fig.show()
# Filter the data to include only women Nobel Prize winners
df_women = data[data["Sex"] == "Female"]
# Retrieve the unique prize categories for women Nobel Prize winners
women_prize_categories = df_women["Category"].unique()
# Print the women winners with their prize categories
for category in women_prize_categories:
women_winners = df_women[df_women["Category"] == category]["Full Name"].tolist()
print(f"Prize Category: {category}")
print("Women Winners:")
for winner in women_winners:
print(winner)
print()
# Count the number of women winners in each prize category
category_counts = df_women["Category"].value_counts()
# Create a bar plot
plt.figure(figsize=(10, 6))
sns.barplot(x=category_counts.index, y=category_counts.values)
plt.title("Women Nobel Prize Winners by Category")
plt.xlabel("Prize Category")
plt.ylabel("Count")
plt.xticks(rotation=45)
plt.show()
# Count the number of women winners in each prize category
category_counts = df_women["Category"].value_counts()
# Create a pie chart
plt.figure(figsize=(8, 8))
plt.pie(category_counts.values, labels=category_counts.index, autopct="%1.1f%%")
plt.title("Women Nobel Prize Winners by Category")
plt.show()
# The majority of Women Nobel Prize winners got the prize in Peace
# # Repeat laureates
# Selecting the laureates that have received 2 or more prizes.
data.groupby("Full Name").filter(lambda num: len(num) >= 2)
# This is shows that there are 19 lucky winners have got the prize twice.
# # How old are they when they get the prize?
from datetime import datetime
# calculate the age of each winner at the time of the award
# handle invalid birth_date values
data.loc[data["Birth Date"] == "1898-00-00", "Birth Date"] = "1898-01-01"
# handle invalid birth_date values
data.loc[data["Birth Date"] == "1943-00-00", "Birth Date"] = "1943-01-01"
ages = []
for index, row in data.iterrows():
if not pd.isna(row["Birth Date"]):
birth_date = datetime.strptime(row["Birth Date"], "%Y-%m-%d")
award_date = datetime.strptime(str(row["Year"]), "%Y")
if not pd.isna(row["Death Date"]):
death_date = datetime.strptime(row["Death Date"], "%Y-%m-%d")
age = (death_date - birth_date).days / 365.25
else:
age = (award_date - birth_date).days / 365.25
ages.append(age)
# print out the average age of winners
print("Average age of Nobel Prize winners: {:.2f}".format(sum(ages) / len(ages)))
# Let is visualize the age distribution of Nobel Prize winners using seaborn, by creating a histogram using the sns.histplot() function.
# create a histogram of age distribution using seaborn
sns.histplot(data=ages, bins=20, kde=True)
# set the title and x-label for the plot
plt.title("Age Distribution of Nobel Prize Winners")
plt.xlabel("Age at the time of the award")
# show the plot
plt.show()
# # How long Nobel Prize winners lived?
# We can subtract their birth date from their death date (if it exists) or from the current date (if they are still alive) to get their lifespan.
# calculate the lifespan of each winner
lifespans = []
for index, row in data.iterrows():
birth_date_str = str(row["Birth Date"]).replace("nan", "1900-01-01")
birth_date = datetime.strptime(birth_date_str, "%Y-%m-%d")
if pd.notnull(row["Death Date"]):
death_date = datetime.strptime(str(row["Death Date"]), "%Y-%m-%d")
lifespan = (death_date - birth_date).days / 365.25
else:
lifespan = (datetime.now() - birth_date).days / 365.25
lifespans.append(lifespan)
# add the lifespan column to the dataframe
data["lifespan"] = lifespans
# print the lifespan statistics
print(data["lifespan"].describe())
# check the lifespan column added to the dataframe using df['lifespan'] = lifespans.
data
# create a histogram of lifespan distribution using matplotlib
plt.hist(lifespans, bins=20)
# set the title and x-label for the plot
plt.title("Lifespan Distribution of Nobel Prize Winners")
plt.xlabel("Lifespan (years)")
# show the plot
plt.show()
# This histogram shows the lifespan of Nobel Prize winners, majority of the winners between 72 to 82 years.
# # Age of Nobel winners over year
# Let is see the relationship between the age of Nobel Prize winners and the year they won the prize.
import matplotlib.pyplot as plt
import seaborn as sns
# Converting birth_date from String to datetime
data["Birth Date"] = pd.to_datetime(
data["Birth Date"], format="%Y-%m-%d", errors="coerce"
)
# Calculating the age of winners
data["Age"] = data["Year"] - (data["Birth Date"].dt.year)
# Plotting the age of Nobel Prize winners
plt.figure(figsize=(10, 6))
sns.set_style("darkgrid")
sns.lmplot(x="Year", y="Age", data=data, aspect=2, line_kws={"color": "red"})
plt.title("Age of Nobel Prize Winners over Time", fontsize=16)
plt.xlabel("Year", fontsize=14)
plt.ylabel("Age", fontsize=14)
# remove any unnecessary whitespace in the plot.
plt.tight_layout()
# show the result
plt.show()
# The age of Nobel Prize winners has undergone some changes over the years. In the early 19th century, winners were generally younger than they are now. However, in recent times, the trend has shifted, with winners now receiving the prize at an older age, typically in their mid-60s.
# # Age differences between prize categories
# convert birth_date column to datetime object
data["Birth Date"] = pd.to_datetime(data["Birth Date"])
# extract the year from the birth_date column and store it as a new column
data["Birth_year"] = data["Birth Date"].dt.year
# calculate the age of each winner at the time they received the prize
data["age_at_award"] = data["Year"] - data["Birth_year"]
# create a box plot of age differences by prize category
sns.boxplot(x="Category", y="age_at_award", data=data)
# The box plot shows the age differences across different prize categories of each Nobel Prize winners
# # Oldest and youngest winners
#
# The oldest winner of a Nobel Prize
display(data.nlargest(1, "Age"))
# The oldest winner of Nobel was in Economics at 90, when she recieved the Prize. That is why Leonid Hurwicz is the oldest winner among 969.
# The youngest winner of a Nobel Prize as of 2016
display(data.nsmallest(1, "Age"))
| false | 0 | 3,364 | 1 | 3,761 | 3,364 |
||
129447842
|
<jupyter_start><jupyter_text>Classify the bitter or sweet taste of compounds
# Context
Throughout human evolution, we have been drawn toward sweet-tasting foods and averted from bitter tastes - sweet is good or desirable, bitter is undesirable, ear wax or medicinal. Therefore, a better understanding of molecular features that determine the bitter-sweet taste of substances is crucial for identifying natural and synthetic compounds for various purposes.
# Sources
This dataset is adapted from https://github.com/cosylabiiit/bittersweet (https://www.nature.com/articles/s41598-019-43664-y). In chemoinformatics, molecules are often represented as compact [SMILES](https://en.wikipedia.org/wiki/Simplified_molecular-input_line-entry_system) strings. In this dataset, SMILES structures, along with their names and targets (bitter, sweet, tasteless, and non-bitter), were obtained from the original study. Subsequently, SMILES were converted into canonical SMILES using RDKit, and the features (molecular descriptors, both 2D and 3D) were calculated using Mordred (https://github.com/mordred-descriptor/mordred). Secondly, tasteless and non-bitter categories were merged into a single category of non-bitter-sweet. Finally, since many of the compounds were missing names, IUPAC names were fetched using PubChemPy (https://pubchempy.readthedocs.io/en/latest/) for all the compounds, and for still missing names, a generic compound + incrementor name was assigned.
# Inspiration
This is a classification dataset with the first three columns carrying names, SMILES, and canonical SMILES. Any of these columns can be used to refer to a molecule. The fourth column is the target (taste category). And all numeric features are from the 5th column until the end of the file. Many features have cells with string annotations due to errors produced by Mordred. Therefore, the following data science techniques can be learned while working on this dataset:
1. Data cleanup
2. Features selection (since the number of features is quite large in proportion to the data points)
3. Feature scaling/transformation/normalization
4. Dimensionality reduction
5. Binomial classification (bitter vs. sweet) - utilize non-bitter-sweet as a negative class.
6. Multinomial classification (bitter vs. sweet vs. non-bitter-sweet)
7. Since SMILES can be converted into molecular graphs, graph-based modeling should also be possible.
# Initial data preparation
A copy of the original dataset and the scripts and notebooks used to convert SMILES to canonical SMILES, generate features, fetch names, and export the final TSV file for Kaggle is loosely maintained at https://github.com/rohitfarmer/bittersweet.
Kaggle dataset identifier: classify-the-bitter-or-sweet-taste-of-compounds
<jupyter_script>import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
# # approach
# > 1. modeling with all data without SMILES
# > 2. modeling with SMILES
# # loading data
bitter_or_sweet_df = pd.read_csv(
"/kaggle/input/classify-the-bitter-or-sweet-taste-of-compounds/2022-09-21-bitter-sweet-pre-cleanup.tsv",
sep="\t",
)
print(bitter_or_sweet_df.shape)
bitter_or_sweet_df.head()
bitter_or_sweet_df.info()
bitter_or_sweet_df.isnull().sum().sum()
# # data wrangling
# # Object_df
object_df = bitter_or_sweet_df.select_dtypes(include="object")
print(object_df.shape)
object_df.head()
object_df["Name"].value_counts()[:5]
object_df.drop_duplicates().shape
# object_cols
object_cols = (
object_df.drop(["Name", "SMILES", "Canonical_SMILES", "Target"], axis=1)
).columns
object_cols
# # numeric_df
numeric_df = bitter_or_sweet_df.select_dtypes(include=["float", "int"])
print(numeric_df.shape)
numeric_df.head()
numeric_df["ABC"].value_counts()
numeric_df["nAcid"].value_counts()
numeric_df["nBase"].value_counts()
numeric_df["nAromAtom"].value_counts()
numeric_df.describe()
# numeric_cols
numeric_cols = numeric_df.columns
numeric_cols
# # target and features
# multi-classification
target = bitter_or_sweet_df["Target"]
print(target.shape)
print(target.value_counts())
features = bitter_or_sweet_df.drop(
["Name", "SMILES", "Canonical_SMILES", "Target"], axis=1
)
print(features.shape)
features.head()
from sklearn.model_selection import train_test_split, KFold
kf = KFold(n_splits=5, shuffle=True, random_state=2305)
for tr_idx, va_idx in kf.split(features):
X_train, X_test = features.iloc[tr_idx], features.iloc[va_idx]
y_train, y_test = target.iloc[tr_idx], target.iloc[va_idx]
X_train.shape, X_test.shape, y_train.shape, y_test.shape
# # data preprocessing
# standardization
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train[numeric_cols])
X_train[numeric_cols] = scaler.transform(X_train[numeric_cols])
X_test[numeric_cols] = scaler.transform(X_test[numeric_cols])
X_train.describe()
# one-hot encoding
X_all = pd.concat([X_train, X_test])
X_all = pd.get_dummies(X_all, columns=object_cols)
X_train = X_all.iloc[: X_train.shape[0], :].reset_index(drop=True)
X_test = X_all.iloc[: X_test.shape[0], :].reset_index(drop=True)
X_train.info()
X_train.shape, X_test.shape
# 라이브러리를 임포트합니다.
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_classification
# 로지스틱 회귀 모델을 만듭니다.
logit = LogisticRegression()
# 정확도를 사용하여 교차검증을 수행합니다.
cross_val_score(logit, X_train, y_train, scoring="accuracy")
# 마크로 평균 F1 점수를 사용하여 교차검증을 수행합니다.
cross_val_score(logit, X_train, y_train, scoring="f1_macro")
from xgboost import XGBClassifier
xgbc = XGBClassifier(
n_estimators=100, random_state=2305, eval_metric="mlogloss", use_label_encoder=False
)
xgbc.fit(X_train, y_train)
print("train_accuracy:", xgbc.score(X_train, y_train))
print("test_accuracy:", xgbc.score(X_test, y_test))
xgbc_pred = xgbc.predict(X_test)
print("matching:", np.sum(y_test == xgbc_pred))
print("non_matching:", np.sum(y_test != xgbc_pred))
classes = target.unique()
classes
from yellowbrick.classifier import confusion_matrix
plt.figure(figsize=(3, 3))
confusion_matrix(xgbc, X_train, y_train, X_test, y_test, classes=target.unique())
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/447/129447842.ipynb
|
classify-the-bitter-or-sweet-taste-of-compounds
|
rohitfarmer
|
[{"Id": 129447842, "ScriptId": 38417264, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4500939, "CreationDate": "05/13/2023 22:57:16", "VersionNumber": 6.0, "Title": "classify bitter or sweet", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 148.0, "LinesInsertedFromPrevious": 28.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 120.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185505772, "KernelVersionId": 129447842, "SourceDatasetVersionId": 4234193}]
|
[{"Id": 4234193, "DatasetId": 2495548, "DatasourceVersionId": 4291457, "CreatorUserId": 2120955, "LicenseName": "CC BY-NC-SA 4.0", "CreationDate": "09/21/2022 21:12:18", "VersionNumber": 1.0, "Title": "Classify the bitter or sweet taste of compounds", "Slug": "classify-the-bitter-or-sweet-taste-of-compounds", "Subtitle": "Dataset for 2,393 small molecules with 1,826 features", "Description": "# Context\nThroughout human evolution, we have been drawn toward sweet-tasting foods and averted from bitter tastes - sweet is good or desirable, bitter is undesirable, ear wax or medicinal. Therefore, a better understanding of molecular features that determine the bitter-sweet taste of substances is crucial for identifying natural and synthetic compounds for various purposes. \n\n# Sources\nThis dataset is adapted from https://github.com/cosylabiiit/bittersweet (https://www.nature.com/articles/s41598-019-43664-y). In chemoinformatics, molecules are often represented as compact [SMILES](https://en.wikipedia.org/wiki/Simplified_molecular-input_line-entry_system) strings. In this dataset, SMILES structures, along with their names and targets (bitter, sweet, tasteless, and non-bitter), were obtained from the original study. Subsequently, SMILES were converted into canonical SMILES using RDKit, and the features (molecular descriptors, both 2D and 3D) were calculated using Mordred (https://github.com/mordred-descriptor/mordred). Secondly, tasteless and non-bitter categories were merged into a single category of non-bitter-sweet. Finally, since many of the compounds were missing names, IUPAC names were fetched using PubChemPy (https://pubchempy.readthedocs.io/en/latest/) for all the compounds, and for still missing names, a generic compound + incrementor name was assigned.\n\n# Inspiration\nThis is a classification dataset with the first three columns carrying names, SMILES, and canonical SMILES. Any of these columns can be used to refer to a molecule. The fourth column is the target (taste category). And all numeric features are from the 5th column until the end of the file. Many features have cells with string annotations due to errors produced by Mordred. Therefore, the following data science techniques can be learned while working on this dataset:\n1. Data cleanup\n2. Features selection (since the number of features is quite large in proportion to the data points)\n3. Feature scaling/transformation/normalization\n4. Dimensionality reduction\n5. Binomial classification (bitter vs. sweet) - utilize non-bitter-sweet as a negative class.\n6. Multinomial classification (bitter vs. sweet vs. non-bitter-sweet) \n7. Since SMILES can be converted into molecular graphs, graph-based modeling should also be possible. \n\n# Initial data preparation\nA copy of the original dataset and the scripts and notebooks used to convert SMILES to canonical SMILES, generate features, fetch names, and export the final TSV file for Kaggle is loosely maintained at https://github.com/rohitfarmer/bittersweet.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 2495548, "CreatorUserId": 2120955, "OwnerUserId": 2120955.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4234193.0, "CurrentDatasourceVersionId": 4291457.0, "ForumId": 2523691, "Type": 2, "CreationDate": "09/21/2022 21:12:18", "LastActivityDate": "09/21/2022", "TotalViews": 1962, "TotalDownloads": 153, "TotalVotes": 4, "TotalKernels": 4}]
|
[{"Id": 2120955, "UserName": "rohitfarmer", "DisplayName": "Rohit Farmer", "RegisterDate": "08/02/2018", "PerformanceTier": 1}]
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
# # approach
# > 1. modeling with all data without SMILES
# > 2. modeling with SMILES
# # loading data
bitter_or_sweet_df = pd.read_csv(
"/kaggle/input/classify-the-bitter-or-sweet-taste-of-compounds/2022-09-21-bitter-sweet-pre-cleanup.tsv",
sep="\t",
)
print(bitter_or_sweet_df.shape)
bitter_or_sweet_df.head()
bitter_or_sweet_df.info()
bitter_or_sweet_df.isnull().sum().sum()
# # data wrangling
# # Object_df
object_df = bitter_or_sweet_df.select_dtypes(include="object")
print(object_df.shape)
object_df.head()
object_df["Name"].value_counts()[:5]
object_df.drop_duplicates().shape
# object_cols
object_cols = (
object_df.drop(["Name", "SMILES", "Canonical_SMILES", "Target"], axis=1)
).columns
object_cols
# # numeric_df
numeric_df = bitter_or_sweet_df.select_dtypes(include=["float", "int"])
print(numeric_df.shape)
numeric_df.head()
numeric_df["ABC"].value_counts()
numeric_df["nAcid"].value_counts()
numeric_df["nBase"].value_counts()
numeric_df["nAromAtom"].value_counts()
numeric_df.describe()
# numeric_cols
numeric_cols = numeric_df.columns
numeric_cols
# # target and features
# multi-classification
target = bitter_or_sweet_df["Target"]
print(target.shape)
print(target.value_counts())
features = bitter_or_sweet_df.drop(
["Name", "SMILES", "Canonical_SMILES", "Target"], axis=1
)
print(features.shape)
features.head()
from sklearn.model_selection import train_test_split, KFold
kf = KFold(n_splits=5, shuffle=True, random_state=2305)
for tr_idx, va_idx in kf.split(features):
X_train, X_test = features.iloc[tr_idx], features.iloc[va_idx]
y_train, y_test = target.iloc[tr_idx], target.iloc[va_idx]
X_train.shape, X_test.shape, y_train.shape, y_test.shape
# # data preprocessing
# standardization
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X_train[numeric_cols])
X_train[numeric_cols] = scaler.transform(X_train[numeric_cols])
X_test[numeric_cols] = scaler.transform(X_test[numeric_cols])
X_train.describe()
# one-hot encoding
X_all = pd.concat([X_train, X_test])
X_all = pd.get_dummies(X_all, columns=object_cols)
X_train = X_all.iloc[: X_train.shape[0], :].reset_index(drop=True)
X_test = X_all.iloc[: X_test.shape[0], :].reset_index(drop=True)
X_train.info()
X_train.shape, X_test.shape
# 라이브러리를 임포트합니다.
from sklearn.model_selection import cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_classification
# 로지스틱 회귀 모델을 만듭니다.
logit = LogisticRegression()
# 정확도를 사용하여 교차검증을 수행합니다.
cross_val_score(logit, X_train, y_train, scoring="accuracy")
# 마크로 평균 F1 점수를 사용하여 교차검증을 수행합니다.
cross_val_score(logit, X_train, y_train, scoring="f1_macro")
from xgboost import XGBClassifier
xgbc = XGBClassifier(
n_estimators=100, random_state=2305, eval_metric="mlogloss", use_label_encoder=False
)
xgbc.fit(X_train, y_train)
print("train_accuracy:", xgbc.score(X_train, y_train))
print("test_accuracy:", xgbc.score(X_test, y_test))
xgbc_pred = xgbc.predict(X_test)
print("matching:", np.sum(y_test == xgbc_pred))
print("non_matching:", np.sum(y_test != xgbc_pred))
classes = target.unique()
classes
from yellowbrick.classifier import confusion_matrix
plt.figure(figsize=(3, 3))
confusion_matrix(xgbc, X_train, y_train, X_test, y_test, classes=target.unique())
plt.show()
| false | 0 | 1,280 | 0 | 1,978 | 1,280 |
||
129447300
|
# ## 0\. Preparação do ambiente
# Neste exercício vamos utilizar a base de dados de ações da bolsa de valores dos EUA, a Dow Jones. Os dados estão disponíveis para *download* neste [link](https://archive.ics.uci.edu/ml/datasets/Dow+Jones+Index). Vamos utilizar o pacote `wget` para fazer o *download* dos dados.
# - Instalando o pacote `wget` na versão 3.2.
# - Fazendo o download dos dados no arquivo compactado `dados.zip`.
import wget
wget.download(
url="https://archive.ics.uci.edu/ml/machine-learning-databases/00312/dow_jones_index.zip",
out="./dados.zip",
)
# - Descompactando os `dados` na pasta dados com o pacote nativo `zipfile`.
import zipfile
with zipfile.ZipFile("./dados.zip", "r") as fp:
fp.extractall("./dados")
# Verifique a pasta dados criada, ela deve conter dois arquivos:
# - **dow_jones_index.data**: um arquivo com os dados;
# - **dow_jones_index.names**: um arquivo com a descrição completa dos dados.
# É possível observar que o arquivo de dados é um arquivo separado por virgulas, o famoso `csv`. Vamos renomear o arquivo de dados para que ele tenha a extensão `csv` com o pacote nativo `os`.
# - Renomeando o arquivo com o pacote nativo `os`.
import os
os.rename("./dados/dow_jones_index.data", "./dados/dow_jones_index.csv")
# Pronto! Abra o arquivo e o Google Colab irá apresentar uma visualização bem legal dos dados.
# ---
# ## 1\. Pandas
# Para processar os dados, vamos utilizar o pacote `pandas` na versão `1.1.5`. A documentação completa por ser encontrada neste [link](https://pandas.pydata.org/docs/)
# Vamos importar o pacote com o apelido (alias) `pd`.
import pandas as pd
# Estamos prontos para ler o arquivo.
df = pd.read_csv("./dados/dow_jones_index.csv")
# O pandas trabalha com o conceito de dataframe, uma estrutura de dados com muitos métodos e atributos que aceleram o processamento de dados. Alguns exemplos:
# - Visualizando as `n` primeiras linhas:
df.head(n=10)
# - Visualizando o nome das colunas:
df.columns.to_list()
# - Verificando o número de linhas e colunas.
linhas, colunas = df.shape
print(f"Número de linhas: {linhas}")
print(f"Número de colunas: {colunas}")
# Vamos selecionar os valores de abertura, fechamento, máximo e mínimo das ações do McDonalds, listado na Dow Jones como MCD:
# - Selecionando as linha do dataframe original `df` em que a coluna `stock` é igual a `MCD`.
df_mcd = df[df["stock"] == "MCD"]
# - Selecionando apenas as colunas de data e valores de ações.
df_mcd = df_mcd[["date", "open", "high", "low", "close"]]
# Excelente, o problema é que as colunas com os valores possuem o carater `$` e são do tipo texto (`object` no `pandas`).
df_mcd.head(n=10)
df_mcd.dtypes
# Vamos limpar as colunas com o método `apply`, que permite a aplicação de uma função anônima (`lambda`) qualquer. A função `lambda` remove o caracter **$** e faz a conversão do tipo de `str` para `float`.
for col in ["open", "high", "low", "close"]:
df_mcd[col] = df_mcd[col].apply(lambda value: float(value.split(sep="$")[-1]))
# Verifique novamente os dados e seus tipos.
df_mcd.head(n=10)
df_mcd.dtypes
# Excelente, agora podemos explorar os dados visualmente.
# **Agora é a sua vez!** Conduza o mesmo processo para extrair e tratar os dados da empresa Coca-Cola (`stock` column igual a `KO`).
# - Selecionando as linha do dataframe original `df` em que a coluna `stock` é igual a `KO`.
df_KO = df[df["stock"] == "KO"] # extração e tratamento dos dados da empresa Coca-Cola.
# Vamos selecionar os valores de abertura, fechamento, máximo e mínimo das ações da empresa Coca-Cola, listado na Dow Jones como KO:
# - Selecionando apenas as colunas de data e valores de ações.
df_KO = df_KO[["date", "open", "high", "low", "close"]]
# Excelente, o problema é que as colunas com os valores possuem o carater `$` e são do tipo texto (`object` no `pandas`).
df_KO.head(n=10) # Visualize os dados do dataframe
df_KO.dtypes # Verifique o tipo dos dados
# Vamos limpar as colunas com o método `apply`, que permite a aplicação de uma função anônima (`lambda`) qualquer. A função `lambda` remove o caracter **$** e faz a conversão do tipo de `str` para `float`.
for col in ["open", "high", "low", "close"]:
df_KO[col] = df_KO[col].apply(lambda value: float(value.split(sep="$")[-1]))
# Verifique novamente os dados e seus tipos.
df_KO.head(n=10) # Visualize novamente os dados do dataframe
df_KO.dtypes # Verifique novamente o tipo dos dados
# Excelente, agora podemos explorar os dados visualmente.
# ---
# ## 2\. Seaborn
# Para visualizar os dados, vamos utilizar o pacote `seaborn` na versão `0.11.1`. A documentação completa por ser encontrada neste [link](https://seaborn.pydata.org/)
# Vamos importar o pacote com o apelido (alias) `sns`.
import seaborn as sns
# Vamos visualizar os valores de abertura das ações ao longo do tempo.
plot = sns.lineplot(x="date", y="open", data=df_mcd)
_ = plot.set_xticklabels(labels=df_mcd["date"], rotation=90)
# Vamos também visualizar os valores de fechamento das ações ao longo do tempo.
plot = sns.lineplot(x="date", y="close", data=df_mcd)
_ = plot.set_xticklabels(labels=df_mcd["date"], rotation=90)
# Para facilitar a comparação, vamo visualizar os quatro valores no mesmo gráfico.
plot = sns.lineplot(x="date", y="value", hue="variable", data=pd.melt(df_mcd, ["date"]))
_ = plot.set_xticklabels(labels=df_mcd["date"], rotation=90)
# Para finalizar, vamos salvar o gráfico numa figura.
plot.figure.savefig("./mcd.png")
# **Agora é a sua vez,** faça o gráfico acima para a empresa Coca-Cola e salve a imagem com o nome `ko.png`.
# Vamos visualizar os valores de abertura das ações ao longo do tempo.
plot_KO = sns.lineplot(x="date", y="open", data=df_KO)
_ = plot_KO.set_xticklabels(
labels=df_KO["date"], rotation=90
) # visualização dos dados da Coca-Cola.
# Vamos também visualizar os valores de fechamento das ações ao longo do tempo.
plot_KO = sns.lineplot(x="date", y="close", data=df_KO)
_ = plot_KO.set_xticklabels(labels=df_KO["date"], rotation=90)
# Para facilitar a comparação, vamo visualizar os quatro valores no mesmo gráfico.
plot_KO = sns.lineplot(
x="date", y="value", hue="variable", data=pd.melt(df_KO, ["date"])
)
_ = plot_KO.set_xticklabels(labels=df_KO["date"], rotation=90)
# Para finalizar, vamos salvar o gráfico numa figura.
plot_KO.figure.savefig("./ko.png")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/447/129447300.ipynb
| null | null |
[{"Id": 129447300, "ScriptId": 38489600, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13199124, "CreationDate": "05/13/2023 22:45:38", "VersionNumber": 1.0, "Title": "a\u00e7\u00f5es do Mc'donalds", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 183.0, "LinesInsertedFromPrevious": 183.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# ## 0\. Preparação do ambiente
# Neste exercício vamos utilizar a base de dados de ações da bolsa de valores dos EUA, a Dow Jones. Os dados estão disponíveis para *download* neste [link](https://archive.ics.uci.edu/ml/datasets/Dow+Jones+Index). Vamos utilizar o pacote `wget` para fazer o *download* dos dados.
# - Instalando o pacote `wget` na versão 3.2.
# - Fazendo o download dos dados no arquivo compactado `dados.zip`.
import wget
wget.download(
url="https://archive.ics.uci.edu/ml/machine-learning-databases/00312/dow_jones_index.zip",
out="./dados.zip",
)
# - Descompactando os `dados` na pasta dados com o pacote nativo `zipfile`.
import zipfile
with zipfile.ZipFile("./dados.zip", "r") as fp:
fp.extractall("./dados")
# Verifique a pasta dados criada, ela deve conter dois arquivos:
# - **dow_jones_index.data**: um arquivo com os dados;
# - **dow_jones_index.names**: um arquivo com a descrição completa dos dados.
# É possível observar que o arquivo de dados é um arquivo separado por virgulas, o famoso `csv`. Vamos renomear o arquivo de dados para que ele tenha a extensão `csv` com o pacote nativo `os`.
# - Renomeando o arquivo com o pacote nativo `os`.
import os
os.rename("./dados/dow_jones_index.data", "./dados/dow_jones_index.csv")
# Pronto! Abra o arquivo e o Google Colab irá apresentar uma visualização bem legal dos dados.
# ---
# ## 1\. Pandas
# Para processar os dados, vamos utilizar o pacote `pandas` na versão `1.1.5`. A documentação completa por ser encontrada neste [link](https://pandas.pydata.org/docs/)
# Vamos importar o pacote com o apelido (alias) `pd`.
import pandas as pd
# Estamos prontos para ler o arquivo.
df = pd.read_csv("./dados/dow_jones_index.csv")
# Pandas works with the concept of a dataframe, a data structure with many methods and attributes that speed up data processing. Some examples:
# - Viewing the first `n` rows:
df.head(n=10)
# - Viewing the column names:
df.columns.to_list()
# - Checking the number of rows and columns.
linhas, colunas = df.shape
print(f"Número de linhas: {linhas}")
print(f"Número de colunas: {colunas}")
# Let's select the open, close, high, and low prices of McDonald's stock, listed on the Dow Jones as MCD:
# - Selecting the rows of the original dataframe `df` where the `stock` column equals `MCD`.
df_mcd = df[df["stock"] == "MCD"]
# - Selecting only the date and the stock price columns.
df_mcd = df_mcd[["date", "open", "high", "low", "close"]]
# Excellent. The problem is that the price columns contain the `$` character and are of text type (`object` in `pandas`).
df_mcd.head(n=10)
df_mcd.dtypes
# Let's clean the columns with the `apply` method, which lets us apply any anonymous (`lambda`) function. The `lambda` function removes the **$** character and converts the type from `str` to `float`.
for col in ["open", "high", "low", "close"]:
df_mcd[col] = df_mcd[col].apply(lambda value: float(value.split(sep="$")[-1]))
# Check the data and its types again.
df_mcd.head(n=10)
df_mcd.dtypes
# Excellent, now we can explore the data visually.
# **Now it's your turn!** Carry out the same process to extract and clean the data of the Coca-Cola company (`stock` column equal to `KO`).
# - Selecting the rows of the original dataframe `df` where the `stock` column equals `KO`.
df_KO = df[df["stock"] == "KO"]  # extraction and cleaning of the Coca-Cola data.
# Let's select the open, close, high, and low prices of Coca-Cola stock, listed on the Dow Jones as KO:
# - Selecting only the date and the stock price columns.
df_KO = df_KO[["date", "open", "high", "low", "close"]]
# Excellent. The problem is that the price columns contain the `$` character and are of text type (`object` in `pandas`).
df_KO.head(n=10)  # View the dataframe
df_KO.dtypes  # Check the data types
# Let's clean the columns with the `apply` method, which lets us apply any anonymous (`lambda`) function. The `lambda` function removes the **$** character and converts the type from `str` to `float`.
for col in ["open", "high", "low", "close"]:
df_KO[col] = df_KO[col].apply(lambda value: float(value.split(sep="$")[-1]))
# Check the data and its types again.
df_KO.head(n=10)  # View the dataframe again
df_KO.dtypes  # Check the data types again
# Excellent, now we can explore the data visually.
# ---
# ## 2\. Seaborn
# To visualize the data, we will use the `seaborn` package, version `0.11.1`. The full documentation can be found at this [link](https://seaborn.pydata.org/)
# Let's import the package with the alias `sns`.
import seaborn as sns
# Let's visualize the stocks' opening prices over time.
plot = sns.lineplot(x="date", y="open", data=df_mcd)
_ = plot.set_xticklabels(labels=df_mcd["date"], rotation=90)
# Let's also visualize the closing prices over time.
plot = sns.lineplot(x="date", y="close", data=df_mcd)
_ = plot.set_xticklabels(labels=df_mcd["date"], rotation=90)
# To make the comparison easier, let's visualize the four values in the same plot.
plot = sns.lineplot(x="date", y="value", hue="variable", data=pd.melt(df_mcd, ["date"]))
_ = plot.set_xticklabels(labels=df_mcd["date"], rotation=90)
# Finally, let's save the plot to an image file.
plot.figure.savefig("./mcd.png")
# **Now it's your turn:** build the plot above for the Coca-Cola company and save the image as `ko.png`.
# Let's visualize the stocks' opening prices over time.
plot_KO = sns.lineplot(x="date", y="open", data=df_KO)
_ = plot_KO.set_xticklabels(
labels=df_KO["date"], rotation=90
)  # visualization of the Coca-Cola data.
# Let's also visualize the closing prices over time.
plot_KO = sns.lineplot(x="date", y="close", data=df_KO)
_ = plot_KO.set_xticklabels(labels=df_KO["date"], rotation=90)
# To make the comparison easier, let's visualize the four values in the same plot.
plot_KO = sns.lineplot(
x="date", y="value", hue="variable", data=pd.melt(df_KO, ["date"])
)
_ = plot_KO.set_xticklabels(labels=df_KO["date"], rotation=90)
# Finally, let's save the plot to an image file.
plot_KO.figure.savefig("./ko.png")
| false | 0 | 2,194 | 0 | 2,194 | 2,194 |
||
129447973
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_california_housing
california_housing = fetch_california_housing(as_frame=True)
# Check the dataset description.
print(california_housing.DESCR)
california_housing.frame.head()
california_housing.data.head()
california_housing.target.head()
california_housing.frame.info()
# A look at the features of the dataset.
california_housing.frame.hist(figsize=(12, 10), bins=30, edgecolor="black")
plt.subplots_adjust(hspace=0.7, wspace=0.4)
features_of_interest = ["AveRooms", "AveBedrms", "AveOccup", "Population"]
california_housing.frame[features_of_interest].describe()
import seaborn as sns
sns.scatterplot(
data=california_housing.frame,
x="Longitude",
y="Latitude",
size="MedHouseVal",
palette="viridis",
hue="MedHouseVal",
alpha=0.5,
)
plt.legend(title="MedHouseVal", bbox_to_anchor=(1.05, 0.95), loc="upper left")
_ = plt.title("Median house value depending of\n their spatial location")
# Let's do a random subsampling to have fewer data points to plot, while still being able to see these specificities.
rng = np.random.RandomState(0)
indices = rng.choice(
np.arange(california_housing.frame.shape[0]), size=500, replace=False
)
sns.scatterplot(
data=california_housing.frame.iloc[indices],
x="Longitude",
y="Latitude",
size="MedHouseVal",
hue="MedHouseVal",
palette="viridis",
alpha=0.5,
)
plt.legend(title="MedHouseVal", bbox_to_anchor=(1.05, 1), loc="upper left")
_ = plt.title("Median house value depending of\n their spatial location")
import pandas as pd
# Let's drop the longitude and latitude columns.
columns_drop = ["Longitude", "Latitude"]
subset = california_housing.frame.iloc[indices].drop(columns=columns_drop)
# Quantize the target and keep the midpoint for each interval
subset["MedHouseVal"] = pd.qcut(subset["MedHouseVal"], 6, retbins=False)
subset["MedHouseVal"] = subset["MedHouseVal"].apply(lambda x: x.mid)
_ = sns.pairplot(data=subset, hue="MedHouseVal", palette="viridis")
# Create a linear predictive model and show the values of the coefficients obtained via cross-validation.
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import RidgeCV
from sklearn.model_selection import cross_validate
from sklearn.pipeline import make_pipeline
alphas = np.logspace(-3, 1, num=30)
model = make_pipeline(StandardScaler(), RidgeCV(alphas=alphas))
cv_results = cross_validate(
model,
california_housing.data,
california_housing.target,
return_estimator=True,
n_jobs=2,
)
score = cv_results["test_score"]
print(f"R2 score: {score.mean():.3f} -+ {score.std():.3f}")
coefs = pd.DataFrame(
[est[-1].coef_ for est in cv_results["estimator"]],
columns=california_housing.feature_names,
)
color = {"whiskers": "black", "medians": "black", "caps": "black"}
coefs.plot.box(vert=False, color=color)
plt.axvline(x=0, ymin=-1, ymax=1, color="black", linestyle="--")
_ = plt.title("Coefficients of Ridge models\n via cross validation")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/447/129447973.ipynb
| null | null |
[{"Id": 129447973, "ScriptId": 38488270, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11805806, "CreationDate": "05/13/2023 23:00:37", "VersionNumber": 1.0, "Title": "notebookea7e5da7a3", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 99.0, "LinesInsertedFromPrevious": 99.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_california_housing
california_housing = fetch_california_housing(as_frame=True)
# Check the dataset description.
print(california_housing.DESCR)
california_housing.frame.head()
california_housing.data.head()
california_housing.target.head()
california_housing.frame.info()
# A look at the features of the dataset.
california_housing.frame.hist(figsize=(12, 10), bins=30, edgecolor="black")
plt.subplots_adjust(hspace=0.7, wspace=0.4)
features_of_interest = ["AveRooms", "AveBedrms", "AveOccup", "Population"]
california_housing.frame[features_of_interest].describe()
import seaborn as sns
sns.scatterplot(
data=california_housing.frame,
x="Longitude",
y="Latitude",
size="MedHouseVal",
palette="viridis",
hue="MedHouseVal",
alpha=0.5,
)
plt.legend(title="MedHouseVal", bbox_to_anchor=(1.05, 0.95), loc="upper left")
_ = plt.title("Median house value depending of\n their spatial location")
# Let's do a random subsampling to have fewer data points to plot, while still being able to see these specificities.
rng = np.random.RandomState(0)
indices = rng.choice(
np.arange(california_housing.frame.shape[0]), size=500, replace=False
)
sns.scatterplot(
data=california_housing.frame.iloc[indices],
x="Longitude",
y="Latitude",
size="MedHouseVal",
hue="MedHouseVal",
palette="viridis",
alpha=0.5,
)
plt.legend(title="MedHouseVal", bbox_to_anchor=(1.05, 1), loc="upper left")
_ = plt.title("Median house value depending of\n their spatial location")
import pandas as pd
# Let's drop the longitude and latitude columns.
columns_drop = ["Longitude", "Latitude"]
subset = california_housing.frame.iloc[indices].drop(columns=columns_drop)
# Quantize the target and keep the midpoint for each interval
subset["MedHouseVal"] = pd.qcut(subset["MedHouseVal"], 6, retbins=False)
subset["MedHouseVal"] = subset["MedHouseVal"].apply(lambda x: x.mid)
_ = sns.pairplot(data=subset, hue="MedHouseVal", palette="viridis")
# Create a linear predictive model and show the values of the coefficients obtained via cross-validation.
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import RidgeCV
from sklearn.model_selection import cross_validate
from sklearn.pipeline import make_pipeline
alphas = np.logspace(-3, 1, num=30)
model = make_pipeline(StandardScaler(), RidgeCV(alphas=alphas))
cv_results = cross_validate(
model,
california_housing.data,
california_housing.target,
return_estimator=True,
n_jobs=2,
)
score = cv_results["test_score"]
print(f"R2 score: {score.mean():.3f} -+ {score.std():.3f}")
coefs = pd.DataFrame(
[est[-1].coef_ for est in cv_results["estimator"]],
columns=california_housing.feature_names,
)
color = {"whiskers": "black", "medians": "black", "caps": "black"}
coefs.plot.box(vert=False, color=color)
plt.axvline(x=0, ymin=-1, ymax=1, color="black", linestyle="--")
_ = plt.title("Coefficients of Ridge models\n via cross validation")
| false | 0 | 1,127 | 0 | 1,127 | 1,127 |
||
129430897
|
import json
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import (
RandomForestClassifier,
GradientBoostingClassifier,
RandomForestRegressor,
GradientBoostingRegressor,
)
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import (
auc,
f1_score,
accuracy_score,
roc_auc_score,
mean_absolute_error,
)
from sklearn.model_selection import StratifiedKFold, GridSearchCV
from tqdm import tqdm_notebook
import warnings
warnings.filterwarnings("ignore")
SEED = 69
train_df = pd.read_csv("/kaggle/input/spring-2023-property-prices/Train.csv")
train_df.head()
test_df = pd.read_csv("/kaggle/input/spring-2023-property-prices/Test.csv")
test_df.head()
X = train_df.copy()
Y = train_df["price"]
datetime = pd.DatetimeIndex(X["date"])
X["date"] = (datetime.year - 2011) * 12 + datetime.month
X = X.fillna(X.mean())
del X["price"]
X_train, X_test, Y_train, Y_test = train_test_split(
X, Y, test_size=0.25, random_state=SEED
)
X_train = X
Y_train = Y
X_test = test_df.fillna(test_df.mean())
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
labels = ["normalized_price"]
value_features = [
"build_tech",
"floor",
"area",
"metro_dist",
"kw1",
"kw2",
"kw3",
"kw4",
"kw5",
"kw6",
"kw7",
"kw8",
"kw9",
"kw10",
"kw11",
"kw12",
"kw13",
]
cat_features = ["street_id", "balcon", "rooms"]
pipe = Pipeline(
[
(
"preprocessor",
ColumnTransformer(
transformers=[
("value", "passthrough", value_features),
("cat", OneHotEncoder(), cat_features),
]
),
),
(
"rnd_forest",
GradientBoostingRegressor(
n_estimators=1000, learning_rate=1.0, loss="absolute_error"
),
),
]
)
# pipe = GradientBoostingRegressor(n_estimators=1000, learning_rate=1.)
pipe.fit(X_train, Y_train)
# y_pred = pipe.predict(
# X_test
# )
# mean_absolute_error(y_pred, Y_test)
sub = pd.read_csv("/kaggle/input/spring-2023-property-prices/SampleSubmission.csv")
sub.price = pipe.predict(X_test)
sub.to_csv("/kaggle/working/submission.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/430/129430897.ipynb
| null | null |
[{"Id": 129430897, "ScriptId": 38484269, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14658782, "CreationDate": "05/13/2023 18:23:59", "VersionNumber": 1.0, "Title": "notebook07a8b5899a", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 77.0, "LinesInsertedFromPrevious": 77.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import json
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import (
RandomForestClassifier,
GradientBoostingClassifier,
RandomForestRegressor,
GradientBoostingRegressor,
)
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import (
auc,
f1_score,
accuracy_score,
roc_auc_score,
mean_absolute_error,
)
from sklearn.model_selection import StratifiedKFold, GridSearchCV
from tqdm import tqdm_notebook
import warnings
warnings.filterwarnings("ignore")
SEED = 69
train_df = pd.read_csv("/kaggle/input/spring-2023-property-prices/Train.csv")
train_df.head()
test_df = pd.read_csv("/kaggle/input/spring-2023-property-prices/Test.csv")
test_df.head()
X = train_df.copy()
Y = train_df["price"]
datetime = pd.DatetimeIndex(X["date"])
X["date"] = (datetime.year - 2011) * 12 + datetime.month
X = X.fillna(X.mean())
del X["price"]
X_train, X_test, Y_train, Y_test = train_test_split(
X, Y, test_size=0.25, random_state=SEED
)
X_train = X
Y_train = Y
X_test = test_df.fillna(test_df.mean())
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.preprocessing import OneHotEncoder, StandardScaler
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
labels = ["normalized_price"]
value_features = [
"build_tech",
"floor",
"area",
"metro_dist",
"kw1",
"kw2",
"kw3",
"kw4",
"kw5",
"kw6",
"kw7",
"kw8",
"kw9",
"kw10",
"kw11",
"kw12",
"kw13",
]
cat_features = ["street_id", "balcon", "rooms"]
pipe = Pipeline(
[
(
"preprocessor",
ColumnTransformer(
transformers=[
("value", "passthrough", value_features),
("cat", OneHotEncoder(), cat_features),
]
),
),
(
"rnd_forest",
GradientBoostingRegressor(
n_estimators=1000, learning_rate=1.0, loss="absolute_error"
),
),
]
)
# pipe = GradientBoostingRegressor(n_estimators=1000, learning_rate=1.)
pipe.fit(X_train, Y_train)
# y_pred = pipe.predict(
# X_test
# )
# mean_absolute_error(y_pred, Y_test)
sub = pd.read_csv("/kaggle/input/spring-2023-property-prices/SampleSubmission.csv")
sub.price = pipe.predict(X_test)
sub.to_csv("/kaggle/working/submission.csv", index=False)
| false | 0 | 811 | 0 | 811 | 811 |
||
129430620
|
<jupyter_start><jupyter_text>haarcascade_frontalface_default
Kaggle dataset identifier: haarcascade-frontalface-default
<jupyter_script>import cv2
import matplotlib.pyplot as plt
# Reading img to BGR format
image_bgr = cv2.imread("/kaggle/input/abc-image/crk.jpeg")
# converting it to RGB format
image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
plt.imshow(image_rgb)
# # Face detection using Opencv
faceCascade = cv2.CascadeClassifier(
"/kaggle/input/haarcascade-frontalface-default/haarcascade_frontalface_default.xml"
)
faces = faceCascade.detectMultiScale(
image_rgb, scaleFactor=1.075, minNeighbors=5, minSize=(15, 15)
)
faces
# drawing plots on img
for x, y, w, h in faces:
cv2.rectangle(
image_rgb, pt1=(x, y), pt2=(x + w, y + h), color=(255, 0, 0), thickness=2
)
plt.imshow(image_rgb)
# # Face Detection using Dlib
import dlib
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/430/129430620.ipynb
|
haarcascade-frontalface-default
|
bhupendradewangan
|
[{"Id": 129430620, "ScriptId": 38322344, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9126816, "CreationDate": "05/13/2023 18:20:33", "VersionNumber": 1.0, "Title": "FACE_DETECTION_OPENCV", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 51.0, "LinesInsertedFromPrevious": 51.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185474070, "KernelVersionId": 129430620, "SourceDatasetVersionId": 3622352}, {"Id": 185474071, "KernelVersionId": 129430620, "SourceDatasetVersionId": 5645786}]
|
[{"Id": 3622352, "DatasetId": 2170413, "DatasourceVersionId": 3675976, "CreatorUserId": 4004880, "LicenseName": "Unknown", "CreationDate": "05/12/2022 13:00:21", "VersionNumber": 1.0, "Title": "haarcascade_frontalface_default", "Slug": "haarcascade-frontalface-default", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 2170413, "CreatorUserId": 4004880, "OwnerUserId": 4004880.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 3622352.0, "CurrentDatasourceVersionId": 3675976.0, "ForumId": 2196314, "Type": 2, "CreationDate": "05/12/2022 13:00:21", "LastActivityDate": "05/12/2022", "TotalViews": 229, "TotalDownloads": 15, "TotalVotes": 2, "TotalKernels": 0}]
|
[{"Id": 4004880, "UserName": "bhupendradewangan", "DisplayName": "BhupendraDewangan", "RegisterDate": "11/09/2019", "PerformanceTier": 0}]
|
import cv2
import matplotlib.pyplot as plt
# Reading img to BGR format
image_bgr = cv2.imread("/kaggle/input/abc-image/crk.jpeg")
# converting it to RGB format
image_rgb = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2RGB)
plt.imshow(image_rgb)
# # Face detection using Opencv
faceCascade = cv2.CascadeClassifier(
"/kaggle/input/haarcascade-frontalface-default/haarcascade_frontalface_default.xml"
)
faces = faceCascade.detectMultiScale(
image_rgb, scaleFactor=1.075, minNeighbors=5, minSize=(15, 15)
)
faces
# drawing plots on img
for x, y, w, h in faces:
cv2.rectangle(
image_rgb, pt1=(x, y), pt2=(x + w, y + h), color=(255, 0, 0), thickness=2
)
plt.imshow(image_rgb)
# # Face Detection using Dlib
import dlib
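# The original notebook stops right after importing dlib. Below is a minimal, hypothetical sketch of
# HOG-based face detection with dlib's get_frontal_face_detector; it is not part of the original code
# and simply mirrors the OpenCV drawing step above.
detector = dlib.get_frontal_face_detector()
gray = cv2.cvtColor(image_rgb, cv2.COLOR_RGB2GRAY)
rects = detector(gray, 1)  # the second argument is the number of upsampling passes
for rect in rects:
    cv2.rectangle(
        image_rgb,
        pt1=(rect.left(), rect.top()),
        pt2=(rect.right(), rect.bottom()),
        color=(0, 255, 0),
        thickness=2,
    )
plt.imshow(image_rgb)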
| false | 0 | 268 | 0 | 302 | 268 |
||
129884050
|
# # News Sentiment Analysis
import pandas as pd
import re
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense, Dropout, BatchNormalization
from keras.callbacks import EarlyStopping
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report, confusion_matrix
import seaborn as sns
df = pd.read_csv("/kaggle/input/news-dataset/news.csv")
df.head()
# 1 --> Negative news
# 0 --> Positive news
df.shape
df.info()
# ## Preprocessing
prep = []
for i in df["Headline"]:
word = i.lower()
word = re.sub(
"[^a-z ]", "", word
    )  # replace all characters except a-z and space with an empty string
prep.append(word)
df["cleaned_headlines"] = prep
df.head()
tfv = TfidfVectorizer()
x = tfv.fit_transform(df["cleaned_headlines"]).toarray()
y = df["Label"]
y.value_counts()
xtrain, xtest, ytrain, ytest = train_test_split(x, y, train_size=0.75)
# ### Building model
nn = Sequential()
# 1st hidden layer
nn.add(Dense(128, activation="relu", input_dim=xtrain.shape[1]))
nn.add(Dropout(0.3))
nn.add(BatchNormalization())
# 2nd hidden layer
nn.add(Dense(64, activation="relu"))
nn.add(Dropout(0.3))
nn.add(BatchNormalization())
# 3rd hidden layer
nn.add(Dense(32, activation="relu"))
nn.add(Dropout(0.3))
nn.add(BatchNormalization())
# output
nn.add(Dense(1, activation="sigmoid"))
early_stop = EarlyStopping(monitor="val_loss", patience=10)
nn.summary()
nn.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
hist = nn.fit(
xtrain,
ytrain,
validation_split=0.2,
batch_size=8,
epochs=100,
callbacks=[early_stop],
)
plt.plot(hist.history["loss"], c="blue")
plt.plot(hist.history["val_loss"], c="red")
nn.evaluate(xtrain, ytrain)
nn.evaluate(xtest, ytest)
# ### Sklearn Metrics
prob = nn.predict(xtest)
pred = []
for i in prob:
if i >= 0.5:
pred.append(1)
else:
pred.append(0)
print(classification_report(ytest, pred))
sns.heatmap(confusion_matrix(ytest, pred), annot=True, fmt="d")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/884/129884050.ipynb
| null | null |
[{"Id": 129884050, "ScriptId": 38632380, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12467287, "CreationDate": "05/17/2023 07:18:30", "VersionNumber": 1.0, "Title": "notebook56035e72c3", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 104.0, "LinesInsertedFromPrevious": 104.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
| null | null | null | null |
# # News Sentiment Analysis
import pandas as pd
import re
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense, Dropout, BatchNormalization
from keras.callbacks import EarlyStopping
import matplotlib.pyplot as plt
from sklearn.metrics import classification_report, confusion_matrix
import seaborn as sns
df = pd.read_csv("/kaggle/input/news-dataset/news.csv")
df.head()
# 1 --> Negative news
# 0 --> Positive news
df.shape
df.info()
# ## Preprocessing
prep = []
for i in df["Headline"]:
word = i.lower()
word = re.sub(
"[^a-z ]", "", word
    )  # replace all characters except a-z and space with an empty string
prep.append(word)
df["cleaned_headlines"] = prep
df.head()
tfv = TfidfVectorizer()
x = tfv.fit_transform(df["cleaned_headlines"]).toarray()
y = df["Label"]
y.value_counts()
xtrain, xtest, ytrain, ytest = train_test_split(x, y, train_size=0.75)
# ### Building model
nn = Sequential()
# 1st hidden layer
nn.add(Dense(128, activation="relu", input_dim=xtrain.shape[1]))
nn.add(Dropout(0.3))
nn.add(BatchNormalization())
# 2nd hidden layer
nn.add(Dense(64, activation="relu"))
nn.add(Dropout(0.3))
nn.add(BatchNormalization())
# 3rd hidden layer
nn.add(Dense(32, activation="relu"))
nn.add(Dropout(0.3))
nn.add(BatchNormalization())
# output
nn.add(Dense(1, activation="sigmoid"))
early_stop = EarlyStopping(monitor="val_loss", patience=10)
nn.summary()
nn.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
hist = nn.fit(
xtrain,
ytrain,
validation_split=0.2,
batch_size=8,
epochs=100,
callbacks=[early_stop],
)
plt.plot(hist.history["loss"], c="blue")
plt.plot(hist.history["val_loss"], c="red")
nn.evaluate(xtrain, ytrain)
nn.evaluate(xtest, ytest)
# ### Sklearn Metrics
prob = nn.predict(xtest)
pred = []
for i in prob:
if i >= 0.5:
pred.append(1)
else:
pred.append(0)
print(classification_report(ytest, pred))
sns.heatmap(confusion_matrix(ytest, pred), annot=True, fmt="d")
| false | 0 | 699 | 1 | 699 | 699 |
||
129910089
|
# # Heart Disease data : Predicting using Decision trees
# - we have 5 variables; we need to predict heart disease using the other 4 features (age, sex, BP, cholesterol).
# - Decision trees can also be used for regression: if we want to perform regression on a dataset, we can fit a decision tree on it and make predictions.
import pandas as pd, numpy as np
import matplotlib.pyplot as plt, seaborn as sns
df = pd.read_csv("/Users/sakshimunde/Downloads/heart_v2.csv")
df.head()
df.info()
# - no null values
# - Let's split the data
# #### Train - Test split
from sklearn.model_selection import train_test_split
# assigning rest columns to X axis
X = df.drop("heart disease", axis=1)
X.head()
# assigning heart disease y axis
y = df["heart disease"]
y.head()
# or
# y = df.loc[: , "heart disease"]
# splitting data into train & test sets
# random_state any +ve random number we can give
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.7, test_size=0.3, random_state=42
)
X_train.shape, X_test.shape, y_train.shape, y_test.shape
# - The y axis holds only 1 column, i.e., heart disease.
# - The X axis holds all the remaining independent features/variables.
# ### Building a decision tree
# - we use all default parameters except depth.
# - Depth is the number of levels of the tree; with the default setting the tree grows a lot of nodes on its own, which is difficult to interpret, so we will control the depth/height ourselves.
# - We have a decision tree classifier and a regressor: for classification (categorical) targets we use the classifier, and for regression (continuous) targets we use the regressor.
# now we will use the classifier because this is a binary classification problem
from sklearn.tree import DecisionTreeClassifier
#### In sklearn, DecisionTreeClassifier is a class, so we create an object (instance) of it.
# depth/height/levels: we can set it ourselves. Here we limit the tree to a depth of 3.
dt = DecisionTreeClassifier(max_depth=3)
dt
# - If there is no limit set on a decision tree, it will give you 100% accuracy on the training data set because in the worst case it will end up making 1 leaf for each observation. This hurts accuracy when predicting samples that are not part of the training set.
# fitting the model
dt.fit(X_train, y_train)
# ## Plotting the decision tree using the built-in plot_tree function
# - Interpreting (explaining) the decision tree
# now we plot the fitted tree with plot_tree; later we will also render it as an image with graphviz
from sklearn.tree import plot_tree
plt.figure(figsize=[50, 30])
plot_tree(
dt, feature_names=X.columns, class_names=["No Disease", "Disease"], filled=True
)
plt.show()
# - In classification, each data point in a leaf has a class label associated with it.
# - class = No disease /disease are labels.
# - here class = [label 0, label 1]; each class has a label, label 0 first and then label 1. It depends on us what we assign to labels 0 and 1; here label 0 = No disease and label 1 = Disease.
# ## Or using graphviz
# import libraries
from IPython.display import Image # for our tree structure this will display the image
from six import StringIO # output the graph into a file
from sklearn.tree import export_graphviz
import graphviz, pydotplus
# #### StringIO module allows us to manage the file related input and output operations.
# - StringIO object can be used as input or output to the most function that would expect a standard file object.When the stringio object is created it is intialized by passing a string to a constructor.if no string is passed then the stringio will start with empty string.
# now create an object of stringIO
dot_data = StringIO()
dot_data
# now let's use graphviz.
# these are the values that will be shown in each node.
export_graphviz(
dt,
out_file=dot_data,
filled=True,
rounded=True,
feature_names=X.columns,
class_names=["No Disease", "Disease"],
)
# This will create a graphviz object and put it into dot_data
# - filled=True gives a different color to each node/box
# - X.columns are the columns we put on the X axis: age, sex, BP, cholesterol.
# - using the dot_data object, we plot the graph
# creating a graph using dot data object
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
graph
# - Now that you have built the decision tree and visualised it using the graphviz library, let's now evaluate how the model that we built is performing on the unseen data.
#
Image(graph.create_png())
# - How many leaves does the tree have? The 8 bottom nodes that are not split further are called leaves.
# - we can see value = [101, 88] => 189 patients in total, of which 101 do not have heart disease and 88 do.
# - class = No disease or Disease appears in each box; the class shown is the one with the majority of samples in that node. If a node has more "no disease" samples, its class is No disease.
# - value = [19, 0] => the majority (19) is on the no-disease side, so the class is No disease.
# - value = [3, 6] => the majority (6) is diseased, so the class is Disease.
# ## Now let's evaluate the model performance
# - using confusion matrix & accuracy
# let's see y train predicted value
# we take model value of fit().dt and xtrain
y_train_pred = dt.predict(X_train)
# now predict y test
y_test_pred = dt.predict(X_test)
# using actual y train and predicted y train let's calculate accuracy score and confusion matrix
from sklearn.metrics import confusion_matrix, accuracy_score
print(confusion_matrix(y_train, y_train_pred))
accuracy_score(y_train, y_train_pred)
# - accuracy is 74%, which is good.
# - In the confusion matrix, 19 & 30 are the wrongly predicted values.
# - let's see the confusion matrix and accuracy of the test data set
# test data
print(confusion_matrix(y_test, y_test_pred))
accuracy_score(y_test, y_test_pred)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/910/129910089.ipynb
| null | null |
[{"Id": 129910089, "ScriptId": 38642775, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12439749, "CreationDate": "05/17/2023 11:20:32", "VersionNumber": 1.0, "Title": "notebook87007e43b7", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 146.0, "LinesInsertedFromPrevious": 146.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# # Heart Disease data : Predicting using Decision trees
# - we have 5 variables; we need to predict heart disease using the other 4 features (age, sex, BP, cholesterol).
# - Decision trees can also be used for regression: if we want to perform regression on a dataset, we can fit a decision tree on it and make predictions.
import pandas as pd, numpy as np
import matplotlib.pyplot as plt, seaborn as sns
df = pd.read_csv("/Users/sakshimunde/Downloads/heart_v2.csv")
df.head()
df.info()
# - no null values
# - Let's split the data
# #### Train - Test split
from sklearn.model_selection import train_test_split
# assigning rest columns to X axis
X = df.drop("heart disease", axis=1)
X.head()
# assigning heart disease y axis
y = df["heart disease"]
y.head()
# or
# y = df.loc[: , "heart disease"]
# splitting data into train & test sets
# random_state any +ve random number we can give
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.7, test_size=0.3, random_state=42
)
X_train.shape, X_test.shape, y_train.shape, y_test.shape
# - The y axis holds only 1 column, i.e., heart disease.
# - The X axis holds all the remaining independent features/variables.
# ### Building a decision tree
# - we use all default parameters except depth.
# - Depth is the number of levels of the tree; with the default setting the tree grows a lot of nodes on its own, which is difficult to interpret, so we will control the depth/height ourselves.
# - We have a decision tree classifier and a regressor: for classification (categorical) targets we use the classifier, and for regression (continuous) targets we use the regressor.
# now we will use the classifier because this is a binary classification problem
from sklearn.tree import DecisionTreeClassifier
#### In sklearn, DecisionTreeClassifier is a class, so we create an object (instance) of it.
# depth/height/levels: we can set it ourselves. Here we limit the tree to a depth of 3.
dt = DecisionTreeClassifier(max_depth=3)
dt
# - If there is no limit set on a decision tree, it will give you 100% accuracy on the training data set because in the worst case it will end up making 1 leaf for each observation. This hurts accuracy when predicting samples that are not part of the training set.
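# A quick, hypothetical check of the claim above (not in the original notebook): an unrestricted
# tree tends to memorize the training set, so its training accuracy is typically 1.0.
from sklearn.metrics import accuracy_score  # also imported again later in the notebook

dt_full = DecisionTreeClassifier().fit(X_train, y_train)
print("train accuracy without depth limit:", accuracy_score(y_train, dt_full.predict(X_train)))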
# fitting the model
dt.fit(X_train, y_train)
# ## Plotting the decision tree using the built-in plot_tree function
# - Interpreting (explaining) the decision tree
# now we plot the fitted tree with plot_tree; later we will also render it as an image with graphviz
from sklearn.tree import plot_tree
plt.figure(figsize=[50, 30])
plot_tree(
dt, feature_names=X.columns, class_names=["No Disease", "Disease"], filled=True
)
plt.show()
# - In classification, each data point in a leaf has a class label associated with it.
# - class = No disease /disease are labels.
# - here class = [label 0, label 1]; each class has a label, label 0 first and then label 1. It depends on us what we assign to labels 0 and 1; here label 0 = No disease and label 1 = Disease.
# ## Or using graphviz
# import libraries
from IPython.display import Image # for our tree structure this will display the image
from six import StringIO # output the graph into a file
from sklearn.tree import export_graphviz
import graphviz, pydotplus
# #### StringIO module allows us to manage the file related input and output operations.
# - StringIO object can be used as input or output to the most function that would expect a standard file object.When the stringio object is created it is intialized by passing a string to a constructor.if no string is passed then the stringio will start with empty string.
# now create an object of stringIO
dot_data = StringIO()
dot_data
# now let's use graphviz.
# these are the values that will be shown in each node.
export_graphviz(
dt,
out_file=dot_data,
filled=True,
rounded=True,
feature_names=X.columns,
class_names=["No Disease", "Disease"],
)
# This will create a graphviz object and put it into dot_data
# - filled=True gives a different color to each node/box
# - X.columns are the columns we put on the X axis: age, sex, BP, cholesterol.
# - using the dot_data object, we plot the graph
# creating a graph using dot data object
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
graph
# - Now that you have built the decision tree and visualised it using the graphviz library, let's now evaluate how the model that we built is performing on the unseen data.
#
Image(graph.create_png())
# - How many leaves does the tree have? The 8 bottom nodes that are not split further are called leaves (a small snippet below verifies this programmatically).
# - we can see value = [101, 88] => 189 patients in total, of which 101 do not have heart disease and 88 do.
# - class = No disease or Disease appears in each box; the class shown is the one with the majority of samples in that node. If a node has more "no disease" samples, its class is No disease.
# - value = [19, 0] => the majority (19) is on the no-disease side, so the class is No disease.
# - value = [3, 6] => the majority (6) is diseased, so the class is Disease.
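# A small, hypothetical snippet to confirm the interpretation above programmatically
# (get_n_leaves/get_depth are standard DecisionTreeClassifier methods):
print("number of leaves:", dt.get_n_leaves())  # at most 2**3 = 8 for max_depth=3
print("tree depth:", dt.get_depth())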
# ## Now let's evaluate the model performance
# - using confusion matrix & accuracy
# let's see y train predicted value
# we take model value of fit().dt and xtrain
y_train_pred = dt.predict(X_train)
# now predict y test
y_test_pred = dt.predict(X_test)
# using actual y train and predicted y train let's calculate accuracy score and confusion matrix
from sklearn.metrics import confusion_matrix, accuracy_score
print(confusion_matrix(y_train, y_train_pred))
accuracy_score(y_train, y_train_pred)
# - accuracy is 74%, which is good.
# - In the confusion matrix, 19 & 30 are the wrongly predicted values.
# - let's see the confusion matrix and accuracy of the test data set
# test data
print(confusion_matrix(y_test, y_test_pred))
accuracy_score(y_test, y_test_pred)
| false | 0 | 1,637 | 0 | 1,637 | 1,637 |
||
129910905
|
# # Credit card fraud detection - Optuna+XGBoost and RandomForest
# # Credit Fraud Detection
# Credit fraud detection refers to the process of identifying and preventing fraudulent activities in credit card transactions.
# It involves analyzing patterns, behaviors, and characteristics of credit card transactions to identify any suspicious or fraudulent activities.
# The goal is to detect and prevent unauthorized or fraudulent transactions, protecting both the credit card issuer and the cardholder.
# Credit fraud can take various forms, such as stolen credit card information, unauthorized transactions, identity theft, and account takeover.
# Fraudsters often try to exploit vulnerabilities in the payment system to make fraudulent transactions without detection.
# **Goal of Credit Fraud Detection**
# *The goal of credit fraud detection is to strike a balance between accurately identifying fraudulent transactions while minimizing false positives that may inconvenience legitimate cardholders.*
# *It is an ongoing process that involves continuous monitoring, analysis, and improvement to stay ahead of evolving fraud techniques.*
# 
# # About Dataset
# **Context**
# It is important that credit card companies are able to recognize fraudulent credit card transactions so that customers are not charged for items that they did not purchase.
# **Content**
# The dataset contains transactions made by credit cards in September 2013 by European cardholders.
# This dataset presents transactions that occurred in two days, where we have 492 frauds out of 284,807 transactions. The dataset is highly unbalanced, the positive class (frauds) account for 0.172% of all transactions.
# It contains only numerical input variables which are the result of a PCA transformation. Unfortunately, due to confidentiality issues, we cannot provide the original features and more background information about the data.
# Features V1, V2, … V28 are the principal components obtained with PCA, the only features which have not been transformed with PCA are 'Time' and 'Amount'.
# Feature 'Time' contains the seconds elapsed between each transaction and the first transaction in the dataset.
# The feature 'Amount' is the transaction Amount, this feature can be used for example-dependant cost-sensitive learning. Feature 'Class' is the response variable and it takes value 1 in case of fraud and 0 otherwise.
# Given the class imbalance ratio, we recommend measuring the accuracy using the Area Under the Precision-Recall Curve (AUPRC). Confusion matrix accuracy is not meaningful for unbalanced classification.
# # Steps of Project
# **Step: 1**
# * Data analysis: start this step with a few small EDA and exploration sub-steps
# > Data Visualization: Use data visualization techniques to gain insights into the data.
#
# > Create visualizations such as histograms, scatter plots, and box plots to understand the distribution of transaction amounts, identify outliers, and detect any patterns or anomalies in the data.
#
# **Step: 2**
#
# * Draw some conclusions from the data analysis and apply some data transformations: after the visualizations I will transform and resample the dataset
# > Feature Engineering: Create additional features from the existing data that might help in detecting fraudulent transactions.
#
# > For example, you can calculate features such as transaction frequency, average transaction amount, or time since the last transaction.
#
#
# **Step: 3**
# * Machine learning, the main part of the project for detecting frauds: after feature engineering I apply a few tricks to get good model performance and predict every class accurately
# > Sklearn and XGBoost: this includes strong models such as RandomForestClassifier and XGBoost to help us predict frauds; they are also very useful for an unbalanced dataset, but we will use some other tricks too
#
# > Hyperparameter tuning: Optuna and cross_val_score will help us here. Optuna is a more advanced and very efficient framework for optimizing a model; as extra information, Optuna is really fast compared to sklearn's RandomizedSearchCV and GridSearchCV model selection. A minimal, hypothetical sketch of an Optuna objective is shown right below.
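# A minimal, self-contained sketch of Optuna-based tuning for an XGBoost classifier, shown here only
# as an illustration of the idea described above. It uses a small synthetic imbalanced dataset;
# X_demo/y_demo are hypothetical names, not variables from this notebook.
import optuna
from sklearn.datasets import make_classification
from sklearn.model_selection import cross_val_score
from xgboost import XGBClassifier

X_demo, y_demo = make_classification(
    n_samples=2000, n_features=20, weights=[0.95, 0.05], random_state=42
)


def objective(trial):
    params = {
        "n_estimators": trial.suggest_int("n_estimators", 100, 500),
        "max_depth": trial.suggest_int("max_depth", 3, 8),
        "learning_rate": trial.suggest_float("learning_rate", 1e-3, 0.3, log=True),
        "subsample": trial.suggest_float("subsample", 0.5, 1.0),
    }
    model = XGBClassifier(**params)
    # F1 is more informative than plain accuracy on an imbalanced target
    return cross_val_score(model, X_demo, y_demo, cv=3, scoring="f1").mean()


study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=20)
print(study.best_params)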
# # Installation part
# **I will import some essential models and frameworks to help us detect frauds**
# !pip install feature_engine
# Optuna is an automatic hyperparameter optimization software framework, particularly designed for machine learning. It features an imperative, define-by-run style user API.
import optuna
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
from imblearn.over_sampling import SMOTE
from sklearn.metrics import classification_report, accuracy_score, confusion_matrix
from sklearn.preprocessing import StandardScaler, QuantileTransformer
from feature_engine.transformation import YeoJohnsonTransformer
from sklearn.feature_selection import (
chi2,
r_regression,
mutual_info_regression,
f_classif,
f_regression,
mutual_info_classif,
)
from sklearn.feature_selection import (
SelectKBest,
SelectFromModel,
SequentialFeatureSelector,
)
from feature_engine.selection import DropCorrelatedFeatures
from feature_engine.transformation import LogTransformer
# Imblearn techniques are the methods by which we can generate a data set that has an equal ratio of classes. The predictive model built on this type of data set would be able to generalize well.
# We mainly have two options to treat an imbalanced data set that are Upsampling and Downsampling
from imblearn.over_sampling import SMOTE
from imblearn.ensemble import BalancedBaggingClassifier, BalancedRandomForestClassifier
from imblearn.pipeline import Pipeline as imbPipeline
from sklearn.model_selection import (
GridSearchCV,
RandomizedSearchCV,
train_test_split,
cross_val_score,
KFold,
)
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import VotingClassifier, RandomForestClassifier
from xgboost import XGBClassifier
from sklearn.metrics import (
accuracy_score,
mean_absolute_error,
mean_squared_error,
make_scorer,
classification_report,
confusion_matrix,
)
import warnings
warnings.filterwarnings("ignore")
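# A quick, self-contained toy illustration of what SMOTE upsampling (mentioned above) does.
# It uses synthetic data only, not the credit card dataset; X_toy/y_toy are hypothetical names.
from collections import Counter
from sklearn.datasets import make_classification

X_toy, y_toy = make_classification(n_samples=1000, weights=[0.95, 0.05], random_state=0)
print("class counts before SMOTE:", Counter(y_toy))
X_res, y_res = SMOTE(random_state=0).fit_resample(X_toy, y_toy)
print("class counts after SMOTE: ", Counter(y_res))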
# **Here are a few potential benefits of using the os module to read CSV files:**
# File Path Manipulation: The os module allows you to manipulate file paths, such as joining directory names and file names using os.path.join().
# This can be helpful when constructing the path to your CSV file dynamically or when working with files in different directories.
# While the os module itself may not directly provide CSV parsing capabilities, it complements other modules, such as csv or pandas, by helping with file-related tasks and ensuring smooth interaction with the operating system.
read_path = ""
import os
for dirname, _, filenames in os.walk("creditcardfraud/"):
for filename in filenames:
if ".csv" in filename:
read_path = os.path.join(dirname, filename)
break
df = pd.read_csv(read_path)
df = df.drop("Time", axis=1)
# # Step: 1 Visualization part
# This type of fraud does not require the criminal to have a physical credit card.
# Instead, they will obtain basic details, such as the account holder's name, the credit card number, and the expiration date.
# With this information, they can commit fraudulent activity by mail, via the phone, or online.
# Here I have some important functions that help me draw conclusions about the dataset,
# and from these conclusions I will decide how to move forward with the feature engineering and ML engineering steps
# * Type: Correlation analysis.
# The main benefits of correlation analysis are that it helps companies determine which variables they want to investigate further,
#
# and it allows for rapid hypothesis testing. The main type of correlation analysis use Pearson's r formula to identify the degree of the linear relationship between two variables.
#
# * What are the benefits of using correlation analysis for classification problems?
#
# * Correlation analysis can indeed help determine the importance of features in the machine learning process for classification problems. Here's how correlation analysis can be useful:**
#
# 1. Identify Strong Correlations: By calculating the correlation coefficient (such as Pearson's correlation
# coefficient), you can measure the strength and direction of the linear
# relationship between pairs of features. Positive values indicate a positive correlation, negative values
# indicate a negative correlation, and values close to zero indicate no
# correlation. Identifying strong correlations can give insights into which features might be important for
# the classification task.
# 2. Feature Selection: Correlation analysis can help in feature selection by identifying features that have a
# strong correlation with the target variable. Features with high correlation
# are more likely to have a strong influence on the target variable and may contribute significantly to the
# classification task. Such features can be selected for further analysis or
# included in the machine learning model.
#
# 3. Multicollinearity Detection: Correlation analysis can help detect multicollinearity, which refers to high
# correlations among predictor variables. Multicollinearity can lead to
# redundant information and instability in the model. By identifying highly correlated features, you can make
# informed decisions about whether to keep all correlated features, remove
# redundant features, or use dimensionality reduction techniques to address multicollinearity.
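# A small, hypothetical illustration of point 2 above (feature selection via correlation):
# keep only the features whose absolute correlation with Class exceeds a chosen threshold.
corr_with_class = df.corr()["Class"].drop("Class").abs()
selected_by_corr = corr_with_class[corr_with_class > 0.1].index.tolist()
print("features with |corr(Class)| > 0.1:", selected_by_corr)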
fig = figsize = (30, 30)
class get_corr_analysis:
def __init__(self, *, data=None, figure_analysis=(10, 8), figure_heat=(10, 10)):
self.data = data
self.figure_a = figure_analysis
self.figure_h = figure_heat
# Analysis against the Class column: show how the correlation (corr() score) rises and falls across features
def analysis_by(self, *, which_column="Class"):
df = self.data
figure = self.figure_a
_, ax1 = plt.subplots(1, figsize=figure)
df_by_corr = np.abs(df.corr().round(3)[which_column])[:-1]
ax1.set_xticklabels(
labels=df_by_corr.index, rotation=90, fontdict={"fontsize": 15}
)
ax1.set_ylabel(
"Corr for Class column", fontdict={"fontsize": 20, "color": "gray"}
)
ax1.set_title(
"Correlation Analysis", fontdict={"fontsize": 20, "color": "gray"}
)
sns.lineplot(df_by_corr, ax=ax1)
plt.show()
# Here is the same heatmap, with a few needed changes to the visualization
def heatmap(self, *, annot=False, top_xaxis=False):
df = self.data
figure = self.figure_h
_, ax11 = plt.subplots(1, figsize=figure)
ax11.set_title("Correlation", fontdict={"fontsize": 30, "color": "gray"})
ax11.set_xticklabels(labels=df.columns, fontdict={"fontsize": 15})
ax11.set_yticklabels(labels=df.columns, fontdict={"fontsize": 15})
        ax = sns.heatmap(df.corr().round(4), annot=annot, ax=ax11, linewidths=0.5)
if top_xaxis:
ax.xaxis.tick_top()
# this function shows whether the classes are balanced, using a countplot
def isBalanced_(self, *, df=df):
fig = self.figure_a
_, ax = plt.subplots(1, figsize=fig)
ax.set_xticklabels(labels=df["Class"], fontdict={"fontsize": 15})
ax.set_ylabel("Class column", fontdict={"fontsize": 20, "color": "gray"})
ax.set_title("Count= Analysis", fontdict={"fontsize": 20, "color": "gray"})
sns.countplot(x=df["Class"], ax=ax)
plt.show()
CORR_A = get_corr_analysis(data=df, figure_analysis=(12, 8), figure_heat=fig)
# Here we can see a line that rises and falls according to the corr() score.
# This helps us determine which features are important for fraud detection.
# Note that in classification problems multicollinearity is not as important as it is in regression problems.
# Still, we should analyse it; it can help us improve a little.
# We can see a relationship with Class, but the scores are not very high: around a 0.3 or 0.25 mean corr() value.
# The features are largely independent of each other; I think this is a beautiful dataset, and it is rare to get to work with data like this.
# But back to the line plot: why are the middle columns more correlated with the Class column?
# I think it is because the columns with a high corr() score contain fairly normal, standardized values, but this is a guess, not a 100% fact.
# Whatever the case, we will continue the analysis with the other visualizations.
CORR_A.analysis_by(which_column="Class")
# **Heatmaps are commonly used to visualize and analyze the correlation or relationship between variables in a dataset.**
# **The heatmap provides a visual summary of the data, allowing patterns and trends to be easily identified. Here are some scenarios when a heatmap can be useful:**
# * Correlation Analysis: Heatmaps are particularly useful for exploring and visualizing the correlation matrix of a dataset. By plotting the correlations between variables as a heatmap, you can quickly identify strong positive or negative correlations. This helps in understanding the relationships between variables and identifying potential predictors or redundant features.
# * Feature Selection: Heatmaps can aid in feature selection by visually highlighting the relationships between features and the target variable. By examining the correlation of each feature with the target, you can identify the most influential or relevant features for your analysis or machine learning model.
# * Data Comparison: Heatmaps allow for easy comparison of multiple datasets or categories. For example, in the context of time-series data, you can use a heatmap to compare patterns and trends across different time periods or groups. This helps in identifying variations, anomalies, or similarities in the data.
# * Performance Evaluation: Heatmaps can be used to evaluate the performance of a model by visualizing the confusion matrix or classification results. This helps in identifying patterns of correct and incorrect predictions, highlighting areas of improvement, and understanding the strengths and weaknesses of the model.
CORR_A.heatmap(annot=True, top_xaxis=True)
CORR_A.isBalanced_(df=df)
# # Title: Analyzing Differences in Credit Fraud Data by Class
# **Introduction:**
# Fraud detection is a critical task in the field of credit card transactions. Analyzing the differences between fraudulent and non-fraudulent transactions can provide valuable insights for developing effective fraud detection models. This code provides a visual analysis of the mean values of different features for each class (fraudulent and non-fraudulent) in a credit fraud dataset.
# **Key Features:**
# * Grouping by Class: The code groups the data by the 'Class' column, allowing for a comparison between fraudulent and non-fraudulent transactions.
# * Mean Calculation: The mean values of the remaining columns are calculated for each class, providing an overview of the average values of different features in fraudulent and non-fraudulent transactions.
# * Bar Plot Visualization: The code creates a bar plot to visually represent the differences between the mean values of different features for each class. The x-axis represents the features, the y-axis represents the mean values, and the bars are grouped by the classes.
# * Adjustable Ratio: The code introduces a ratio multiplier (ratio_mult) to adjust the visual representation of the differences between classes. This allows for more pronounced differences between the bars, making it easier to identify significant variations.
# * Insight Generation: By visually comparing the bars for each feature, analysts can identify features that exhibit significant differences between fraudulent and non-fraudulent transactions. These insights can be used to understand the characteristics of fraudulent transactions and develop effective fraud detection models.
# **Benefits and Applications:**
# * Feature Importance: The bar plot provides an intuitive way to identify important features that contribute to fraud detection. Features with notable differences between classes can be considered as potential indicators of fraudulent activity.
# * Model Development: The analysis can guide the selection of relevant features for training machine learning models. By focusing on features that exhibit significant differences, the model's ability to detect fraud can be enhanced.
# * Anomaly Detection: The visual analysis can help identify outliers or anomalies in the data. Unusual patterns or discrepancies in feature means for specific classes may indicate potential fraud patterns that require further investigation.
# * Decision Support: The insights gained from the analysis can inform decision-making processes related to fraud prevention and mitigation. For example, businesses can implement targeted measures or enhanced monitoring for transactions flagged as high risk.
def Analysis_by_Fraud_mean(*, df=None, ratio_mult=2):
data = df.groupby("Class").mean()
_, ax2 = plt.subplots(1, figsize=(15, 10))
x, y, classes = (
data[data.columns[:28]].values,
[[i, i] for i in data.columns[:28]],
[data.index.values.tolist() for i in range(len(data.columns[:28]))],
)
ratio = (x[0] - x[1]) * ratio_mult
x[0] = x[0] + ratio
x[1] = x[1] + ratio
x_, y_, classes_ = (
np.stack(x).flatten(order="F"),
np.stack(y).flatten(),
np.stack(classes).flatten(),
)
to_df_x, to_df_class, to_df_columns_types = x_, classes_, y_
table = {"X": to_df_x, "Which_columns": to_df_columns_types, "Classes": classes_}
new_data = pd.DataFrame(table)
ax2.set_xticklabels(y_, fontdict={"fontsize": "12"})
ax2.set_ylabel("X", fontdict={"fontsize": "15", "color": "gray"})
ax2.set_xlabel("Which_columns", fontdict={"fontsize": "15", "color": "gray"})
ax2.set_title("Each Class differance", fontdict={"fontsize": "20", "color": "gray"})
sns.barplot(new_data, x="Which_columns", y="X", hue="Classes", ax=ax2)
plt.show()
# That is really obvious: looking at the per-class mean differences after grouping shows us what is going on in the features.
# Here you can see that for most features class 0 has higher values than class 1; from this plot you can conclude:
# > based on this information, how does the model predict 0 and 1? When most features take higher values for class 0 than for class 1, the model will lean towards predicting 0.
Analysis_by_Fraud_mean(df=df, ratio_mult=2)
# # Title: Outlier Detection in Data Analysis using Quantile Range
# **Introduction:**
# Briefly explain the concept of outliers and their impact on data analysis.
# Introduce the Quantile Range (QR) method as a technique for outlier detection.
# **Code Description:**
# * Import the necessary modules, including Counter from collections for counting occurrences.
# * Define the function OutlierDetect that takes a dataframe (df) and optional parameters.
# * Iterate over each column in the dataframe using a for loop.
# * Calculate the Interquartile Range (IQR) for each column using the given QR percentiles.
# * Determine the outlier threshold as a multiple of the IQR.
# * Identify outliers by checking if values are below Q1 - IQR or above Q3 + IQR.
# * Collect the indices of the outliers in a list.
# * Use the Counter function to count the occurrences of each index in the list.
# * Filter out indices that occur more than a specified threshold (n1).
# * Return the resulting list of outlier indices.
# **Outlier Removal:**
# * Create a new dataframe (drop_outlier_df) by dropping the rows with outlier indices from the original dataframe.
# * Reset the index of the new dataframe.
# **Conclusion:**
# * Recap the purpose and usage of the code for outlier detection.
# * Highlight the importance of detecting and handling outliers in data analysis.
# * Discuss potential use cases and applications of the code.
# * Offer additional resources and references for further exploration.
# **Note:**
# > The blog post can be expanded with examples, visualizations, and explanations of the parameters, along with potential considerations and limitations of the method.
from collections import Counter
def OutlierDetect(*, df, QR=[25, 75], ratio=1, n1=2):
    # Flag a row as an outlier in a column when its value lies below Q1 - ratio*IQR
    # or above Q3 + ratio*IQR for that column
    outliers = []
    for i in [*df]:
        IQ1 = np.percentile(df[i], QR[1])  # upper quantile (Q3)
        IQ0 = np.percentile(df[i], QR[0])  # lower quantile (Q1)
        IQR = (IQ1 - IQ0) * ratio  # scaled interquartile range
        n = df[(df[i] < IQ0 - IQR) | (df[i] > IQ1 + IQR)]
        outliers.extend(n.index)
    # Keep only the rows that are outliers in more than n1 columns
    outlier_indices = Counter(outliers)
    multi_index = list(k for k, v in outlier_indices.items() if v > n1)
    return multi_index
drop_outlier_df = df.drop(OutlierDetect(df=df, ratio=1.5), axis=0).reset_index(
drop=True
)
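# As a quick usage check (a sketch, not part of the original analysis), comparing the row counts
# before and after removal shows how aggressive the chosen ratio and n1 settings are:
print("Rows before outlier removal:", len(df))
print("Rows after outlier removal: ", len(drop_outlier_df))
print("Rows dropped as multi-column outliers:", len(df) - len(drop_outlier_df))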
# # Title: Exploratory Data Analysis: Visualizing Actual Data Distribution
# **Introduction:**
# Introduce the importance of exploratory data analysis (EDA) in understanding the underlying structure and characteristics of data.
# Explain the significance of visualizing the actual data distribution.
#
# **Code Description:**
# * Import the necessary modules, including matplotlib.pyplot for data visualization.
# * Define the function plot_actual that takes a dataframe (df) as input.
# * Create a subplot grid with 3 rows and 2 columns, sharing the y-axis.
# * Iterate over each column in the dataframe using for loops.
# * Plot a histogram for each column using the hist function, customizing the plot appearance.
# * Assign appropriate labels to the histograms based on the column names.
# * Add legends to the histograms.
# * Ensure proper spacing and layout using tight_layout.
# **Data Visualization:**
# * Describe the visualizations produced by the code, highlighting the distribution of selected columns from the dataframe.
# * Emphasize the density estimation, binning, and alpha transparency settings used to create the histograms.
# * Discuss the benefits of visualizing the actual data distribution for understanding the range, skewness, and patterns within the data.
# **Conclusion:**
# * Summarize the purpose and functionality of the code for visualizing actual data distribution.
# * Emphasize the importance of exploratory data analysis in uncovering insights and informing subsequent data analysis tasks.
# * Encourage readers to explore further visualizations and customize the code for their specific datasets.
# * Provide additional resources and references for data visualization and exploratory data analysis.
# **Note:**
# > The blog post can be enhanced with example visualizations, explanations of the input dataframe, and potential insights that can be derived from the data distribution.
def plot_actual(*, df=None):
    # 3x2 grid of histograms: four features per panel, covering the first 24 columns
    _, ax1 = plt.subplots(3, 2, sharey=True, squeeze=False, figsize=(15, 12))
    for idx, ax in enumerate(ax1.flatten()):
        for col in [*df][idx * 4 : (idx + 1) * 4]:
            ax.hist(
                df[col],
                bins=50,
                linewidth=0.6,
                edgecolor="white",
                density=True,
                alpha=0.7,
                label=col,
            )
        ax.legend()
    plt.tight_layout()
plot_actual(df=df)
# # Title: Exploratory Data Analysis: Visualizing Outlier Removal (Part 3)
# **Introduction:**
# Introduce the importance of handling outliers in data analysis and the impact they can have on model performance.
# Provide an overview of the previous blog posts on exploratory data analysis (EDA) and mention that this post focuses on visualizing the effect of outlier removal.
#
# **Code Description:**
# * Import the necessary modules, including matplotlib.pyplot for data visualization.
# * Define the function show_dropoutlier that takes a dataframe (df) as input.
# * Create a subplot grid with 3 rows and 2 columns, sharing the y-axis.
# * Iterate over selected columns from the dataframe using for loops.
# * Plot a histogram for each column using the hist function, customizing the plot appearance.
# * Customize the histogram's binning, linewidth, edgecolor, density, and alpha transparency settings.
# * Assign appropriate labels to the histograms based on the column names.
# * Add legends to the histograms.
# * Ensure proper spacing and layout using tight_layout.
# **Data Visualization:**
# * Describe the visualizations produced by the code, emphasizing the distribution of different sets of columns after removing outliers.
# * Highlight the effect of outlier removal on the shape, range, and density of the data distribution.
# * Discuss the potential benefits of outlier removal in improving model performance and reducing the influence of extreme values.
# **Conclusion:**
# * Summarize the purpose and functionality of the code for visualizing the effect of outlier removal.
# * Reinforce the importance of handling outliers in data analysis and decision-making.
# * Encourage readers to apply outlier removal techniques to their own datasets and explore the resulting visualizations.
# * Provide links to relevant resources for further exploration of outlier detection and handling.
# **Note:**
# > The blog post can be enhanced with example visualizations, explanations of the input dataframe, and potential insights that can be gained from the outlier removal process. Additionally, it can be presented as a continuation of the previous blog posts on EDA, showcasing the iterative nature of data analysis and improvement.
def show_dropoutlier(*, df=None):
    # Same 3x2 histogram grid as plot_actual, drawn on the outlier-free dataframe
    _, ax1 = plt.subplots(3, 2, sharey=True, squeeze=False, figsize=(20, 17))
    for idx, ax in enumerate(ax1.flatten()):
        for col in [*df][idx * 4 : (idx + 1) * 4]:
            ax.hist(
                df[col],
                bins=100,
                linewidth=0.4,
                edgecolor="white",
                density=True,
                alpha=0.7,
                label=col,
            )
        ax.legend()
    plt.tight_layout()
show_dropoutlier(df=drop_outlier_df)
# **Data Preparation:**
# * Explain the purpose of separating features and the target variable.
# * Highlight the importance of removing outliers from the feature set to ensure the integrity and reliability of the machine learning model.
# * Demonstrate Data Transformation:
# * Show examples of the original feature set (X) and target variable (y) before outlier removal.
# * Display the outlier-removed feature set (X_drop_outlier) and target variable (y_drop_outlier), highlighting the differences.
# **Conclusion:**
# * Summarize the code's functionality in preparing the data for machine learning by handling outliers.
# * Emphasize the importance of data preprocessing and the role it plays in improving the performance and reliability of machine learning models.
# * Encourage readers to explore different data preprocessing techniques, such as outlier handling, to optimize their machine learning pipelines.
# * Provide links to additional resources for further learning on data preprocessing and outlier handling.
X, y = df[df.columns[:-1]], df[df.columns[-1]]
X_drop_outlier, y_drop_outlier = (
drop_outlier_df.iloc[:, :-1],
drop_outlier_df.iloc[:, -1],
)
# # Step: 2 Feature Engineering part.
# **DropCorrelatedFeatures:**
# * This step aims to drop highly correlated features from the dataset.
# * The threshold parameter defines the correlation threshold above which features will be considered highly correlated and dropped.
# * The instantiated object drop_corr can be used to fit and transform the dataset.
# **SelectKBest:**
# * This step performs feature selection based on univariate statistical tests.
# * The f_classif function is used as the scoring function to evaluate the significance of each feature in relation to the target variable.
# * The k parameter is set to select the top k features based on their scores.
# * The instantiated object model_s can be used to fit and transform the dataset.
# **StandardScaler:**
# * This step standardizes the features by subtracting the mean and scaling to unit variance.
# * Standardization is commonly used to ensure that features are on a similar scale, which can improve the performance of certain machine learning algorithms.
# * The instantiated object standard_features can be used to fit and transform the dataset.
# **SMOTE:**
# * This step performs Synthetic Minority Over-sampling Technique (SMOTE) to address class imbalance in the dataset.
# * SMOTE generates synthetic samples of the minority class to balance the class distribution.
# * The random_state parameter sets the seed for reproducibility.
# * The instantiated object smote can be used to fit and resample the dataset.
# * Each of these preprocessing steps serves a specific purpose in preparing the data for machine learning. The drop_corr and model_s steps focus on feature selection and dimensionality reduction, while standard_features standardizes the data, and smote addresses class imbalance.
drop_corr = DropCorrelatedFeatures(threshold=0.6)
model_s = SelectKBest(f_classif, k=int(len(X.columns) / 2))
standard_features = StandardScaler()
smote = SMOTE(random_state=42)
# The code you provided creates a pipeline using the imbPipeline class from the imbalanced-learn library.
# The pipeline consists of three steps: dropping correlated features, performing model selection, and applying standard scaling. Here's a breakdown of the code:
# **imbPipeline:**
# * This class is used to create a pipeline that sequentially applies a list of transformers and a final estimator.
# * It is imported from the imbalanced-learn library (from imblearn.pipeline import Pipeline).
# * The instantiated object pipe represents the pipeline.
# **'drop_corr_features', 'model_selection', 'standardScaler':**
# * These are the names given to the steps in the pipeline. They are used as identifiers for each step.
# **drop_corr, model_s, standard_features:**
# * These are the instantiated objects previously defined for the respective preprocessing steps.
# **transform_X:**
# * This variable represents the transformed feature matrix after applying the pipeline.
# * It is created by fitting the pipeline (fit_transform) to the original feature matrix X and target variable y.
# * The transformed data is stored in a pandas DataFrame with column names obtained from the pipeline's get_feature_names_out() method.
# * **Seamless workflow:**
# > The pipeline allows you to define and apply multiple preprocessing steps in a sequential manner. This ensures a smooth and consistent data transformation process without the need for manual intervention at each step.
# * **Code organization:**
# > By encapsulating the preprocessing steps within a pipeline, the code becomes more organized and easier to read and maintain. Each step is named and can be easily referred to and modified if needed.
# * **Preventing data leakage:**
# > The pipeline automatically ensures that each preprocessing step is fitted only on the training data and applied consistently to both the training and test data.
#
# > This prevents data leakage, where information from the test set is inadvertently used during preprocessing.
# * **Efficient parameter tuning:**
# > When using a pipeline, you can perform parameter tuning and cross-validation on the entire pipeline instead of individual preprocessing steps. This enables more comprehensive and efficient hyperparameter optimization and model selection.
# * **Reusability and portability:**
# > Once defined, the pipeline can be easily applied to new datasets with the same structure. It encapsulates the entire preprocessing workflow, making it portable and reusable across different projects.
# * **Conclusions:**
# > Using a pipeline simplifies and streamlines the preprocessing workflow, enhances code organization and readability, prevents data leakage, and facilitates efficient parameter tuning and reusability.
drop_corr = DropCorrelatedFeatures(threshold=0.6)
model_s = SelectKBest(f_classif, k=int(len(X.columns) / 2))
standard_features = StandardScaler()
smote = SMOTE(random_state=42)
pipe = imbPipeline(
[
("drop_corr_features", drop_corr),
("model_selection", model_s),
("standardScaler", standard_features),
]
)
transform_X = pd.DataFrame(
pipe.fit_transform(X, y), columns=pipe.get_feature_names_out()
)
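# To sanity-check what the fitted pipeline actually kept, the individual steps can be inspected.
# A minimal sketch, assuming feature_engine exposes the fitted correlation filter's dropped columns
# through features_to_drop_ and using scikit-learn's get_support() mask for SelectKBest:
dropped_corr_cols = pipe.named_steps["drop_corr_features"].features_to_drop_
print("Dropped as highly correlated:", sorted(dropped_corr_cols))
selector = pipe.named_steps["model_selection"]
kept_mask = selector.get_support()
print("Columns kept by SelectKBest:", list(transform_X.columns))
print("Their F-scores:", selector.scores_[kept_mask].round(2))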
balanced_transform_X, balanced_y = smote.fit_resample(transform_X, y)
X_trainB, X_testB, y_trainB, y_testB = train_test_split(
balanced_transform_X, balanced_y, train_size=0.3, random_state=42
)
# *SMOTE (Synthetic Minority Over-sampling Technique)* is a popular algorithm used for addressing class imbalance in machine learning datasets.
# Class imbalance occurs when one class (the minority class) is underrepresented compared to the other classes (the majority class(es)).
# SMOTE works by creating synthetic samples of the minority class to increase its representation in the dataset.
# It does this by randomly selecting a minority class sample and finding its k nearest neighbors.
# Synthetic samples are then generated by interpolating between the selected sample and its neighbors. The number of synthetic samples created is determined by the desired level of oversampling.
# The key benefit of using SMOTE is that it helps improve the performance of machine learning models when dealing with imbalanced datasets.
# By oversampling the minority class, SMOTE provides more training examples for the model to learn from, which can lead to better generalization and more accurate predictions.
# However, it's important to note that SMOTE should be used with caution and in combination with other techniques, as it may introduce some level of noise or overfitting, especially when the dataset is highly imbalanced or has overlapping classes.
# It's also crucial to apply SMOTE only to the training data and not the entire dataset to avoid information leakage.
# In the code snippet you provided, smote is an instance of the SMOTE class from the imbalanced-learn library. It can be used as a preprocessing step in the pipeline to apply the SMOTE oversampling technique to the training data before training a machine learning model.
balanced_transform_X, balanced_y = smote.fit_resample(transform_X, y)
X_trainB, X_testB, y_trainB, y_testB = train_test_split(
balanced_transform_X, balanced_y, train_size=0.3, random_state=42
)
X_train, X_test, y_train, y_test = train_test_split(
transform_X, y, train_size=0.3, random_state=42
)
X_train__outlier, X_test__outlier, y_train__outlier, y_test__outlier = train_test_split(
X_drop_outlier, y_drop_outlier, train_size=0.3, random_state=42
)
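# As a hedged illustration of the leakage note above (SMOTE should only see the training fold),
# this is roughly how a split-then-oversample variant would look; the stratify argument and the
# variable names here are illustrative additions, not part of the original notebook:
X_tr, X_te, y_tr, y_te = train_test_split(
    transform_X, y, train_size=0.3, random_state=42, stratify=y
)
X_tr_bal, y_tr_bal = smote.fit_resample(X_tr, y_tr)  # oversample the training fold only
print("Train class counts after SMOTE:", y_tr_bal.value_counts().to_dict())
print("Test class counts (left imbalanced):", y_te.value_counts().to_dict())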
# * Check that the resampled dataset is actually balanced and whether this helps us
BX = balanced_transform_X.copy()
BX["Class"] = balanced_y
CORR_A.isBalanced_(df=BX)
# # Step: 3 Machine Learning part. Main part
# Both XGBClassifier (XGBoost) and RandomForestClassifier are popular machine learning models for classification tasks. However, their performance can vary depending on the specific dataset and problem at hand. Here's a brief overview of each model's performance characteristics:
# **XGBClassifier (XGBoost):**
# * XGBoost is an ensemble learning algorithm that combines multiple weak models (decision trees) to create a strong predictive model.
# * It is known for its high predictive accuracy and performance on a wide range of datasets.
# * XGBoost incorporates regularization techniques to prevent overfitting and improve generalization.
# * It can handle a large number of features and is often effective in dealing with complex, high-dimensional data.
# * XGBoost is computationally efficient and scales well with large datasets.
# **RandomForestClassifier:**
# * RandomForestClassifier is also an ensemble learning algorithm that combines multiple decision trees to make predictions.
# * It is based on the concept of random decision forests, where each tree is built on a random subset of features and training samples.
# * RandomForestClassifier is known for its robustness to noise and outliers in the data.
# * It can handle both numerical and categorical features and is relatively insensitive to feature scaling.
# * RandomForestClassifier performs well on a wide range of datasets and is less prone to overfitting compared to individual decision trees.
# * It can capture complex interactions between features and provide insights into feature importance.
xgb = XGBClassifier(n_estimators=100)
random = RandomForestClassifier(n_estimators=100)
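# Before any tuning, a quick cross-validated baseline of both untuned models gives a reference
# point. This is a sketch: it reuses the balanced training split from above and only 3 folds to
# keep the runtime down.
for name, model in [("XGBClassifier", xgb), ("RandomForestClassifier", random)]:
    scores = cross_val_score(model, X_trainB, y_trainB, cv=3, scoring="accuracy")
    print(f"{name}: mean CV accuracy = {scores.mean():.4f} (+/- {scores.std():.4f})")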
# The provided code is an example of using Optuna, a hyperparameter optimization framework, to tune the hyperparameters of a RandomForestClassifier model. Here's a breakdown of the code and its implications:
# * The objective2 function is the objective function that Optuna uses to evaluate different sets of hyperparameters. It takes an Optuna trial object as an argument, which allows sampling and suggesting values for the hyperparameters to be tuned.
# * Inside the objective2 function, a dictionary params_random_f is defined to store the hyperparameters for the RandomForestClassifier model. The hyperparameters being tuned in this example are n_estimators, max_depth, and criterion.
# * The random_model is instantiated with the suggested hyperparameters from the Optuna trial.
# * The random_model is trained on the training data (X_trainB and y_trainB).
# * Predictions are made on the test data (X_testB) using the trained random_model.
# * The accuracy score is calculated by comparing the predicted labels with the true labels (y_testB).
# * The accuracy score is returned as the value to be optimized by Optuna.
# * The learn_optuna object represents an Optuna study, which manages the optimization process. The optimize method is called on the study, specifying the objective2 function and the number of trials to run (n_trials=5).
# Finally, the best_value attribute of the learn_optuna object is printed, which contains the highest accuracy achieved across the trials.
def objective2(trial: optuna.trial.Trial):
params_random_f = {
"n_estimators": trial.suggest_int("n_estimators", 100, 300, step=200),
"max_depth": trial.suggest_int("max_depth", 3, 6),
"criterion": trial.suggest_categorical("criterion", ["gini", "entropy"]),
}
random_model = RandomForestClassifier(**params_random_f, random_state=42)
random_model.fit(X_trainB, y_trainB)
predict_random_f = random_model.predict(X_testB)
return accuracy_score(predict_random_f, y_testB)
# The objective returns an accuracy score, so the study must maximize it
learn_optuna = optuna.create_study(direction="maximize")
learn_optuna.optimize(objective2, n_trials=5)
# OOOHHHH, really!!! WOW, around 0.93 accuracy: Optuna and RandomForestClassifier are something else...
# This tuned model should not be too sensitive to overfitting either.
# Let's continue with the other extreme model, the XGBoost algorithm, again tuned with Optuna.
print("RandomForestClassifier+Optuna: accuracy - ", learn_optuna.best_value)
#
# The provided code demonstrates the use of Optuna to optimize the hyperparameters of an XGBoost classifier model. Here's an overview of the code and its implications:
# * The objective function is defined as the objective function for Optuna's optimization. It takes an Optuna trial object as an argument, which is used to sample and suggest values for the * hyperparameters to be tuned.
# * Inside the objective function, the hyperparameters n_estimators and max_depth are sampled using trial.suggest_int. The n_estimators hyperparameter is suggested within the range of 100 to 1100 * with a step of 900, and the max_depth hyperparameter is suggested within the range of 3 to 6.
# * An XGBoost classifier model is created with the suggested hyperparameters.
# * The model is trained using the training data (X_trainB and y_trainB).
# * During training, evaluation metrics such as log loss and AUC are calculated using the training and testing data (X_trainB, y_trainB, X_testB, y_testB).
# * The model makes predictions on the testing data (X_testB), and the accuracy is calculated by comparing the predicted labels with the true labels (y_testB).
# * The accuracy is returned as the value to be optimized by Optuna.
# * The study object represents an Optuna study, which manages the optimization process. It is created using optuna.create_study with the direction set to 'maximize' to maximize the accuracy.
# * The optimize method is called on the study, specifying the objective function and the number of trials to run (n_trials=10).
def objective(trial):
n_estimators = trial.suggest_int("n_estimators", 100, 1100, step=900)
max_depth = trial.suggest_int("max_depth", 3, 6)
model = XGBClassifier(
n_estimators=n_estimators, max_depth=max_depth, random_state=42
)
model.fit(
X_trainB,
y_trainB,
eval_metric=["logloss", "auc"],
eval_set=[(X_trainB, y_trainB), (X_testB, y_testB)],
verbose=100,
)
y_predB = model.predict(X_testB)
accuracy = accuracy_score(y_testB, y_predB)
return accuracy
study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=10)
print("XGBClassifier+Optuna: accuracy - ", study.best_value)
# The objective1 function is defined as the objective function for Optuna's optimization. It takes an Optuna trial object as an argument, which is used to sample and suggest values for the hyperparameters to be tuned.
# Inside the objective1 function, the training and testing data are converted to xgboost's DMatrix format using xgb_class.DMatrix.
# The param dictionary is defined, specifying the objective function as "binary:logistic" and the evaluation metric as "logloss".
# The max_depth hyperparameter is sampled using trial.suggest_int within the range of 1 to 9.
# An XGBoost classifier model is trained using xgb_class.train. The param dictionary, training data (dtrain), and testing data (dtest) are provided. The evals parameter specifies the evaluation sets with their respective names ("validation_train" and "validation_test").
# The optuna.integration.XGBoostPruningCallback is used as a callback to perform early stopping based on the validation log loss (observation_key='validation_test-logloss').
# The trained model is used to make predictions on the testing data (dtest).
# The predicted probabilities are converted to binary labels using np.rint.
# The accuracy is calculated by comparing the predicted labels with the true labels (y_testB).
# The accuracy is returned as the value to be optimized by Optuna.
# The study1 object represents an Optuna study, which manages the optimization process. It is created using optuna.create_study with the direction set to 'maximize' the accuracy.
# The optimize method is called on the study1, specifying the objective1 function and the number of trials to run (n_trials=20).
#
import xgboost as xgb_class
def objective1(trial):
dtrain = xgb_class.DMatrix(X_trainB, label=y_trainB)
dtest = xgb_class.DMatrix(X_testB, label=y_testB)
param = {
"objective": "binary:logistic",
"eval_metric": "logloss",
}
param["max_depth"] = trial.suggest_int("max_depth", 1, 9)
pruning_callback = optuna.integration.XGBoostPruningCallback(
trial, observation_key="validation_test-logloss"
)
bst = xgb_class.train(
param,
dtrain,
evals=[(dtrain, "validation_train"), (dtest, "validation_test")],
num_boost_round=300,
callbacks=[pruning_callback],
verbose_eval=100,
)
preds = bst.predict(dtest)
pred_labels = np.rint(preds)
accuracy = accuracy_score(y_testB, pred_labels)
return accuracy
# Maximize accuracy (the objective returns an accuracy score)
study1 = optuna.create_study(direction="maximize")
study1.optimize(objective1, n_trials=20)
print(study1.best_value)
print("XGBClassifier(DMatrix and train model)+Optuna: accuracy - ", study1.best_value)
# The VotingClassifier is imported from the sklearn.ensemble module.
# Two classifier objects, xgb and random, are created with specific configurations. xgb refers to an XGBoost classifier, and random refers to a Random Forest classifier.
# A VotingClassifier object named vote is created, specifying the list of classifiers to be used and their corresponding names. In this case, the classifiers are xgb and random.
# The fit method of the VotingClassifier object is called with the balanced training data (X_trainB, y_trainB), which trains the individual classifiers within the ensemble.
# The Voting Classifier learns to combine the predictions of xgb and random using a majority vote or weighted voting, depending on the specified parameters.
# The resulting vote classifier can be used to make predictions on new data by calling its predict or predict_proba methods.
# The purpose of using a Voting Classifier is to leverage the strengths of multiple classifiers and improve overall prediction performance by aggregating their predictions.
vote = VotingClassifier([("xgb", xgb), ("random", random)])
vote.fit(X_trainB, y_trainB)
predict_votingB = vote.predict(X_testB)
print("Predict values: ", predict_votingB[:10])
print(
"Classification report: to check model\n",
    classification_report(y_testB, predict_votingB),
)
print(
"Accuracy: ",
np.diagonal(confusion_matrix(y_testB, predict_votingB)).sum() / len(y_testB),
)
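# The ensemble above uses the default hard (majority) voting. Since both base models expose
# predict_proba, a soft-voting variant that averages the class probabilities is a small change.
# This is a sketch, not part of the original notebook:
vote_soft = VotingClassifier([("xgb", xgb), ("random", random)], voting="soft")
vote_soft.fit(X_trainB, y_trainB)
proba_fraud = vote_soft.predict_proba(X_testB)[:, 1]  # averaged probability of class 1
predict_soft = (proba_fraud >= 0.5).astype(int)
print("Soft-voting accuracy:", accuracy_score(y_testB, predict_soft))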
accuracy = cross_val_score(
xgb,
balanced_transform_X,
balanced_y,
scoring="accuracy",
fit_params={"eval_set": [(X_trainB, y_trainB), (X_testB, y_testB)], "verbose": 10},
)
print("All CV accuracy: ", accuracy)
print("Mean Accuracy: ", accuracy.mean())
early_stopping = xgb_class.callback.EarlyStopping(rounds=25)
models = XGBClassifier().fit(
X_trainB,
y_trainB,
eval_set=[(X_trainB, y_trainB), (X_testB, y_testB)],
callbacks=[early_stopping],
verbose=10,
)
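# Because the fit above logs two evaluation sets, the learning curves can be pulled back out for
# inspection. A sketch, assuming the sklearn wrapper's evals_result() naming ("validation_0",
# "validation_1") and the default "logloss" metric for a binary objective:
history = models.evals_result()
plt.plot(history["validation_0"]["logloss"], label="train logloss")
plt.plot(history["validation_1"]["logloss"], label="test logloss")
plt.xlabel("boosting round")
plt.ylabel("logloss")
plt.legend()
plt.show()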
predict_XGB = models.predict(X_testB)
print("Predict values: ", predict_XGB[:10])
print(
"Classification report: to check model\n",
    classification_report(y_testB, predict_XGB),
)
print(
"Accuracy: ",
np.diagonal(confusion_matrix(y_testB, predict_XGB)).sum() / len(y_testB),
)
Analysis_by_Fraud_mean(df=BX, ratio_mult=2)
# # Credit card fraud detection - Optuna+XGBoost and RandomForest
# # Credit Fraud Detection
# Credit fraud detection refers to the process of identifying and preventing fraudulent activities in credit card transactions.
# It involves analyzing patterns, behaviors, and characteristics of credit card transactions to identify any suspicious or fraudulent activities.
# The goal is to detect and prevent unauthorized or fraudulent transactions, protecting both the credit card issuer and the cardholder.
# Credit fraud can take various forms, such as stolen credit card information, unauthorized transactions, identity theft, and account takeover.
# Fraudsters often try to exploit vulnerabilities in the payment system to make fraudulent transactions without detection.
# **Goal of Credit Fraud Detection**
# *The goal of credit fraud detection is to strike a balance between accurately identifying fraudulent transactions while minimizing false positives that may inconvenience legitimate cardholders.*
# *It is an ongoing process that involves continuous monitoring, analysis, and improvement to stay ahead of evolving fraud techniques.*
# 
# # About Dataset
# **Context**
# It is important that credit card companies are able to recognize fraudulent credit card transactions so that customers are not charged for items that they did not purchase.
# **Content**
# The dataset contains transactions made by credit cards in September 2013 by European cardholders.
# This dataset presents transactions that occurred in two days, where we have 492 frauds out of 284,807 transactions. The dataset is highly unbalanced, the positive class (frauds) account for 0.172% of all transactions.
# It contains only numerical input variables which are the result of a PCA transformation. Unfortunately, due to confidentiality issues, we cannot provide the original features and more background information about the data.
# Features V1, V2, … V28 are the principal components obtained with PCA, the only features which have not been transformed with PCA are 'Time' and 'Amount'.
# Feature 'Time' contains the seconds elapsed between each transaction and the first transaction in the dataset.
# The feature 'Amount' is the transaction Amount, this feature can be used for example-dependant cost-sensitive learning. Feature 'Class' is the response variable and it takes value 1 in case of fraud and 0 otherwise.
# Given the class imbalance ratio, we recommend measuring the accuracy using the Area Under the Precision-Recall Curve (AUPRC). Confusion matrix accuracy is not meaningful for unbalanced classification.
# # Steps of Project
# **Step: 1**
# * Analysis data: Now start Analysis of step which include a few EDU and exploration analysis small steps
# > Data Visualization: Use data visualization techniques to gain insights into the data.
#
# > Create visualizations such as histograms, scatter plots, and box plots to understand the distribution of transaction amounts, identify outliers, and detect any patterns or anomalies in the data.
#
# **Step: 2**
#
# * Get some conclutions from Data Analysis and make some Data Transformation: After Visualizations I must transform and sampling dataset
# > Feature Engineering: Create additional features from the existing data that might help in detecting fraudulent transactions.
#
# > For example, you can calculate features such as transaction frequency, average transaction amount, or time since the last transaction.
#
#
# **Step: 3**
# * Machine learning main part of the project to detect frauds: After Feature Engineering I must do some trick to create great performance of model and predict very accuratly every class
# > Sklearn and Xgboost: Which includes some extrimed model RandomForestClassifair and XGBoost to help us predict frauds and also they are so usefull for unbalanced dataset, but will use some other trick too
#
# > Hyperparameters Tuning: In this section will help us Optuna and cross_val_score, but Optuna is More advance and very efficient framework to opimize model, for some extra informationoptuna is really really fast framework compare to Sklearn RandomizedSearchCV and CridSearchCV model selection.
# # Instalation part
# **I will imoprt some essantial model and framework to help us detect frauds**
# !pip install feature_engine
# Optuna is an automatic hyperparameter optimization software framework, particularly designed for machine learning. It features an imperative, define-by-run style user API.
import optuna
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os
from imblearn.over_sampling import SMOTE
from sklearn.metrics import classification_report, accuracy_score, confusion_matrix
from sklearn.preprocessing import StandardScaler, QuantileTransformer
from feature_engine.transformation import YeoJohnsonTransformer
from sklearn.feature_selection import (
chi2,
r_regression,
mutual_info_regression,
f_classif,
f_regression,
mutual_info_classif,
)
from sklearn.feature_selection import (
SelectKBest,
SelectFromModel,
SequentialFeatureSelector,
)
from feature_engine.selection import DropCorrelatedFeatures
from feature_engine.transformation import LogTransformer
# Imblearn techniques are the methods by which we can generate a data set that has an equal ratio of classes. The predictive model built on this type of data set would be able to generalize well.
# We mainly have two options to treat an imbalanced data set that are Upsampling and Downsampling
from imblearn.over_sampling import SMOTE
from imblearn.ensemble import BalancedBaggingClassifier, BalancedRandomForestClassifier
from imblearn.pipeline import Pipeline as imbPipeline
from sklearn.model_selection import (
GridSearchCV,
RandomizedSearchCV,
train_test_split,
cross_val_score,
KFold,
)
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import VotingClassifier, RandomForestClassifier
from xgboost import XGBClassifier
from sklearn.metrics import (
accuracy_score,
mean_absolute_error,
mean_squared_error,
make_scorer,
classification_report,
confusion_matrix,
)
import warnings
warnings.filterwarnings("ignore")
# **Here are a few potential benefits of using the os module to read CSV files:**
# File Path Manipulation: The os module allows you to manipulate file paths, such as joining directory names and file names using os.path.join().
# This can be helpful when constructing the path to your CSV file dynamically or when working with files in different directories.
# While the os module itself may not directly provide CSV parsing capabilities, it complements other modules, such as csv or pandas, by helping with file-related tasks and ensuring smooth interaction with the operating system.
read_path = ""
import os
for dirname, _, filenames in os.walk("creditcardfraud/"):
for filename in filenames:
if ".csv" in filename:
read_path = os.path.join(dirname, filename)
break
df = pd.read_csv(read_path)
df = df.drop("Time", axis=1)
# # Step: 1 Visualization part
# This type of fraud does not require the criminal to have a physical credit card.
# Instead, they will obtain basic details, such as the account holder's name, the credit card number, and the expiration date.
# With this information, they can commit fraudulent activity by mail, via the phone, or online.
# Here I have some important function which is help me make conclution aboit dataset ,
# and from this decitions I will determine and create way to forward of my Feature Engineering and ML Engineering of the steps
# * Type: Corralation of Analysis.
# The main benefits of correlation analysis are that it helps companies determine which variables they want to investigate further,
#
# and it allows for rapid hypothesis testing. The main type of correlation analysis use Pearson's r formula to identify the degree of the linear relationship between two variables.
#
# * What is benefit of use Corralation Analysis to Classification Problems.
#
# * Correlation analysis can indeed help determine the importance of features in the machine learning process for classification problems. Here's how correlation analysis can be useful:**
#
# 1. Identify Strong Correlations: By calculating the correlation coefficient (such as Pearson's correlation
# coefficient), you can measure the strength and direction of the linear
# relationship between pairs of features. Positive values indicate a positive correlation, negative values
# indicate a negative correlation, and values close to zero indicate no
# correlation. Identifying strong correlations can give insights into which features might be important for
# the classification task.
# 2. Feature Selection: Correlation analysis can help in feature selection by identifying features that have a
# strong correlation with the target variable. Features with high correlation
# are more likely to have a strong influence on the target variable and may contribute significantly to the
# classification task. Such features can be selected for further analysis or
# included in the machine learning model.
#
# 3. Multicollinearity Detection: Correlation analysis can help detect multicollinearity, which refers to high
# correlations among predictor variables. Multicollinearity can lead to
# redundant information and instability in the model. By identifying highly correlated features, you can make
# informed decisions about whether to keep all correlated features, remove
# redundant features, or use dimensionality reduction techniques to address multicollinearity.
fig = figsize = (30, 30)
class get_corr_analysis:
def __init__(self, *, data=None, figure_analysis=(10, 8), figure_heat=(10, 10)):
self.data = data
self.figure_a = figure_analysis
self.figure_h = figure_heat
# Analysis by columns of the Class, show corr() analysis increace and decreace of the corralation
def analysis_by(self, *, which_column="Class"):
df = self.data
figure = self.figure_a
_, ax1 = plt.subplots(1, figsize=figure)
df_by_corr = np.abs(df.corr().round(3)[which_column])[:-1]
ax1.set_xticklabels(
labels=df_by_corr.index, rotation=90, fontdict={"fontsize": 15}
)
ax1.set_ylabel(
"Corr for Class column", fontdict={"fontsize": 20, "color": "gray"}
)
ax1.set_title(
"Correlation Analysis", fontdict={"fontsize": 20, "color": "gray"}
)
sns.lineplot(df_by_corr, ax=ax1)
plt.show()
# Here is same heatmap, but change some need part of visualizations
def heatmap(self, *, annot=False, top_xaxis=False):
df = self.data
figure = self.figure_h
_, ax11 = plt.subplots(1, figsize=figure)
ax11.set_title("Correlation", fontdict={"fontsize": 30, "color": "gray"})
ax11.set_xticklabels(labels=df.columns, fontdict={"fontsize": 15})
ax11.set_yticklabels(labels=df.columns, fontdict={"fontsize": 15})
ax = sns.heatmap(df.corr().round(4), annot=True, ax=ax11, linewidths=0.5)
if top_xaxis:
ax.xaxis.tick_top()
# this function can show us. Are classes balanced? from catplot
def isBalanced_(self, *, df=df):
fig = self.figure_a
_, ax = plt.subplots(1, figsize=fig)
ax.set_xticklabels(labels=df["Class"], fontdict={"fontsize": 15})
ax.set_ylabel("Class column", fontdict={"fontsize": 20, "color": "gray"})
ax.set_title("Count= Analysis", fontdict={"fontsize": 20, "color": "gray"})
sns.countplot(x=df["Class"], ax=ax)
plt.show()
CORR_A = get_corr_analysis(data=df, figure_analysis=(12, 8), figure_heat=fig)
# There we can see line which is rising and droping line itself by the corr() score
# This is help us determine which feature are important for fraud detection
# But, has some information in classification problems multicorralation are not so important like regression problems
# Whatever we should analyse it, and it can help us improve a little
# Here we can see relationship with Class, but that not so high results around of 0.3 or 0.25 corr() score mean value
# They are independent each of them, I thinks this dataset is so beatifull dataset, maybe you can work with same this data is rare events of live
# But, I should back to analyse to line of plot, Why middle of columns are so corralated by Class of column?
# Because the I think the middle of these columns which have high corr() score exist normal and standard of values inside of this column, but this gausing that not 100% fact
# Whatever will continue analysis of the other of visualizations
CORR_A.analysis_by(which_column="Class")
# **Heatmaps are commonly used to visualize and analyze the correlation or relationship between variables in a dataset.**
# **The heatmap provides a visual summary of the data, allowing patterns and trends to be easily identified. Here are some scenarios when a heatmap can be useful:**
# * Correlation Analysis: Heatmaps are particularly useful for exploring and visualizing the correlation matrix of a dataset. By plotting the correlations between variables as a heatmap, you can quickly identify strong positive or negative correlations. This helps in understanding the relationships between variables and identifying potential predictors or redundant features.
# * Feature Selection: Heatmaps can aid in feature selection by visually highlighting the relationships between features and the target variable. By examining the correlation of each feature with the target, you can identify the most influential or relevant features for your analysis or machine learning model.
# * Data Comparison: Heatmaps allow for easy comparison of multiple datasets or categories. For example, in the context of time-series data, you can use a heatmap to compare patterns and trends across different time periods or groups. This helps in identifying variations, anomalies, or similarities in the data.
# * Performance Evaluation: Heatmaps can be used to evaluate the performance of a model by visualizing the confusion matrix or classification results. This helps in identifying patterns of correct and incorrect predictions, highlighting areas of improvement, and understanding the strengths and weaknesses of the model.
CORR_A.heatmap(annot=True, top_xaxis=True)
CORR_A.isBalanced_(df=df)
# # Title: Analyzing Differences in Credit Fraud Data by Class
# **Introduction:**
# Fraud detection is a critical task in the field of credit card transactions. Analyzing the differences between fraudulent and non-fraudulent transactions can provide valuable insights for developing effective fraud detection models. This code provides a visual analysis of the mean values of different features for each class (fraudulent and non-fraudulent) in a credit fraud dataset.
# **Key Features:**
# * Grouping by Class: The code groups the data by the 'Class' column, allowing for a comparison between fraudulent and non-fraudulent transactions.
# * Mean Calculation: The mean values of the remaining columns are calculated for each class, providing an overview of the average values of different features in fraudulent and non-fraudulent transactions.
# * Bar Plot Visualization: The code creates a bar plot to visually represent the differences between the mean values of different features for each class. The x-axis represents the features, the y-axis represents the mean values, and the bars are grouped by the classes.
# * Adjustable Ratio: The code introduces a ratio multiplier (ratio_mult) to adjust the visual representation of the differences between classes. This allows for more pronounced differences between the bars, making it easier to identify significant variations.
# * Insight Generation: By visually comparing the bars for each feature, analysts can identify features that exhibit significant differences between fraudulent and non-fraudulent transactions. These insights can be used to understand the characteristics of fraudulent transactions and develop effective fraud detection models.
# **Benefits and Applications:**
# * Feature Importance: The bar plot provides an intuitive way to identify important features that contribute to fraud detection. Features with notable differences between classes can be considered as potential indicators of fraudulent activity.
# * Model Development: The analysis can guide the selection of relevant features for training machine learning models. By focusing on features that exhibit significant differences, the model's ability to detect fraud can be enhanced.
# * Anomaly Detection: The visual analysis can help identify outliers or anomalies in the data. Unusual patterns or discrepancies in feature means for specific classes may indicate potential fraud patterns that require further investigation.
# * Decision Support: The insights gained from the analysis can inform decision-making processes related to fraud prevention and mitigation. For example, businesses can implement targeted measures or enhanced
def Analysis_by_Fraud_mean(*, df=None, ratio_mult=2):
data = df.groupby("Class").mean()
_, ax2 = plt.subplots(1, figsize=(15, 10))
x, y, classes = (
data[data.columns[:28]].values,
[[i, i] for i in data.columns[:28]],
[data.index.values.tolist() for i in range(len(data.columns[:28]))],
)
ratio = (x[0] - x[1]) * ratio_mult
x[0] = x[0] + ratio
x[1] = x[1] + ratio
x_, y_, classes_ = (
np.stack(x).flatten(order="F"),
np.stack(y).flatten(),
np.stack(classes).flatten(),
)
to_df_x, to_df_class, to_df_columns_types = x_, classes_, y_
table = {"X": to_df_x, "Which_columns": to_df_columns_types, "Classes": classes_}
new_data = pd.DataFrame(table)
ax2.set_xticklabels(y_, fontdict={"fontsize": "12"})
ax2.set_ylabel("X", fontdict={"fontsize": "15", "color": "gray"})
ax2.set_xlabel("Which_columns", fontdict={"fontsize": "15", "color": "gray"})
ax2.set_title("Each Class differance", fontdict={"fontsize": "20", "color": "gray"})
sns.barplot(new_data, x="Which_columns", y="X", hue="Classes", ax=ax2)
plt.show()
# Ohhh, that really oblivious, The Determine differance of the each Class by grouping it help us to What was exists in features
# Here you can see most of features which have class 0 more higher then class 1, from this plot you can conclude:
# > based this information Ml predict 0 and 1 How???, When most of the features exist higher of value in class 0 then class 1 of values Model will predict 0:
Analysis_by_Fraud_mean(df=df, ratio_mult=2)
# # Title: Outlier Detection in Data Analysis using Quantile Range
# **Introduction:**
# Briefly explain the concept of outliers and their impact on data analysis.
# Introduce the Quantile Range (QR) method as a technique for outlier detection.
# **Code Description:**
# * Import the necessary modules, including Counter from collections for counting occurrences.
# * Define the function OutlierDetect that takes a dataframe (df) and optional parameters.
# * Iterate over each column in the dataframe using a for loop.
# * Calculate the Interquartile Range (IQR) for each column using the given QR percentiles.
# * Determine the outlier threshold as a multiple of the IQR.
# * Identify outliers by checking if values are below Q1 - IQR or above Q3 + IQR.
# * Collect the indices of the outliers in a list.
# * Use the Counter function to count the occurrences of each index in the list.
# * Filter out indices that occur more than a specified threshold (n1).
# * Return the resulting list of outlier indices.
# **Outlier Removal:**
# * Create a new dataframe (drop_outlier_df) by dropping the rows with outlier indices from the original dataframe.
# * Reset the index of the new dataframe.
# **Conclusion:**
# * Recap the purpose and usage of the code for outlier detection.
# * Highlight the importance of detecting and handling outliers in data analysis.
# * Discuss potential use cases and applications of the code.
# * Offer additional resources and references for further exploration.
# **Note:**
# > The blog post can be expanded with examples, visualizations, and explanations of the parameters, along with potential considerations and limitations of the method.
from collections import Counter
def OutlierDetect(*, df, QR=[25, 75], ratio=1, n1=2):
outliers = []
for i in [*df]:
IQ1 = np.percentile(df[i], QR[1])
IQ0 = np.percentile(df[i], QR[0])
IQR = (IQ1 - IQ0) * ratio
n = df[(df[i] < IQ0 - IQR) | (df[i] > IQ1 + IQR)]
outliers.extend(n.index)
outlier_indices = Counter(outliers)
multi_index = list(k for k, v in outlier_indices.items() if v > n1)
return multi_index
drop_outlier_df = df.drop(OutlierDetect(df=df, ratio=1.5), axis=0).reset_index(
drop=True
)
# # Title: Exploratory Data Analysis: Visualizing Actual Data Distribution
# **Introduction:**
# Introduce the importance of exploratory data analysis (EDA) in understanding the underlying structure and characteristics of data.
# Explain the significance of visualizing the actual data distribution.
#
# **Code Description:**
# * Import the necessary modules, including matplotlib.pyplot for data visualization.
# * Define the function plot_actual that takes a dataframe (df) as input.
# * Create a subplot grid with 3 rows and 2 columns, sharing the y-axis.
# * Iterate over each column in the dataframe using for loops.
# * Plot a histogram for each column using the hist function, customizing the plot appearance.
# * Assign appropriate labels to the histograms based on the column names.
# * Add legends to the histograms.
# * Ensure proper spacing and layout using tight_layout.
# **Data Visualization:**
# * Describe the visualizations produced by the code, highlighting the distribution of selected columns from the dataframe.
# * Emphasize the density estimation, binning, and alpha transparency settings used to create the histograms.
# * Discuss the benefits of visualizing the actual data distribution for understanding the range, skewness, and patterns within the data.
# **Conclusion:**
# * Summarize the purpose and functionality of the code for visualizing actual data distribution.
# * Emphasize the importance of exploratory data analysis in uncovering insights and informing subsequent data analysis tasks.
# * Encourage readers to explore further visualizations and customize the code for their specific datasets.
# * Provide additional resources and references for data visualization and exploratory data analysis.
# **Note:**
# > The blog post can be enhanced with example visualizations, explanations of the input dataframe, and potential insights that can be derived from the data distribution.
def plot_actual(*, df=None):
_, ax1 = plt.subplots(3, 2, sharey=True, squeeze=False, figsize=(15, 12))
for i in [*df][:4]:
ax1[0][0].hist(
df[i],
bins=50,
linewidth=0.6,
edgecolor="white",
density=True,
alpha=0.7,
label=i,
)
ax1[0][0].legend()
for i in [*df][4:8]:
ax1[0][1].hist(
df[i],
bins=50,
linewidth=0.6,
edgecolor="white",
density=True,
alpha=0.7,
label=i,
)
ax1[0][1].legend()
for i in [*df][8:12]:
ax1[1][0].hist(
df[i],
bins=50,
linewidth=0.6,
edgecolor="white",
density=True,
alpha=0.7,
label=i,
)
ax1[1][0].legend()
for i in [*df][12:16]:
ax1[1][1].hist(
df[i],
bins=50,
linewidth=0.6,
edgecolor="white",
density=True,
alpha=0.7,
label=i,
)
ax1[1][1].legend()
for i in [*df][16:20]:
ax1[2][0].hist(
df[i],
bins=50,
linewidth=0.6,
edgecolor="white",
density=True,
alpha=0.7,
label=i,
)
ax1[2][0].legend()
for i in [*df][20:24]:
ax1[2][1].hist(
df[i],
bins=50,
linewidth=0.6,
edgecolor="white",
density=True,
alpha=0.7,
label=i,
)
ax1[2][1].legend()
plt.tight_layout()
plot_actual(df=df)
# # Title: Exploratory Data Analysis: Visualizing Outlier Removal (Part 3)
# **Introduction:**
# Introduce the importance of handling outliers in data analysis and the impact they can have on model performance.
# Provide an overview of the previous blog posts on exploratory data analysis (EDA) and mention that this post focuses on visualizing the effect of outlier removal.
#
# **Code Description:**
# * Import the necessary modules, including matplotlib.pyplot for data visualization.
# * Define the function show_dropoutlier that takes a dataframe (df) as input.
# * Create a subplot grid with 3 rows and 2 columns, sharing the y-axis.
# * Iterate over selected columns from the dataframe using for loops.
# * Plot a histogram for each column using the hist function, customizing the plot appearance.
# * Customize the histogram's binning, linewidth, edgecolor, density, and alpha transparency settings.
# * Assign appropriate labels to the histograms based on the column names.
# * Add legends to the histograms.
# * Ensure proper spacing and layout using tight_layout.
# **Data Visualization:**
# * Describe the visualizations produced by the code, emphasizing the distribution of different sets of columns after removing outliers.
# * Highlight the effect of outlier removal on the shape, range, and density of the data distribution.
# * Discuss the potential benefits of outlier removal in improving model performance and reducing the influence of extreme values.
# **Conclusion:**
# * Summarize the purpose and functionality of the code for visualizing the effect of outlier removal.
# * Reinforce the importance of handling outliers in data analysis and decision-making.
# * Encourage readers to apply outlier removal techniques to their own datasets and explore the resulting visualizations.
# * Provide links to relevant resources for further exploration of outlier detection and handling.
# **Note:**
# > The blog post can be enhanced with example visualizations, explanations of the input dataframe, and potential insights that can be gained from the outlier removal process. Additionally, it can be presented as a continuation of the previous blog posts on EDA, showcasing the iterative nature of data analysis and improvement.
def show_dropoutlier(*, df=None):
_, ax1 = plt.subplots(3, 2, sharey=True, squeeze=False, figsize=(20, 17))
for i in [*df][:4]:
ax1[0][0].hist(
df[i],
bins=100,
linewidth=0.4,
edgecolor="white",
density=True,
alpha=0.7,
label=i,
)
ax1[0][0].legend()
for i in [*df][4:8]:
ax1[0][1].hist(
df[i],
bins=100,
linewidth=0.4,
edgecolor="white",
density=True,
alpha=0.7,
label=i,
)
ax1[0][1].legend()
for i in [*df][8:12]:
ax1[1][0].hist(
df[i],
bins=100,
linewidth=0.4,
edgecolor="white",
density=True,
alpha=0.7,
label=i,
)
ax1[1][0].legend()
for i in [*df][12:16]:
ax1[1][1].hist(
df[i],
bins=100,
linewidth=0.4,
edgecolor="white",
density=True,
alpha=0.7,
label=i,
)
ax1[1][1].legend()
for i in [*df][16:20]:
ax1[2][0].hist(
df[i],
bins=100,
linewidth=0.4,
edgecolor="white",
density=True,
alpha=0.7,
label=i,
)
ax1[2][0].legend()
for i in [*df][20:24]:
ax1[2][1].hist(
df[i],
bins=100,
linewidth=0.4,
edgecolor="white",
density=True,
alpha=0.7,
label=i,
)
ax1[2][1].legend()
plt.tight_layout()
show_dropoutlier(df=drop_outlier_df)
# **Data Preparation:**
# * Explain the purpose of separating features and the target variable.
# * Highlight the importance of removing outliers from the feature set to ensure the integrity and reliability of the machine learning model.
# * Demonstrate Data Transformation:
# * Show examples of the original feature set (X) and target variable (y) before outlier removal.
# * Display the outlier-removed feature set (X_drop_outlier) and target variable (y_drop_outlier), highlighting the differences.
# **Conclusion:**
# * Summarize the code's functionality in preparing the data for machine learning by handling outliers.
# * Emphasize the importance of data preprocessing and the role it plays in improving the performance and reliability of machine learning models.
# * Encourage readers to explore different data preprocessing techniques, such as outlier handling, to optimize their machine learning pipelines.
# * Provide links to additional resources for further learning on data preprocessing and outlier handling.
X, y = df[df.columns[:-1]], df[df.columns[-1]]
X_drop_outlier, y_drop_outlier = (
drop_outlier_df.iloc[:, :-1],
drop_outlier_df.iloc[:, -1],
)
# # Step: 2 Feature Engineering part.
# **DropCorrelatedFeatures:**
# * This step aims to drop highly correlated features from the dataset.
# * The threshold parameter defines the correlation threshold above which features will be considered highly correlated and dropped.
# * The instantiated object drop_corr can be used to fit and transform the dataset.
# **SelectKBest:**
# * This step performs feature selection based on univariate statistical tests.
# * The f_classif function is used as the scoring function to evaluate the significance of each feature in relation to the target variable.
# * The k parameter is set to select the top k features based on their scores.
# * The instantiated object model_s can be used to fit and transform the dataset.
# **StandardScaler:**
# * This step standardizes the features by subtracting the mean and scaling to unit variance.
# * Standardization is commonly used to ensure that features are on a similar scale, which can improve the performance of certain machine learning algorithms.
# * The instantiated object standard_features can be used to fit and transform the dataset.
# **SMOTE:**
# * This step performs Synthetic Minority Over-sampling Technique (SMOTE) to address class imbalance in the dataset.
# * SMOTE generates synthetic samples of the minority class to balance the class distribution.
# * The random_state parameter sets the seed for reproducibility.
# * The instantiated object smote can be used to fit and resample the dataset.
# * Each of these preprocessing steps serves a specific purpose in preparing the data for machine learning. The drop_corr and model_s steps focus on feature selection and dimensionality reduction, while standard_features standardizes the data, and smote addresses class imbalance.
drop_corr = DropCorrelatedFeatures(threshold=0.6)
model_s = SelectKBest(f_classif, k=int(len(X.columns) / 2))
standard_features = StandardScaler()
smote = SMOTE(random_state=42)
# The code below creates a pipeline using the imbPipeline class from the imbalanced-learn library.
# The pipeline consists of three steps: dropping correlated features, performing model selection, and applying standard scaling. Here's a breakdown of the code:
# **imbPipeline:**
# * This class is used to create a pipeline that sequentially applies a list of transformers and a final estimator.
# * It is imported from the imbalanced-learn library (from imblearn.pipeline import Pipeline).
# * The instantiated object pipe represents the pipeline.
# **'drop_corr_features', 'model_selection', 'standardScaler':**
# * These are the names given to the steps in the pipeline. They are used as identifiers for each step.
# **drop_corr, model_s, standard_features:**
# * These are the instantiated objects previously defined for the respective preprocessing steps.
# **transform_X:**
# * This variable represents the transformed feature matrix after applying the pipeline.
# * It is created by fitting the pipeline (fit_transform) to the original feature matrix X and target variable y.
# * The transformed data is stored in a pandas DataFrame with column names obtained from the pipeline's get_feature_names_out() method.
# * **Seamless workflow:**
# > The pipeline allows you to define and apply multiple preprocessing steps in a sequential manner. This ensures a smooth and consistent data transformation process without the need for manual intervention at each step.
# * **Code organization:**
# > By encapsulating the preprocessing steps within a pipeline, the code becomes more organized and easier to read and maintain. Each step is named and can be easily referred to and modified if needed.
# * **Preventing data leakage:**
# > The pipeline automatically ensures that each preprocessing step is fitted only on the training data and applied consistently to both the training and test data.
#
# > This prevents data leakage, where information from the test set is inadvertently used during preprocessing.
# * **Efficient parameter tuning:**
# > When using a pipeline, you can perform parameter tuning and cross-validation on the entire pipeline instead of individual preprocessing steps. This enables more comprehensive and efficient hyperparameter optimization and model selection.
# * **Reusability and portability:**
# > Once defined, the pipeline can be easily applied to new datasets with the same structure. It encapsulates the entire preprocessing workflow, making it portable and reusable across different projects.
# * **Conclusions:**
# > Using a pipeline simplifies and streamlines the preprocessing workflow, enhances code organization and readability, prevents data leakage, and facilitates efficient parameter tuning and reusability.
drop_corr = DropCorrelatedFeatures(threshold=0.6)
model_s = SelectKBest(f_classif, k=int(len(X.columns) / 2))
standard_features = StandardScaler()
smote = SMOTE(random_state=42)
pipe = imbPipeline(
[
("drop_corr_features", drop_corr),
("model_selection", model_s),
("standardScaler", standard_features),
]
)
transform_X = pd.DataFrame(
pipe.fit_transform(X, y), columns=pipe.get_feature_names_out()
)
balanced_transform_X, balanced_y = smote.fit_resample(transform_X, y)
X_trainB, X_testB, y_trainB, y_testB = train_test_split(
balanced_transform_X, balanced_y, train_size=0.3, random_state=42
)
# *SMOTE (Synthetic Minority Over-sampling Technique)* is a popular algorithm used for addressing class imbalance in machine learning datasets.
# Class imbalance occurs when one class (the minority class) is underrepresented compared to the other classes (the majority class(es)).
# SMOTE works by creating synthetic samples of the minority class to increase its representation in the dataset.
# It does this by randomly selecting a minority class sample and finding its k nearest neighbors.
# Synthetic samples are then generated by interpolating between the selected sample and its neighbors. The number of synthetic samples created is determined by the desired level of oversampling.
# The key benefit of using SMOTE is that it helps improve the performance of machine learning models when dealing with imbalanced datasets.
# By oversampling the minority class, SMOTE provides more training examples for the model to learn from, which can lead to better generalization and more accurate predictions.
# However, it's important to note that SMOTE should be used with caution and in combination with other techniques, as it may introduce some level of noise or overfitting, especially when the dataset is highly imbalanced or has overlapping classes.
# It's also crucial to apply SMOTE only to the training data and not the entire dataset to avoid information leakage.
# In the code above, smote is an instance of the SMOTE class from the imbalanced-learn library. It can be used as a preprocessing step in the pipeline to apply the SMOTE oversampling technique to the training data before training a machine learning model.
balanced_transform_X, balanced_y = smote.fit_resample(transform_X, y)
X_trainB, X_testB, y_trainB, y_testB = train_test_split(
balanced_transform_X, balanced_y, train_size=0.3, random_state=42
)
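# As a quick sanity check on the resampling above (illustrative snippet reusing the variables already defined),
# compare the class counts before and after SMOTE:
from collections import Counter
print("Class counts before SMOTE:", Counter(y))
print("Class counts after SMOTE: ", Counter(balanced_y))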
X_train, X_test, y_train, y_test = train_test_split(
transform_X, y, train_size=0.3, random_state=42
)
X_train__outlier, X_test__outlier, y_train__outlier, y_test__outlier = train_test_split(
X_drop_outlier, y_drop_outlier, train_size=0.3, random_state=42
)
# * Check whether the balanced dataset actually helps us
BX = balanced_transform_X.copy()
BX["Class"] = balanced_y
CORR_A.isBalanced_(df=BX)
# # Step: 3 Machine Learning part. Main part
# Both XGBClassifier (XGBoost) and RandomForestClassifier are popular machine learning models for classification tasks. However, their performance can vary depending on the specific dataset and problem at hand. Here's a brief overview of each model's performance characteristics:
# **XGBClassifier (XGBoost):**
# * XGBoost is an ensemble learning algorithm that combines multiple weak models (decision trees) to create a strong predictive model.
# * It is known for its high predictive accuracy and performance on a wide range of datasets.
# * XGBoost incorporates regularization techniques to prevent overfitting and improve generalization.
# * It can handle a large number of features and is often effective in dealing with complex, high-dimensional data.
# * XGBoost is computationally efficient and scales well with large datasets.
# **RandomForestClassifier:**
# * RandomForestClassifier is also an ensemble learning algorithm that combines multiple decision trees to make predictions.
# * It is based on the concept of random decision forests, where each tree is built on a random subset of features and training samples.
# * RandomForestClassifier is known for its robustness to noise and outliers in the data.
# * It can handle both numerical and categorical features and is relatively insensitive to feature scaling.
# * RandomForestClassifier performs well on a wide range of datasets and is less prone to overfitting compared to individual decision trees.
# * It can capture complex interactions between features and provide insights into feature importance.
xgb = XGBClassifier(n_estimators=100)
random = RandomForestClassifier(n_estimators=100)
# The provided code is an example of using Optuna, a hyperparameter optimization framework, to tune the hyperparameters of a RandomForestClassifier model. Here's a breakdown of the code and its implications:
# * The objective2 function is the objective function that Optuna uses to evaluate different sets of hyperparameters. It takes an Optuna trial object as an argument, which allows sampling and suggesting values for the hyperparameters to be tuned.
# * Inside the objective2 function, a dictionary params_random_f is defined to store the hyperparameters for the RandomForestClassifier model. The hyperparameters being tuned in this example are n_estimators, max_depth, and criterion.
# * The random_model is instantiated with the suggested hyperparameters from the Optuna trial.
# * The random_model is trained on the training data (X_trainB and y_trainB).
# * Predictions are made on the test data (X_testB) using the trained random_model.
# * The accuracy score is calculated by comparing the predicted labels with the true labels (y_testB).
# * The accuracy score is returned as the value to be optimized by Optuna.
# * The learn_optuna object represents an Optuna study, which manages the optimization process. The optimize method is called on the study, specifying the objective2 function and the number of trials to run (n_trials=5).
# Finally, the best_trials attribute of the learn_optuna object is printed, which contains the best trial(s) with the highest optimization value (accuracy score in this case).
def objective2(trial: optuna.trial.Trial):
params_random_f = {
"n_estimators": trial.suggest_int("n_estimators", 100, 300, step=200),
"max_depth": trial.suggest_int("max_depth", 3, 6),
"criterion": trial.suggest_categorical("criterion", ["gini", "entropy"]),
}
random_model = RandomForestClassifier(**params_random_f, random_state=42)
random_model.fit(X_trainB, y_trainB)
predict_random_f = random_model.predict(X_testB)
return accuracy_score(predict_random_f, y_testB)
learn_optuna = optuna.study.create_study(direction="maximize")
learn_optuna.optimize(objective2, n_trials=5)
# Wow, around 0.93 accuracy: Optuna and RandomForestClassifier are something else...
# Keep in mind that this tuned model may still be prone to overfitting, so treat the score with care.
# Next, let's try another boosting model, XGBoost, together with Optuna.
print("RandomForestClassifier+Optuna: accuracy - ", learn_optuna.best_value)
#
# The provided code demonstrates the use of Optuna to optimize the hyperparameters of an XGBoost classifier model. Here's an overview of the code and its implications:
# * The objective function is defined as the objective function for Optuna's optimization. It takes an Optuna trial object as an argument, which is used to sample and suggest values for the hyperparameters to be tuned.
# * Inside the objective function, the hyperparameters n_estimators and max_depth are sampled using trial.suggest_int. The n_estimators hyperparameter is suggested within the range of 100 to 1100 with a step of 900, and the max_depth hyperparameter is suggested within the range of 3 to 6.
# * An XGBoost classifier model is created with the suggested hyperparameters.
# * The model is trained using the training data (X_trainB and y_trainB).
# * During training, evaluation metrics such as log loss and AUC are calculated using the training and testing data (X_trainB, y_trainB, X_testB, y_testB).
# * The model makes predictions on the testing data (X_testB), and the accuracy is calculated by comparing the predicted labels with the true labels (y_testB).
# * The accuracy is returned as the value to be optimized by Optuna.
# * The study object represents an Optuna study, which manages the optimization process. It is created using optuna.create_study with the direction set to 'maximize' to maximize the accuracy.
# * The optimize method is called on the study, specifying the objective function and the number of trials to run (n_trials=10).
def objective(trial):
n_estimators = trial.suggest_int("n_estimators", 100, 1100, step=900)
max_depth = trial.suggest_int("max_depth", 3, 6)
model = XGBClassifier(
n_estimators=n_estimators, max_depth=max_depth, random_state=42
)
model.fit(
X_trainB,
y_trainB,
eval_metric=["logloss", "auc"],
eval_set=[(X_trainB, y_trainB), (X_testB, y_testB)],
verbose=100,
)
y_predB = model.predict(X_testB)
accuracy = accuracy_score(y_testB, y_predB)
return accuracy
study = optuna.create_study(direction="maximize")
study.optimize(objective, n_trials=10)
print("XGBClassifier+Optuna: accuracy - ", study.best_value)
# The objective1 function is defined as the objective function for Optuna's optimization. It takes an Optuna trial object as an argument, which is used to sample and suggest values for the hyperparameters to be tuned.
# Inside the objective1 function, the training and testing data are converted to xgboost's DMatrix format using xgb_class.DMatrix.
# The param dictionary is defined, specifying the objective function as "binary:logistic" and the evaluation metric as "logloss".
# The max_depth hyperparameter is sampled using trial.suggest_int within the range of 1 to 9.
# An XGBoost classifier model is trained using xgb_class.train. The param dictionary, training data (dtrain), and testing data (dtest) are provided. The evals parameter specifies the evaluation sets with their respective names ("validation_train" and "validation_test").
# The optuna.integration.XGBoostPruningCallback is used as a callback to perform early stopping based on the validation log loss (observation_key='validation_test-logloss').
# The trained model is used to make predictions on the testing data (dtest).
# The predicted probabilities are converted to binary labels using np.rint.
# The accuracy is calculated by comparing the predicted labels with the true labels (y_testB).
# The accuracy is returned as the value to be optimized by Optuna.
# The study1 object represents an Optuna study, which manages the optimization process. It is created using optuna.create_study.
# The optimize method is called on study1, specifying the objective1 function and the number of trials to run (n_trials=20).
#
import xgboost as xgb_class
def objective1(trial):
dtrain = xgb_class.DMatrix(X_trainB, label=y_trainB)
dtest = xgb_class.DMatrix(X_testB, label=y_testB)
param = {
"objective": "binary:logistic",
"eval_metric": "logloss",
}
param["max_depth"] = trial.suggest_int("max_depth", 1, 9)
pruning_callback = optuna.integration.XGBoostPruningCallback(
trial, observation_key="validation_test-logloss"
)
bst = xgb_class.train(
param,
dtrain,
evals=[(dtrain, "validation_train"), (dtest, "validation_test")],
num_boost_round=300,
callbacks=[pruning_callback],
verbose_eval=100,
)
preds = bst.predict(dtest)
pred_labels = np.rint(preds)
accuracy = accuracy_score(y_testB, pred_labels)
return accuracy
study1 = optuna.create_study(direction="maximize")
study1.optimize(objective1, n_trials=20)
print(study1.best_value)
print("XGBClassifier(DMatrix and train model)+Optuna: accuracy - ", study1.best_value)
# The VotingClassifier is imported from the sklearn.ensemble module.
# Two classifier objects, xgb and random, are created with specific configurations. xgb refers to an XGBoost classifier, and random refers to a Random Forest classifier.
# A VotingClassifier object named name is created, specifying the list of classifiers to be used and their corresponding names. In this case, the classifiers are xgb and random.
# The fit method of the VotingClassifier object is called with the training data (X_train, y_train), which trains the individual classifiers within the ensemble.
# The Voting Classifier learns to combine the predictions of xgb and random using a majority vote or weighted voting, depending on the specified parameters.
# The resulting name classifier can be used to make predictions on new data by calling its predict or predict_proba methods.
# The purpose of using a Voting Classifier is to leverage the strengths of multiple classifiers and improve overall prediction performance by aggregating their predictions.
vote = VotingClassifier([("xgb", xgb), ("random", random)])
vote.fit(X_trainB, y_trainB)
predict_votingB = vote.predict(X_testB)
print("Predict values: ", predict_votingB[:10])
print(
    "Classification report: to check model\n",
    classification_report(y_testB, predict_votingB),
)
print(
"Accuracy: ",
np.diagonal(confusion_matrix(y_testB, predict_votingB)).sum() / len(y_testB),
)
accuracy = cross_val_score(
xgb,
balanced_transform_X,
balanced_y,
scoring="accuracy",
fit_params={"eval_set": [(X_trainB, y_trainB), (X_testB, y_testB)], "verbose": 10},
)
print("All CV accuracy: ", accuracy)
print("Mean Accuracy: ", accuracy.mean())
early_stopping = xgb_class.callback.EarlyStopping(rounds=25)
models = XGBClassifier().fit(
X_trainB,
y_trainB,
eval_set=[(X_trainB, y_trainB), (X_testB, y_testB)],
callbacks=[early_stopping],
verbose=10,
)
predict_XGB = models.predict(X_testB)
print("Predict values: ", predict_XGB[:10])
print(
    "Classification report: to check model\n",
    classification_report(y_testB, predict_XGB),
)
print(
"Accuracy: ",
np.diagonal(confusion_matrix(y_testB, predict_XGB)).sum() / len(y_testB),
)
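# Accuracy alone says little about probability quality; as an optional, illustrative extra check on the fitted
# `models` object above, the held-out log loss can be computed from the predicted probabilities:
from sklearn.metrics import log_loss
proba_XGB = models.predict_proba(X_testB)[:, 1]
print("Held-out log loss: ", log_loss(y_testB, proba_XGB))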
Analysis_by_Fraud_mean(df=BX, ratio_mult=2)
| false | 0 | 12,150 | 0 | 12,150 | 12,150 |
||
129967883
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
from pathlib import Path
from sklearn.model_selection import train_test_split
import tensorflow as tf
from sklearn.metrics import confusion_matrix, classification_report
negative_dir = Path("/kaggle/input/crack-detection-dataset/Negative")
positive_dir = Path("/kaggle/input/crack-detection-dataset/Positive")
import os.path
list(map(lambda x: os.path.split(x), list(positive_dir.glob(r"*.jpg"))))
def generate_df(image_dir, label):
filepaths = pd.Series(list(image_dir.glob(r"*.jpg")), name="Filepath").astype(str)
labels = pd.Series(label, name="Label", index=filepaths.index)
df = pd.concat([filepaths, labels], axis=1)
return df
negative_df = generate_df(negative_dir, label="NEGATIVE")
positive_df = generate_df(positive_dir, label="POSITIVE")
all_df = (
pd.concat([positive_df, negative_df], axis=0)
.sample(frac=1.0, random_state=1)
.reset_index(drop=True)
)
all_df
train_df, test_df = train_test_split(
all_df.sample(6000, random_state=1), train_size=0.7, shuffle=True, random_state=1
)
train_df
test_df
train_gen = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1.0 / 255, validation_split=0.2
)
test_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)
train_data = train_gen.flow_from_dataframe(
train_df,
x_col="Filepath",
y_col="Label",
target_size=(120, 120),
color_mode="rgb",
class_mode="binary",
batch_size=32,
shuffle=True,
subset="training",
)
val_data = train_gen.flow_from_dataframe(
train_df,
x_col="Filepath",
y_col="Label",
target_size=(120, 120),
color_mode="rgb",
class_mode="binary",
batch_size=32,
shuffle=True,
subset="validation",
)
test_data = test_gen.flow_from_dataframe(
    test_df,
x_col="Filepath",
y_col="Label",
target_size=(120, 120),
color_mode="rgb",
class_mode="binary",
batch_size=32,
shuffle=False,
)
inputs = tf.keras.Input(shape=(120, 120, 3))
x = tf.keras.layers.Conv2D(filters=16, kernel_size=(3, 3), activation="relu")(inputs)
x = tf.keras.layers.MaxPool2D(pool_size=(2, 2))(x)
x = tf.keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation="relu")(x)
x = tf.keras.layers.MaxPool2D(pool_size=(2, 2))(x)
x = tf.keras.layers.GlobalAveragePooling2D()(x)
outputs = tf.keras.layers.Dense(1, activation="sigmoid")(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs)
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
print(model.summary())
history = model.fit(
train_data,
validation_data=val_data,
epochs=100,
callbacks=[
tf.keras.callbacks.EarlyStopping(
monitor="val_loss", patience=3, restore_best_weights=True
)
],
)
fig = px.line(
history.history,
y=["loss", "val_loss"],
labels={"index": "Epoch", "value": "Loss"},
title="Training and Validation Loss Over Time",
)
fig.show()
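# confusion_matrix and classification_report are imported above but never used; an illustrative evaluation on
# the held-out generator (sketch only, reusing objects defined above) could look like this:
test_loss, test_acc = model.evaluate(test_data)
print("Test accuracy:", test_acc)
pred_probs = model.predict(test_data)
pred_labels = (pred_probs.squeeze() >= 0.5).astype(int)
print(confusion_matrix(test_data.labels, pred_labels))
print(classification_report(test_data.labels, pred_labels))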
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/967/129967883.ipynb
| null | null |
[{"Id": 129967883, "ScriptId": 38643594, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11008555, "CreationDate": "05/17/2023 19:32:18", "VersionNumber": 1.0, "Title": "notebookbbfba9c5ba", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 72.0, "LinesInsertedFromPrevious": 72.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 1,025 | 0 | 1,025 | 1,025 |
||
129967925
|
# # Predicting the rating of a movie using machine learning algorithm.
# In this notebook, we're going to go through an example machine learning project with the goal of predicting the rating of a movie.
# ## 1.Problem definition
# Movie rating prediction based on review using machine learning & deep learning model
# ## 2.Data
# This data set is downloaded from IMDb movie reviews dataset.
# Link: https://ieee-dataport.org/open-access/imdb-movie-reviews-dataset
# ### Primary Target:
# Predict rating of one movie’s rating based on data.
# ### Further Target:
# Predict rating of different movies.
# ## 3.Steps
# Step1: download the data; there will be many per-movie csv files from which we need to extract the review and rating.
# Step2: after extracting the features we need to apply a word embedding process to create train and test data.
# Word embedding process:
# * 1.https://www.turing.com/kb/guide-on-word-embeddings-in-nlp
# * 2.https://www.geeksforgeeks.org/word-embeddings-in-nlp/
# * 3.https://towardsdatascience.com/introduction-to-word-embedding-and-word2vec-652d0c2060fa
# * 4.https://machinelearningmastery.com/what-are-word-embeddings/
# ## 4.Modeling
# Design models using machine learning algorithms:
# 1. Use ML algorithms like SVM
# 2. Use RNN model like LSTM
# ## 5. Evaluating
# ## 6. Improving
# Improving the machine learning model using:
# * `Grid Search CV` (a small sketch appears at the end of this notebook)
#
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ### Preparing the tools:
# We're going to use `pandas` `numpy` `matplotlib` for data manipulation and analysis.
# Import all the tools we need
# Regular EDA(exploratory data analysis) and plotting libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# ## Load Data
# Load the data from directory
df = pd.read_csv("/kaggle/input/dataset/3 Idiots 2009.csv")
df.head(3)
df.review[0]
test = df.review[0]
test
# ## Cleaning up the text
# using the python `replace()` function to replace the unnecessary markup and quote characters.
# A function to clean a paragraph.
def clean_text(test):
"""
    Cleans up unnecessary characters in a text by removing HTML line breaks and stray quote marks.
"""
test = test.replace("<br/>", "")
test = test.replace("--", " ")
test = test.replace("'", "")
test = test.replace('"', "")
return test
test_li = df.review
new_list = test_li.apply(clean_text)
new_list[1]
df.head(2)
df.review = new_list
df.review[1]
df.shape
df.head(2)
df.drop(["date", "title", "username"], axis=1, inplace=True)
df.head(2)
df.rating.value_counts().plot(kind="bar", cmap="winter", title="Rating count")
df.isna().sum()
df.describe()
df.rating.value_counts()
df.dtypes
# ## Now change the text to vector using `spaCy`
df.review[1]
import spacy
np.random.seed(42)
nlp = spacy.load("en_core_web_sm")
doc = nlp("My girlfriend is Dr. Tareen. She loves to eat burger.")
for sentence in doc.sents:
print(sentence)
for sentence in doc.sents:
for word in sentence:
print(word, end="\n")
# ### Write a function to convert a paragraph into sentences.
def to_sentence(para):
"""
Converts the given paragraph to sentences.
"""
doc = nlp(para)
for sentence in doc.sents:
print(sentence)
df.review[1]
to_sentence(df.review[1])
sen = "My name is oitik.I read in CSE at RUET."
doc = nlp(sen)
for sentence in doc.sents:
for word in sentence:
print(word)
tst = df.review[1]
tst
doc = nlp("He flew to mars today!")
for token in doc:
print(token, "|", token.lemma_, "|", token.pos_)
df.head(2)
# ## Word Embedding
# Using spacy
#
import spacy
nlp = spacy.load("en_core_web_lg")
text = "dog cat tiger eat bananas but efte eats boobs and sandwich!"
doc = nlp(text)
for token in doc:
print(token.text, "vector:", token.has_vector, "out of voca:", token.is_oov)
doc[0].vector.shape
ex = nlp("bread")
for token in doc:
print(f"{token.text} <-> {ex.text} similarity:{token.similarity(ex)}")
ex.vector
df.head(3)
# ## **Converting a whole paragraph into token vectors**
doc = nlp(df.review[1])
li = []
for token in doc:
li.append(token.vector)
li[0]
df.review[1]
for token in doc:
print(token.text, end=" ")
df.head(3)
df.to_csv("/kaggle/working/modified_3_idiots.csv", index=False)
df1 = df.copy()
df1.head(2)
# ## Now i can change on my dataframe df safely
# ## *Create a function to convert review text to numbers*
def change_to_vec(reviews):
# Word embeding
import spacy
nlp = spacy.load("en_core_web_lg")
final_vector = []
for review in reviews:
doc = nlp(review)
for token in doc:
final_vector.append(token.vector)
return final_vector
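# Note: change_to_vec returns one vector per *token* across all reviews (a flat list), not one vector per
# review, so its length will not match the dataframe; this is what triggers the errors acknowledged in the
# "Showing some errors try again" section below, where doc.vector is used instead.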
review = change_to_vec(df.review)
len(review[1])
df.review = review
df.head(2)
df.shape
df.rating.value_counts()
df = df[~(df == "Null").any(axis=1)]
df.head(2)
df.rating.value_counts()
df.shape
df2 = df.copy()
df2.shape
df2.dtypes
df.head(2)
df.drop(["helpful", "total"], axis=1, inplace=True)
df.head(2)
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# Assume X is your feature matrix and y is your target variable
y = df.rating.to_list()
X = df.review.to_list()
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
X_train = X_train[:100]
y_train = y_train[:100]
X_test = X_test[:30]
y_test = y_test[:30]
len(X_train), len(X_test), len(y_train), len(y_test)
# Create and train the Naive Bayes classifier
naive_bayes = MultinomialNB()
naive_bayes.fit(X_train, y_train)
predictions = naive_bayes.predict(X_test)
# Evaluate the model
accuracy = accuracy_score(y_test, predictions)
print("Accuracy:", accuracy)
model = RandomForestClassifier()
model.fit(X_train, y_train)
# ## Showing some errors try again
df = df1.copy()
df.head()
df.drop("total", axis=1, inplace=True)
df.head(2)
# Function to convert text to vectors using spaCy
def text_to_vectors(text):
doc = nlp(text)
return doc.vector
# Apply the text_to_vectors function to the 'text' column
df["vectors"] = df["review"].apply(text_to_vectors)
df.head(2)
df.vectors[0], df.review[0]
df.drop("review", axis=1, inplace=True)
df.head(2)
df = df[~(df == "Null").any(axis=1)]
df.shape
# Assume X is your feature matrix and y is your target variable
X = np.array(df["vectors"].to_list())
y = np.array(df["rating"].to_list())
X.shape, X[:2]
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
X_train.shape, y_train.shape, X_test.shape, y_test.shape
X_train.dtype
# Train model
model = RandomForestClassifier()
model.fit(X_train, y_train)
y_preds = model.predict(X_test)
y_preds
y_test = np.array(y_test)
y_test
# ## Evaluate model
y_test.reshape(1, -1)
y_preds.reshape(1, -1)
model.score(X_test, y_test)
# # Linear Regression
from sklearn.linear_model import LinearRegression
# Create model
clf = LinearRegression()
# Fit the data to the model
clf.fit(X_train, y_train)
prediction = clf.predict(X_test)
prediction
clf.score(X_test, y_test)
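# Note: MultinomialNB expects non-negative count-like features, so fitting it on dense spaCy embeddings
# (which contain negative values) will normally raise a ValueError; GaussianNB is the usual choice for
# continuous features like these.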
naive_bayes = MultinomialNB()
naive_bayes.fit(X_train, y_train)
# Make predictions on the test set
predictions = naive_bayes.predict(X_test)
# Evaluate the model
accuracy = accuracy_score(y_test, predictions)
print("Accuracy:", accuracy)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/967/129967925.ipynb
| null | null |
[{"Id": 129967925, "ScriptId": 38481776, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14254243, "CreationDate": "05/17/2023 19:32:55", "VersionNumber": 2.0, "Title": "first_notebook", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 425.0, "LinesInsertedFromPrevious": 253.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 172.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 2,670 | 0 | 2,670 | 2,670 |
||
129277248
|
<jupyter_start><jupyter_text>Ukrainian Speech, Speech To Text (STT/ASR)
Kaggle dataset identifier: ukrainian-speech
<jupyter_script># This is notebook to print random sample from dataset
import os
from glob import glob
from tqdm import tqdm
import numpy as np
import librosa
data_root = "/kaggle/input/ukrainian-speech"
video_types = ["BCS", "HOC", "F"]
os.listdir(data_root)
types = []
for t in video_types:
path = os.path.join(data_root, t)
if os.path.exists(path):
types.extend(glob(path + "*"))
print(f"Video types amount: {len(types)}")
video_seasons = []
for t in types:
video_seasons.extend(glob(t + "/*"))
print(f"Video seasons amount: {len(video_seasons)}")
video_names = []
for t in video_seasons:
video_names.extend(glob(t + "/*"))
print(f"Video series amount: {len(video_names)}")
audio_names = []
sub_files = []
for t in video_names:
    list_all = glob(t + "/*")
    sub_name = glob(t + "/*.srt")
    if len(sub_name) == 0:
        print("No sub for ", t)
        continue
    sub_files.append(sub_name[0])
    list_all.remove(sub_name[0])
    audio_names.extend(list_all)
audio_names.sort()
print(f"Subtitle file amount: {len(sub_files)}")
print(f"Audio sample amount: {len(audio_names)}")
# total_duration = 0.
# for p in tqdm(audio_names):
# time_duration = float(os.popen(f'ffprobe -i {p} -show_entries format=duration -v quiet -of csv="p=0" 2>/dev/null').read())
# total_duration += time_duration
random_audio = np.random.choice(audio_names)
season = random_audio.split("/")[-2]
sub_file = "/".join(random_audio.split("/")[:-1]) + "/" + season + "_sub.srt"
audio_num = random_audio.split("/")[-1].split("-")[0]
audio_num
def get_sub(sub_file, audio_num):
return os.popen(f'grep -A 3 -e "^{audio_num}$" {sub_file} | sed 1,2d').read()
get_sub(sub_file, audio_num)
os.system(f"ffmpeg -i {random_audio} -vcodec copy -acodec copy video.mp4")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/277/129277248.ipynb
|
ukrainian-speech
|
igor185
|
[{"Id": 129277248, "ScriptId": 38433064, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3567091, "CreationDate": "05/12/2023 11:40:31", "VersionNumber": 1.0, "Title": "audio_sample", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 62.0, "LinesInsertedFromPrevious": 62.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185176133, "KernelVersionId": 129277248, "SourceDatasetVersionId": 5669287}]
|
[{"Id": 5669287, "DatasetId": 3258624, "DatasourceVersionId": 5744785, "CreatorUserId": 3567091, "LicenseName": "Unknown", "CreationDate": "05/12/2023 10:40:40", "VersionNumber": 6.0, "Title": "Ukrainian Speech, Speech To Text (STT/ASR)", "Slug": "ukrainian-speech", "Subtitle": "Audio from TV series translated and dubbed on Ukrainian language with subscripts", "Description": NaN, "VersionNotes": "Data Update 2023-05-12", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3258624, "CreatorUserId": 3567091, "OwnerUserId": 3567091.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5678191.0, "CurrentDatasourceVersionId": 5753744.0, "ForumId": 3324152, "Type": 2, "CreationDate": "05/12/2023 09:16:48", "LastActivityDate": "05/12/2023", "TotalViews": 215, "TotalDownloads": 7, "TotalVotes": 7, "TotalKernels": 1}]
|
[{"Id": 3567091, "UserName": "igor185", "DisplayName": "Igor", "RegisterDate": "08/10/2019", "PerformanceTier": 1}]
|
| false | 0 | 616 | 0 | 651 | 616 |
||
129277991
|
<jupyter_start><jupyter_text>Mushroom Classification
### Context
Although this dataset was originally contributed to the UCI Machine Learning repository nearly 30 years ago, mushroom hunting (otherwise known as "shrooming") is enjoying new peaks in popularity. Learn which features spell certain death and which are most palatable in this dataset of mushroom characteristics. And how certain can your model be?
### Content
This dataset includes descriptions of hypothetical samples corresponding to 23 species of gilled mushrooms in the Agaricus and Lepiota Family Mushroom drawn from The Audubon Society Field Guide to North American Mushrooms (1981). Each species is identified as definitely edible, definitely poisonous, or of unknown edibility and not recommended. This latter class was combined with the poisonous one. The Guide clearly states that there is no simple rule for determining the edibility of a mushroom; no rule like "leaflets three, let it be'' for Poisonous Oak and Ivy.
- **Time period**: Donated to UCI ML 27 April 1987
### Inspiration
- What types of machine learning models perform best on this dataset?
- Which features are most indicative of a poisonous mushroom?
Kaggle dataset identifier: mushroom-classification
<jupyter_script># # import libraries
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
import seaborn as sns
from sklearn.ensemble import RandomForestClassifier
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # reading data
# printing the first 5 values in the dataset after reading it
df = pd.read_csv("/kaggle/input/mushroom-classification/mushrooms.csv")
df.head()
# # check for missing data
df.isna().sum()
# # Exploratory data analysis
df.describe().T
# Drop this column because it has only one value
df.drop(columns="veil-type", inplace=True)
# Get the distribution of the labels
labels = df["class"].value_counts()
# Plot the distribution
plt.bar(labels.index, labels.values)
plt.xlabel("Label")
plt.ylabel("Count")
plt.show()
X = df.drop(columns="class")
y = df["class"]
X = pd.get_dummies(X)
y = y.replace({"p": 1, "e": 0})
y
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
clf = RandomForestClassifier()
clf.fit(X_train, y_train)
clf.score(X_test, y_test)
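# The dataset description asks which features are most indicative of a poisonous mushroom; a short
# illustrative follow-up using the fitted random forest:
importances = pd.Series(clf.feature_importances_, index=X.columns).sort_values(ascending=False)
print(importances.head(10))
importances.head(10).plot(kind="barh")
plt.title("Top 10 most important one-hot encoded features")
plt.show()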
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/277/129277991.ipynb
|
mushroom-classification
| null |
[{"Id": 129277991, "ScriptId": 38433036, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10444652, "CreationDate": "05/12/2023 11:48:22", "VersionNumber": 1.0, "Title": "Mushroom Classification", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 67.0, "LinesInsertedFromPrevious": 67.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185177521, "KernelVersionId": 129277991, "SourceDatasetVersionId": 974}]
|
[{"Id": 974, "DatasetId": 478, "DatasourceVersionId": 974, "CreatorUserId": 495305, "LicenseName": "CC0: Public Domain", "CreationDate": "12/01/2016 23:08:00", "VersionNumber": 1.0, "Title": "Mushroom Classification", "Slug": "mushroom-classification", "Subtitle": "Safe to eat or deadly poison?", "Description": "### Context\n\nAlthough this dataset was originally contributed to the UCI Machine Learning repository nearly 30 years ago, mushroom hunting (otherwise known as \"shrooming\") is enjoying new peaks in popularity. Learn which features spell certain death and which are most palatable in this dataset of mushroom characteristics. And how certain can your model be?\n\n### Content \n\nThis dataset includes descriptions of hypothetical samples corresponding to 23 species of gilled mushrooms in the Agaricus and Lepiota Family Mushroom drawn from The Audubon Society Field Guide to North American Mushrooms (1981). Each species is identified as definitely edible, definitely poisonous, or of unknown edibility and not recommended. This latter class was combined with the poisonous one. The Guide clearly states that there is no simple rule for determining the edibility of a mushroom; no rule like \"leaflets three, let it be'' for Poisonous Oak and Ivy.\n\n- **Time period**: Donated to UCI ML 27 April 1987\n\n### Inspiration\n\n- What types of machine learning models perform best on this dataset?\n\n- Which features are most indicative of a poisonous mushroom?\n\n### Acknowledgements\n\nThis dataset was originally donated to the UCI Machine Learning repository. You can learn more about past research using the data [here][1]. \n\n#[Start a new kernel][2]\n\n\n [1]: https://archive.ics.uci.edu/ml/datasets/Mushroom\n [2]: https://www.kaggle.com/uciml/mushroom-classification/kernels?modal=true", "VersionNotes": "Initial release", "TotalCompressedBytes": 374003.0, "TotalUncompressedBytes": 374003.0}]
|
[{"Id": 478, "CreatorUserId": 495305, "OwnerUserId": NaN, "OwnerOrganizationId": 7.0, "CurrentDatasetVersionId": 974.0, "CurrentDatasourceVersionId": 974.0, "ForumId": 2099, "Type": 2, "CreationDate": "12/01/2016 23:08:00", "LastActivityDate": "02/06/2018", "TotalViews": 873597, "TotalDownloads": 114985, "TotalVotes": 2206, "TotalKernels": 1371}]
| null |
| false | 0 | 521 | 0 | 823 | 521 |
||
129277671
|
<jupyter_start><jupyter_text>House Price dataset of India
This data set was very useful when I worked on a project .As a Kaggle contributer I am uploading for others to gain knowledge by implementing algorithms.
Attributes:
1.Id(int)
2.Date(int)
3.number of bedrooms(int)
4.number of bathrooms(float)-convert it into 'int' ,just to give you some practice!
5.living area(int)
6.lot area(int)
7.water front present(float)-convert it into 'int'
8.number of views(int)
and so on....
#dataset #kaggle #House .
Kaggle dataset identifier: house-price-dataset-of-india
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Load the data into a Pandas DataFrame
#
data = pd.read_csv("/kaggle/input/house-price-dataset-of-india/House Price India.csv")
# # Print the first five rows of the DataFrame
#
data.head(10)
# # Remove duplicates
data = data.drop_duplicates()
# # Convert the date str to yy-mm-dd
import datetime
data["Date"] = pd.to_datetime(data["Date"], unit="D", origin=pd.Timestamp("1900-01-01"))
# # organizing data And printing first 5 rows
data = data.sort_values("Date")
data.head(5)
# # Create new columns for day, month, and year
data["Date"] = pd.to_datetime(data["Date"])
data["day"] = data["Date"].dt.day
data["month"] = data["Date"].dt.month
data["year"] = data["Date"].dt.year
# # Print the updated DataFrame
#
print(data)
# # Remove the id column &
# # check the new columns
data = data.drop(columns=["id"])
data.head(5)
# # Create a line chart of the mean price for each month
#
import matplotlib.pyplot as plt
data_monthly = data.groupby(pd.Grouper(key="Date", freq="M")).mean()
fig, ax = plt.subplots(figsize=(8, 5))
plt.plot(data_monthly.index, data_monthly["Price"])
plt.xlabel("Month")
plt.ylabel("Price")
plt.title("Price changes over the month")
plt.show()
# # Plot a scatter plot of bedrooms/bathrooms vs. price
#
fig, ax = plt.subplots(figsize=(8, 5))
ax.scatter(data["number of bedrooms"], data["Price"], color="b", label="Bedrooms")
ax.scatter(data["number of bathrooms"], data["Price"], color="r", label="Bathrooms")
ax.set_xlabel("Number of Bedrooms/Bathrooms")
ax.set_ylabel("Price")
ax.set_title("Relationship between Bedrooms/Bathrooms and Price")
ax.legend()
plt.show()
# # Create a scatter plot of living area vs price
#
plt.scatter(data["living area"], data["Price"])
plt.xlabel("Living Area")
plt.ylabel("Price")
plt.title("Living Area vs Price")
plt.show()
# # Create a bar chart of the mean prices for each number of floors
#
mean_price_by_floors = data.groupby("number of floors")["Price"].mean()
mean_price_by_floors.plot(kind="bar")
plt.xlabel("Number of floors")
plt.ylabel("Mean price")
plt.title("Mean price by number of floors")
plt.show()
# # Create a scatter plot of number of schools nearby vs. price to check is school distance affecting the price
#
plt.scatter(data["Number of schools nearby"], data["Price"])
plt.xlabel("Number of schools nearby")
plt.ylabel("Price")
plt.title("Relationship between number of schools nearby and price")
plt.show()
# # Create a scatter plot of distance from airport vs. price
#
plt.scatter(data["Distance from the airport"], data["Price"])
plt.xlabel("Distance from airport")
plt.ylabel("Price")
plt.title("Relationship between distance from airport and price")
plt.show()
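# # A quick look at which numeric columns correlate most with price
# To complement the individual scatter plots above, here is a short illustrative snippet ranking correlations
# with Price (no modelling implied):
price_corr = data.select_dtypes("number").corr()["Price"].drop("Price").sort_values(ascending=False)
print(price_corr.head(10))
price_corr.head(10).plot(kind="barh")
plt.title("Top correlations with Price")
plt.show()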
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/277/129277671.ipynb
|
house-price-dataset-of-india
|
mohamedafsal007
|
[{"Id": 129277671, "ScriptId": 38400942, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11605488, "CreationDate": "05/12/2023 11:45:14", "VersionNumber": 1.0, "Title": "House-Price-D@taset-of-India", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 122.0, "LinesInsertedFromPrevious": 122.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
|
[{"Id": 185177033, "KernelVersionId": 129277671, "SourceDatasetVersionId": 5244696}]
|
[{"Id": 5244696, "DatasetId": 3051697, "DatasourceVersionId": 5317410, "CreatorUserId": 11188729, "LicenseName": "Other (specified in description)", "CreationDate": "03/27/2023 06:22:55", "VersionNumber": 1.0, "Title": "House Price dataset of India", "Slug": "house-price-dataset-of-india", "Subtitle": "This data Set contains 23 columns and 14620 Rows(No Null values).", "Description": "This data set was very useful when I worked on a project .As a Kaggle contributer I am uploading for others to gain knowledge by implementing algorithms.\nAttributes:\n1.Id(int)\n2.Date(int)\n3.number of bedrooms(int)\n4.number of bathrooms(float)-convert it into 'int' ,just to give you some practice!\n5.living area(int)\n6.lot area(int)\n7.water front present(float)-convert it into 'int'\n8.number of views(int)\nand so on....\n#dataset #kaggle #House .", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3051697, "CreatorUserId": 11188729, "OwnerUserId": 11188729.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5244696.0, "CurrentDatasourceVersionId": 5317410.0, "ForumId": 3091327, "Type": 2, "CreationDate": "03/27/2023 06:22:55", "LastActivityDate": "03/27/2023", "TotalViews": 49197, "TotalDownloads": 12418, "TotalVotes": 113, "TotalKernels": 15}]
|
[{"Id": 11188729, "UserName": "mohamedafsal007", "DisplayName": "Mohamed Afsal", "RegisterDate": "08/01/2022", "PerformanceTier": 1}]
|
| false | 1 | 1,026 | 3 | 1,194 | 1,026 |
||
129277516
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use("dark_background")
df = pd.read_csv("/kaggle/input/bgg-dataset/bgg_dataset.csv", sep=";")
df.head()
df.info()
df.columns
df = df[
[
"Name",
"Year Published",
"Min Players",
"Max Players",
"Play Time",
"Min Age",
"Users Rated",
"Rating Average",
"BGG Rank",
"Complexity Average",
"Owned Users",
"Domains",
]
].rename(
columns={
"Year Published": "year",
"Min Players": "min_players",
"Max Players": "max_players",
"Play Time": "play_time",
"Min Age": "min_age",
"Users Rated": "users_rated",
"Rating Average": "avg_rating",
"BGG Rank": "bgg_rank",
"Complexity Average": "comlexity_avg",
"Owned Users": "owned_users",
"Name": "name",
"Mechanics": "mechanics",
"Domains": "domains",
}
)
df.duplicated().sum()
df.shape
df.dtypes
df.isnull().sum()
# # 1. Year published
df.loc[df["year"].isnull()]
df.drop(df[df["name"] == "Hus"].index, inplace=True)
# Get rid of old games of no interest to us
df.drop(df[df["year"] < 1990].index, inplace=True)
df.shape
df["year"] = df["year"].astype(int)
sns.histplot(df["year"])
# # 2. Minimum number of players
df["min_players"].value_counts()
# For 31 games the minimum number of players is 0, which cannot be right.
# Replace 0 with 2, since 2 is the most common minimum player count
df.loc[df["min_players"] == 0, "min_players"] = 2
# # 3. Maximum players
df["max_players"].describe()
df.loc[df["max_players"] == 0, "max_players"] = df.apply(
lambda row: row["min_players"] if row["min_players"] > 0 else 0, axis=1
)
# If 'max_players' is 0, replace it with the row's 'min_players' value (e.g. if 'max_players' = 0 and 'min_players' = 2, then 'max_players' becomes 2)
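# The same fix can be written without apply; a minimal vectorized sketch (a no-op at this
# point, because the apply above has already filled the zero values):
zero_max = df["max_players"] == 0
df.loc[zero_max, "max_players"] = df.loc[zero_max, "min_players"]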
# # 4. Playing Time
df["play_time"].describe()
play_time_mean = df["play_time"].mean()
round(play_time_mean)
df.loc[df["play_time"] == 0, "play_time"] = round(play_time_mean)
df["play_time"] = df["play_time"].astype(int)
df["play_time"].value_counts().sort_values(ascending=False).head(5)
df["play_time"].value_counts().sort_values(ascending=False).head(5).sum() / len(
df["play_time"]
) * 100
# The five most common play times, all between 30 and 90 minutes, account for roughly 62% of all games
# # 5. Minimal age
df["min_age"].describe()
sns.boxplot(x=df["min_age"])
df.loc[18931, "min_age"] = 16
# This outlier was checked against BoardGameGeek and corrected to the currently listed value (16)
# # 6. Users rated
df["users_rated"].describe()
df["users_rated"] = df["users_rated"].astype(int)
# # 7. Average rating
df["avg_rating"] = [float(str(i).replace(",", ".")) for i in df["avg_rating"]]
df["avg_rating"].describe()
sns.histplot(df["avg_rating"], kde=True)
# # 8. Complexity Level
df["comlexity_avg"] = [float(str(i).replace(",", ".")) for i in df["comlexity_avg"]]
df["comlexity_avg"].describe()
complexity_median = round(df["comlexity_avg"].dropna().median(), 2)
df.loc[df["comlexity_avg"] == 0, "comlexity_avg"] = complexity_median
# # 9. Owned Users
df["owned_users"].describe()
df["owned_users"].isnull().sum()
owned_median = df["owned_users"].median()
df.loc[df["owned_users"].isnull(), "owned_users"] = owned_median
df["owned_users"] = df["owned_users"].astype(int)
# # 10. Domains
df["domains"].isnull().sum()
df["domains"].dropna().value_counts().head(5)
fig, ax = plt.subplots()
df["domains"].dropna().value_counts().head(5).plot(
kind="pie", autopct="%.1f%%", ax=ax, textprops={"color": "red"}
)
ax.set_title("Percentage of each genre")
ax.yaxis.label.set_visible(False) # remove y-label
ax.title.set_position([0.5, 1.15])  # move the title position
plt.show()
df.dropna(subset="domains", inplace=True)
df.head(10)
# Average complexity in each genre
df.groupby("domains")["comlexity_avg"].mean()
df.loc[df["domains"] == 2]
df.groupby("domains").agg({"comlexity_avg": ["min", "mean", "max"]}).reset_index()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/277/129277516.ipynb
| null | null |
[{"Id": 129277516, "ScriptId": 38372898, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13664064, "CreationDate": "05/12/2023 11:43:25", "VersionNumber": 1.0, "Title": "BoardgameGeek", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 177.0, "LinesInsertedFromPrevious": 177.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 1,603 | 0 | 1,603 | 1,603 |
||
129277545
|
<jupyter_start><jupyter_text>FIFA 22 complete player dataset
### Context
The datasets provided include the players data for the Career Mode from FIFA 15 to FIFA 22 ("players_22.csv"). The data allows multiple comparisons for the same players across the last 8 version of the videogame.
Some ideas of possible analysis:
- Historical comparison between Messi and Ronaldo (which skill attributes changed the most over time - compared to real-life stats);
- Ideal budget to create a competitive team (at the level of the top n teams in Europe) and the point at which extra budget no longer buys significantly better players for the 11-man lineup. An extra is the same comparison using the Potential attribute for the lineup instead of the Overall attribute;
- Sample analysis of the top n% of players (e.g. the top 5%) to see whether important attributes such as Agility, BallControl or Strength have become more or less common across the FIFA versions. An example would be seeing that the top 5% of players in FIFA 20 are faster (higher Acceleration and Agility) compared to FIFA 15. The trend of attributes is also an important indication of how some attributes are necessary for players to win games (a version with more top 5% players with high BallControl stats would indicate that the game is more focused on technique rather than the physical aspect).
<br>
### Content
- Every player available in FIFA 15, 16, 17, 18, 19, 20, 21, and also FIFA 22
- 100+ attributes
- URL of the scraped players
- URL of the uploaded player faces, club and nation logos
- Player positions, with the role in the club and in the national team
- Player attributes with statistics as Attacking, Skills, Defense, Mentality, GK Skills, etc.
- Player personal data like Nationality, Club, DateOfBirth, Wage, Salary, etc.
<br>
Updates from previous FIFA 21 dataset are the following:
- Inclusion of FIFA 22 data
- Inclusion of all female players
- Columns reorder - to increase readability
- Removal of duplicate GK attribute fields
- The field *defending marking* has been renamed *defending marking awareness* and includes both the *marking* (old attribute name - up to FIFA 19) and *defensive awareness* values (new attribute name - from FIFA 20)
- All data from FIFA 15 was re-scraped, as one Kaggle user noted in [this discussion](https://www.kaggle.com/stefanoleone992/fifa-21-complete-player-dataset/discussion/232504) that sofifa updated some historical player market values over time
<br>
Kaggle dataset identifier: fifa-22-complete-player-dataset
<jupyter_code>import pandas as pd
df = pd.read_csv('fifa-22-complete-player-dataset/players_22.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 19239 entries, 0 to 19238
Columns: 110 entries, sofifa_id to nation_flag_url
dtypes: float64(16), int64(44), object(50)
memory usage: 16.1+ MB
<jupyter_text>Examples:
{
"sofifa_id": 158023,
"player_url": "https://sofifa.com/player/158023/lionel-messi/220002",
"short_name": "L. Messi",
"long_name": "Lionel Andr\u00e9s Messi Cuccittini",
"player_positions": "RW, ST, CF",
"overall": 93,
"potential": 93,
"value_eur": 78000000,
"wage_eur": 320000,
"age": 34,
"dob": "1987-06-24",
"height_cm": 170,
"weight_kg": 72,
"club_team_id": 73,
"club_name": "Paris Saint-Germain",
"league_name": "French Ligue 1",
"league_level": 1,
"club_position": "RW",
"club_jersey_number": 30,
"club_loaned_from": NaN,
"...": "and 90 more columns"
}
{
"sofifa_id": 188545,
"player_url": "https://sofifa.com/player/188545/robert-lewandowski/220002",
"short_name": "R. Lewandowski",
"long_name": "Robert Lewandowski",
"player_positions": "ST",
"overall": 92,
"potential": 92,
"value_eur": 119500000,
"wage_eur": 270000,
"age": 32,
"dob": "1988-08-21",
"height_cm": 185,
"weight_kg": 81,
"club_team_id": 21,
"club_name": "FC Bayern M\u00fcnchen",
"league_name": "German 1. Bundesliga",
"league_level": 1,
"club_position": "ST",
"club_jersey_number": 9,
"club_loaned_from": NaN,
"...": "and 90 more columns"
}
{
"sofifa_id": 20801,
"player_url": "https://sofifa.com/player/20801/c-ronaldo-dos-santos-aveiro/220002",
"short_name": "Cristiano Ronaldo",
"long_name": "Cristiano Ronaldo dos Santos Aveiro",
"player_positions": "ST, LW",
"overall": 91,
"potential": 91,
"value_eur": 45000000,
"wage_eur": 270000,
"age": 36,
"dob": "1985-02-05",
"height_cm": 187,
"weight_kg": 83,
"club_team_id": 11,
"club_name": "Manchester United",
"league_name": "English Premier League",
"league_level": 1,
"club_position": "ST",
"club_jersey_number": 7,
"club_loaned_from": NaN,
"...": "and 90 more columns"
}
{
"sofifa_id": 190871,
"player_url": "https://sofifa.com/player/190871/neymar-da-silva-santos-jr/220002",
"short_name": "Neymar Jr",
"long_name": "Neymar da Silva Santos J\u00fanior",
"player_positions": "LW, CAM",
"overall": 91,
"potential": 91,
"value_eur": 129000000,
"wage_eur": 270000,
"age": 29,
"dob": "1992-02-05",
"height_cm": 175,
"weight_kg": 68,
"club_team_id": 73,
"club_name": "Paris Saint-Germain",
"league_name": "French Ligue 1",
"league_level": 1,
"club_position": "LW",
"club_jersey_number": 10,
"club_loaned_from": NaN,
"...": "and 90 more columns"
}
<jupyter_script># # **Import Libraries**
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# data analysis and wrangling
import pandas as pd
import numpy as np
import random as rnd
# visualization
import seaborn as sns
import matplotlib.pyplot as plt
# machine learning
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeClassifier
# # **Read Dataset**
df = pd.read_csv(
"/kaggle/input/fifa-22-complete-player-dataset/players_22.csv", low_memory=False
)
df.head(5)
# ## Data Preprocessing 1
# ### Delete columns that include url
df = df[df.columns.drop(list(df.filter(regex="url")))]
df.shape
df.dtypes
# ### Find the columns with more than 50% missing values so we can drop them
cols_to_drop = []
for i in df.columns:
missing = np.abs((df[i].count() - df[i].shape[0]) / df[i].shape[0] * 100)
if missing > 50:
print("{} - {}%".format(i, round(missing)))
cols_to_drop.append(i)
# ### **Columns that we might drop:**
# * club_loaned_from, nation_team_id, nation_position, nation_jersey_number, player_tags, player_traits, goalkeeping_speed
df.drop(columns=cols_to_drop, inplace=True)
print(df.shape)
df.rename(columns={"skill_moves": "skills"}, inplace=True)
filter = [
"sofifa_id",
"skill_",
"movement_",
"defending_",
"goalkeeping_",
"attacking_",
"power_",
"mentality_",
]
for i in filter:
df = df[df.columns.drop(list(df.filter(regex=i)))]
df.shape
df.columns
df1 = df[
[
"short_name",
"age",
"height_cm",
"weight_kg",
"nationality_name",
"club_name",
"overall",
"potential",
"league_name",
"league_level",
"value_eur",
"wage_eur",
"player_positions",
"preferred_foot",
"international_reputation",
"skills",
"work_rate",
"pace",
"shooting",
"passing",
"dribbling",
"defending",
"physic",
]
]
df1.info()
df1.isnull().sum()
# ## Exploratory data analysis
player_positions = df1["player_positions"].value_counts().head(20)
player_positions
plt.figure(figsize=(10, 6))
sns.barplot(
x=player_positions.index, y=player_positions.values, palette="plasma", capsize=0.2
)
plt.title("Most Common Player Positions")
plt.xticks(rotation=45)
plt.show()
plt.figure(figsize=(10, 6))
plt.hist(x=df1.wage_eur, bins=10)
plt.xticks(rotation=45)
plt.show()
country_players = df1["nationality_name"].value_counts().head(10)
country_players
plt.figure(figsize=(8, 6))
sns.barplot(
x=country_players.index, y=country_players.values, palette="plasma", capsize=0.2
)
plt.title("Number Players per country")
plt.xticks(rotation=45)
plt.show()
hg_skills = df1[df1.skills == 5]
hg_skills["nationality_name"].value_counts()
# ### Relationship between skills and Wages
# Relationship between skills and Wages
fig, ax = plt.subplots(figsize=(8, 5))
plt.scatter(data=df1, x="skills", y="wage_eur")
plt.xlabel("skills")
plt.ylabel("Wage in EUR")
plt.title("skill_moves & wages in EUR", fontsize=16)
plt.show()
# ### Relationship between international_reputation and wages
# Relationship between international_reputation and wages
fig, ax = plt.subplots(figsize=(8, 5))
plt.scatter(data=df1, x="international_reputation", y="wage_eur")
plt.xlabel("International Reputation")
plt.ylabel("Wage in EUR")
plt.title("reputation & wages in EUR", fontsize=16)
plt.show()
# ### Relationship between potential and wages
# Relationship between potential and wages
fig, ax = plt.subplots(figsize=(8, 5))
plt.scatter(data=df1, x="potential", y="wage_eur")
plt.xlabel("Potential")
plt.ylabel("Wage in EUR")
plt.title("potential & wages in EUR", fontsize=16)
plt.show()
# ### Relationship between overall and wages
# Relationship between overall and wages
fig, ax = plt.subplots(figsize=(8, 5))
plt.scatter(data=df, x="overall", y="wage_eur")
plt.xlabel("Overall")
plt.ylabel("Wage in EUR")
plt.title("overall & wages in EUR", fontsize=16)
plt.show()
# ### Relationship between age and wages
# Relationship between age and wages
fig, ax = plt.subplots(figsize=(8, 5))
plt.scatter(data=df, x="age", y="wage_eur")
plt.xlabel("Age")
plt.ylabel("Wage in EUR")
plt.title("age & wages in EUR", fontsize=16)
plt.show()
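# To back up the wage scatter plots above with numbers, a small correlation summary (a sketch
# using only columns already selected into df1); Spearman is chosen here because wages are
# heavily skewed, which is an editorial assumption rather than something stated in the notebook.
wage_corr = df1[
    ["wage_eur", "overall", "potential", "international_reputation", "skills", "age"]
].corr(method="spearman")["wage_eur"].drop("wage_eur")
print(wage_corr.sort_values(ascending=False))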
df1.columns
df_x = df[
[
"shooting",
"defending",
"passing",
"dribbling",
"pace",
"wage_eur",
"potential",
"overall",
]
]
plt.figure(figsize=(9, 9))
plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=0.4, hspace=0.4)
width = 3
height = 3
index = 1
for i in df_x.columns:
plt.subplot(height, width, index)
plt.scatter(x=df["physic"], y=df_x[i])
plt.xlabel("physic")
plt.ylabel(i)
plt.xticks(rotation=45)
index = index + 1
df_x = df[
[
"shooting",
"defending",
"passing",
"dribbling",
"pace",
"wage_eur",
"physic",
"potential",
"overall",
]
]
plt.figure(figsize=(15, 15))
plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=0.4, hspace=0.4)
width = 3
height = 4
index = 1
for i in df_x.columns:
plt.subplot(height, width, index)
plt.scatter(x=df["height_cm"], y=df_x[i])
plt.xlabel("height_cm")
plt.ylabel(i)
plt.xticks(rotation=45)
index = index + 1
df_x = df1[
[
"shooting",
"defending",
"passing",
"dribbling",
"pace",
"wage_eur",
"physic",
"potential",
"overall",
]
]
plt.figure(figsize=(15, 15))
plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=0.4, hspace=0.4)
width = 3
height = 4
index = 1
for i in df_x.columns:
plt.subplot(height, width, index)
plt.scatter(x=df["weight_kg"], y=df_x[i])
plt.xlabel("weight_kg")
plt.ylabel(i)
plt.xticks(rotation=45)
index = index + 1
# ### Age distribution
plt.figure(figsize=(8, 6))
sns.barplot(
x=df1.age.value_counts().index,
y=df1.age.value_counts().values,
palette="plasma",
capsize=0.2,
)
plt.xticks(fontsize=15, rotation=90)
plt.yticks(fontsize=15)
plt.title("Age Distribution")
plt.show()
plt.figure(figsize=(16, 6))
sns.barplot(
x=df1.league_name.value_counts().index,
y=df1.league_name.value_counts().values,
palette="plasma",
capsize=0.2,
)
plt.xticks(fontsize=15, rotation=90)
plt.yticks(fontsize=15)
plt.show()
df_x = df1[
[
"shooting",
"defending",
"passing",
"dribbling",
"pace",
"wage_eur",
"potential",
"overall",
]
]
plt.figure(figsize=(10, 10))
plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=0.4, hspace=0.4)
width = 3
height = 4
index = 1
for i in df_x.columns:
plt.subplot(height, width, index)
sns.regplot(x=df["age"], y=df_x[i])
plt.xlabel("age")
plt.ylabel(i)
plt.xticks(rotation=45)
index = index + 1
# ### Overall score of the players
# Overall score of the players
df1.sort_values(by="overall", ascending=False)[["short_name", "overall", "age"]].head(
20
)
# L. Messi, R. Lewandowski, Cristiano Ronaldo, Neymar Jr, K. De Bruyne, J. Oblak and K. Mbappé have the highest overall scores of all players.
# ### Potential of young players
# Potential of players aged 25 or under
# We filter players aged 25 or under
young_players = df1[df1["age"] <= 25]
sorted_players = young_players.sort_values(by="potential", ascending=False)
potential = sorted_players[["short_name", "potential", "age"]].head(20)
potential
# K. Mbappé, E. Haaland, G. Donnarumma and T. Alexander-Arnold are the players aged 25 or under with the highest potential
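# A related, hedged view of the same young players: "potential - overall" as a rough
# growth-headroom figure, which highlights who still has the most room to improve.
headroom_df = young_players.assign(
    headroom=young_players["potential"] - young_players["overall"]
)
print(
    headroom_df.sort_values("headroom", ascending=False)[
        ["short_name", "age", "overall", "potential", "headroom"]
    ].head(10)
)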
top_players = df1.sort_values(by="overall", ascending=False).head(30)
top_players
# ### Age distribution of top players
plt.figure(figsize=(8, 6))
sns.barplot(
x=top_players.age.value_counts().index,
y=top_players.age.value_counts().values,
palette="plasma",
capsize=0.2,
)
plt.xticks(fontsize=15, rotation=90)
plt.yticks(fontsize=15)
plt.title("Age Distribution")
plt.show()
# ### League distribution of top players
plt.figure(figsize=(6, 6))
sns.barplot(
x=top_players.league_name.value_counts().index,
y=top_players.league_name.value_counts().values,
palette="plasma",
capsize=0.2,
)
plt.xticks(fontsize=15, rotation=90)
plt.yticks(fontsize=15)
plt.title("League distribution of top players")
plt.show()
# ### Wage distribution of top players
plt.figure(figsize=(6, 6))
sns.barplot(
x=top_players.wage_eur.value_counts().index,
y=top_players.wage_eur.value_counts().values,
palette="plasma",
capsize=0.2,
)
plt.xticks(fontsize=15, rotation=90)
plt.yticks(fontsize=15)
plt.title("Wage distribution of top players")
plt.show()
print("Top 30 players")
x = ["overall", "potential", "skills", "wage_eur", "pace", "physic"]
for i in x:
print("Mean {} : {}".format(i, top_players[i].mean()))
plt.figure(figsize=(15, 15))
x = ["overall", "potential", "wage_eur", "pace", "physic", "skills"]
plt.subplots_adjust(left=0.1, bottom=0.1, right=0.9, top=0.9, wspace=0.4, hspace=0.4)
width = 3
height = 4
index = 1
for i in x:
plt.subplot(height, width, index)
sns.histplot(x=top_players[i], kde=True)
plt.xlabel(i)
plt.xticks(rotation=45)
index = index + 1
# ## Data Preprocessing 2
# #### **Since player_positions often lists several positions, if a player has 'RW, ST, CF' we assume the primary position is 'RW'**
df1["player_positions"] = df1["player_positions"].apply(
lambda x: x.split(",")[0].strip()
)
unique_positions = df1["player_positions"].unique()
print(unique_positions)
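# An optional extra step (not used further below): collapse the single position codes into
# broad groups. The mapping is an assumption based on standard FIFA position abbreviations,
# not something taken from the dataset itself; any unmapped codes would show up as NaN.
position_groups = {
    "GK": "GK",
    "CB": "DEF", "LB": "DEF", "RB": "DEF", "LWB": "DEF", "RWB": "DEF",
    "CDM": "MID", "CM": "MID", "CAM": "MID", "LM": "MID", "RM": "MID",
    "ST": "ATT", "CF": "ATT", "LW": "ATT", "RW": "ATT",
}
print(df1["player_positions"].map(position_groups).value_counts(dropna=False))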
# #### **As we can see, the column league_level will be used instead of league_name and club_name**
df1 = df1.drop(columns=["nationality_name", "club_name", "league_name", "short_name"])
df1[df1.league_level == 1].head(5)
df1.info()
# #### Check the remaining missing values and decide how to impute them
missing_percentage = (df1.isnull().sum() / len(df1)) * 100
print(missing_percentage)
# #### We preprocess preferred_foot with a one-hot encoder
from sklearn.preprocessing import OneHotEncoder
encoder = OneHotEncoder(sparse=False)
encoded_data = encoder.fit_transform(df1[["preferred_foot"]])
encoded_df = pd.DataFrame(encoded_data, columns=encoder.categories_[0])
data_encoded = pd.concat([df1, encoded_df], axis=1)
data_encoded
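# For reference, pandas can produce the same kind of dummy columns in one line; shown here
# only as a hedged alternative - the cells below keep using data_encoded as built above.
alt_encoded = pd.get_dummies(df1, columns=["preferred_foot"])
print(alt_encoded.filter(like="preferred_foot").head())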
# #### We use a label encoder for the remaining object columns, such as work_rate and player_positions
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
for i in data_encoded.select_dtypes(["object"]):
data_encoded[i] = le.fit_transform(data_encoded[i])
data_encoded.info()
# #### We will use KNNImputer to impute the missing values in our dataset
from sklearn.impute import KNNImputer
from sklearn.metrics import mean_squared_error, mean_absolute_error
columns_with_missing_values = data_encoded.columns[data_encoded.isnull().any()].tolist()
columns_with_missing_values
df_imputed = data_encoded.copy()
imputation_data = df_imputed[columns_with_missing_values].copy()
imputer = KNNImputer(n_neighbors=6)
imputed_data = imputer.fit_transform(imputation_data)
df_imputed[columns_with_missing_values] = imputed_data
missing_percentage = (df_imputed.isnull().sum() / len(df_imputed)) * 100
print(missing_percentage)
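# mean_squared_error / mean_absolute_error were imported above but never used; one hedged way
# to use them is a quick sanity check of the imputer: hide a sample of known "pace" values,
# re-impute, and measure how far the reconstructed values land from the originals.
rng = np.random.RandomState(42)
known_idx = data_encoded.index[data_encoded["pace"].notna()].to_numpy()
masked_idx = rng.choice(known_idx, size=200, replace=False)
check_data = data_encoded[columns_with_missing_values].copy()
true_vals = check_data.loc[masked_idx, "pace"].copy()
check_data.loc[masked_idx, "pace"] = np.nan
check_imputed = pd.DataFrame(
    KNNImputer(n_neighbors=6).fit_transform(check_data),
    columns=check_data.columns,
    index=check_data.index,
)
print(
    "MAE on masked pace values:",
    mean_absolute_error(true_vals, check_imputed.loc[masked_idx, "pace"]),
)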
# **We convert the float columns (value_eur, wage_eur, league_level, pace, shooting, ...) to integers**
float_columns = df_imputed.select_dtypes(include=["float"]).columns
df_imputed[float_columns] = df_imputed[float_columns].astype(int)
df_imputed
# #### We use a heatmap to see the correlations between features
plt.figure(figsize=(18, 10))
sns.heatmap(df_imputed.corr())
# ## Prediction using Linear Regression
# #### We use RFECV to select an optimal subset of features for the model
# We drop potential from the feature set because it is highly correlated with the target (overall)
from sklearn.feature_selection import RFECV
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_squared_error
X = df_imputed.drop(columns=["overall", "potential"])
y = df_imputed["overall"]
model = LinearRegression()
rfecv = RFECV(estimator=model, scoring="neg_mean_squared_error")
X_selected = rfecv.fit_transform(X, y)
print("Optimal number of features: {}".format(rfecv.n_features_))
selected_features = X.columns[rfecv.support_]
print("Selected features:")
print(selected_features)
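# A hedged follow-up: if we wanted to train on the RFECV-selected subset only (the cells
# below keep the full X, matching the original flow), the reduced matrix is simply:
X_rfe = X[selected_features]
print(X_rfe.shape)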
# **We split the full feature set X into train and test sets**
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import Pipeline
from sklearn.metrics import mean_squared_error, r2_score
pipeline = Pipeline(
[("standardscaler", StandardScaler()), ("linearregression", LinearRegression())]
)
pipeline.fit(X_train, y_train)
y_pred_test = pipeline.predict(X_test)
mse_test = mean_squared_error(y_test, y_pred_test)
rmse_test = np.sqrt(mse_test)
r2_test = r2_score(y_test, y_pred_test)
print("MSE test:", mse_test)
print("RMSE test:", rmse_test)
print("R-squared test:", r2_test)
print("----------------------------")
y_pred = pipeline.predict(X)
mse = mean_squared_error(y, y_pred)
rmse = np.sqrt(mse)
r2 = r2_score(y, y_pred)
print("MSE all:", mse)
print("RMSE all:", rmse)
print("R-squared all:", r2)
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error, r2_score
rf = RandomForestRegressor(random_state=42)
rf.fit(X_train, y_train)
y_pred_test_rfg = rf.predict(X_test)
mse_test = mean_squared_error(y_test, y_pred_test_rfg)
rmse_test = np.sqrt(mse_test)
r2_test = r2_score(y_test, y_pred_test_rfg)
y_pred_rfg = rf.predict(X)
mse_all = mean_squared_error(y, y_pred_rfg)
rmse_all = np.sqrt(mse_all)
r2_all = r2_score(y, y_pred_rfg)
print("MSE test:", mse_test)
print("RMSE test:", rmse_test)
print("R-squared test:", r2_test)
print("---------------------------")
print("MSE all:", mse_all)
print("RMSE all:", rmse_all)
print("R-squared all:", r2_all)
# #### Real vs Predicted
# We plot the scatter for the linear regression model
data1 = pd.DataFrame({"Real": y_test, "Predicted": y_pred_test})
sns.scatterplot(data=data1, x="Real", y="Predicted")
plt.plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], "r--", lw=3)
plt.xlabel("Real")
plt.ylabel("Predicted")
plt.show()
# We plot the scatter for the random forest regressor
data2 = pd.DataFrame({"Real": y_test, "Predicted": y_pred_test_rfg})
sns.scatterplot(data=data2, x="Real", y="Predicted")
plt.plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], "r--", lw=3)
plt.xlabel("Real")
plt.ylabel("Predicted")
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/277/129277545.ipynb
|
fifa-22-complete-player-dataset
|
stefanoleone992
|
[{"Id": 129277545, "ScriptId": 38435920, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13148010, "CreationDate": "05/12/2023 11:43:42", "VersionNumber": 1.0, "Title": "\ud83c\udfae\ud83c\udf0d FIFA 22 Player Overall Predictions \ud83c\udf1f\u26bd", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 542.0, "LinesInsertedFromPrevious": 542.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
|
[{"Id": 185176718, "KernelVersionId": 129277545, "SourceDatasetVersionId": 2767201}]
|
[{"Id": 2767201, "DatasetId": 1617785, "DatasourceVersionId": 2812748, "CreatorUserId": 1852232, "LicenseName": "CC0: Public Domain", "CreationDate": "11/01/2021 22:25:02", "VersionNumber": 3.0, "Title": "FIFA 22 complete player dataset", "Slug": "fifa-22-complete-player-dataset", "Subtitle": "19k+ players, 100+ attributes extracted from the latest edition of FIFA", "Description": "### Context\n\nThe datasets provided include the players data for the Career Mode from FIFA 15 to FIFA 22 (\"players_22.csv\"). The data allows multiple comparisons for the same players across the last 8 version of the videogame.\n\nSome ideas of possible analysis:\n\n- Historical comparison between Messi and Ronaldo (what skill attributes changed the most during time - compared to real-life stats);\n\n- Ideal budget to create a competitive team (at the level of top n teams in Europe) and at which point the budget does not allow to buy significantly better players for the 11-men lineup. An extra is the same comparison with the Potential attribute for the lineup instead of the Overall attribute;\n\n- Sample analysis of top n% players (e.g. top 5% of the player) to see if some important attributes as Agility or BallControl or Strength have been popular or not acroos the FIFA versions. An example would be seeing that the top 5% players of FIFA 20 are faster (higher Acceleration and Agility) compared to FIFA 15. The trend of attributes is also an important indication of how some attributes are necessary for players to win games (a version with more top 5% players with high BallControl stats would indicate that the game is more focused on the technique rather than the physicial aspect).\n\n<br>\n\n### Content\n\n- Every player available in FIFA 15, 16, 17, 18, 19, 20, 21, and also FIFA 22\n\n- 100+ attributes\n\n- URL of the scraped players\n\n- URL of the uploaded player faces, club and nation logos\n\n- Player positions, with the role in the club and in the national team\n\n- Player attributes with statistics as Attacking, Skills, Defense, Mentality, GK Skills, etc.\n\n- Player personal data like Nationality, Club, DateOfBirth, Wage, Salary, etc.\n\n<br>\n\nUpdates from previous FIFA 21 dataset are the following:\n\n- Inclusion of FIFA 22 data\n\n- Inclusion of all female players\n\n- Columns reorder - to increase readability\n\n- Removal of duplicate GK attribute fields\n\n- The field *defending marking* has been renamed *defending marking awareness* and includes both the *marking* (old attribute name - up to FIFA 19) and *defensive awareness* values (new attribute name - from FIFA 20)\n\n- All data from FIFA 15 was re-scraped, as one Kaggle user noted in [this discussion](https://www.kaggle.com/stefanoleone992/fifa-21-complete-player-dataset/discussion/232504) that sofifa updated some historical player market values over time\n\n<br>\n\n### Acknowledgements\n\nData has been scraped from the publicly available website [sofifa.com](https://sofifa.com).", "VersionNotes": "Data Update 2021-11-01 - Including female players", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1617785, "CreatorUserId": 1852232, "OwnerUserId": 1852232.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2767201.0, "CurrentDatasourceVersionId": 2812748.0, "ForumId": 1638308, "Type": 2, "CreationDate": "09/28/2021 17:38:58", "LastActivityDate": "09/28/2021", "TotalViews": 129181, "TotalDownloads": 20956, "TotalVotes": 339, "TotalKernels": 31}]
|
[{"Id": 1852232, "UserName": "stefanoleone992", "DisplayName": "Stefano Leone", "RegisterDate": "04/24/2018", "PerformanceTier": 2}]
|
|
[{"fifa-22-complete-player-dataset/players_22.csv": {"column_names": "[\"sofifa_id\", \"player_url\", \"short_name\", \"long_name\", \"player_positions\", \"overall\", \"potential\", \"value_eur\", \"wage_eur\", \"age\", \"dob\", \"height_cm\", \"weight_kg\", \"club_team_id\", \"club_name\", \"league_name\", \"league_level\", \"club_position\", \"club_jersey_number\", \"club_loaned_from\", \"club_joined\", \"club_contract_valid_until\", \"nationality_id\", \"nationality_name\", \"nation_team_id\", \"nation_position\", \"nation_jersey_number\", \"preferred_foot\", \"weak_foot\", \"skill_moves\", \"international_reputation\", \"work_rate\", \"body_type\", \"real_face\", \"release_clause_eur\", \"player_tags\", \"player_traits\", \"pace\", \"shooting\", \"passing\", \"dribbling\", \"defending\", \"physic\", \"attacking_crossing\", \"attacking_finishing\", \"attacking_heading_accuracy\", \"attacking_short_passing\", \"attacking_volleys\", \"skill_dribbling\", \"skill_curve\", \"skill_fk_accuracy\", \"skill_long_passing\", \"skill_ball_control\", \"movement_acceleration\", \"movement_sprint_speed\", \"movement_agility\", \"movement_reactions\", \"movement_balance\", \"power_shot_power\", \"power_jumping\", \"power_stamina\", \"power_strength\", \"power_long_shots\", \"mentality_aggression\", \"mentality_interceptions\", \"mentality_positioning\", \"mentality_vision\", \"mentality_penalties\", \"mentality_composure\", \"defending_marking_awareness\", \"defending_standing_tackle\", \"defending_sliding_tackle\", \"goalkeeping_diving\", \"goalkeeping_handling\", \"goalkeeping_kicking\", \"goalkeeping_positioning\", \"goalkeeping_reflexes\", \"goalkeeping_speed\", \"ls\", \"st\", \"rs\", \"lw\", \"lf\", \"cf\", \"rf\", \"rw\", \"lam\", \"cam\", \"ram\", \"lm\", \"lcm\", \"cm\", \"rcm\", \"rm\", \"lwb\", \"ldm\", \"cdm\", \"rdm\", \"rwb\", \"lb\", \"lcb\", \"cb\", \"rcb\", \"rb\", \"gk\", \"player_face_url\", \"club_logo_url\", \"club_flag_url\", \"nation_logo_url\", \"nation_flag_url\"]", "column_data_types": "{\"sofifa_id\": \"int64\", \"player_url\": \"object\", \"short_name\": \"object\", \"long_name\": \"object\", \"player_positions\": \"object\", \"overall\": \"int64\", \"potential\": \"int64\", \"value_eur\": \"float64\", \"wage_eur\": \"float64\", \"age\": \"int64\", \"dob\": \"object\", \"height_cm\": \"int64\", \"weight_kg\": \"int64\", \"club_team_id\": \"float64\", \"club_name\": \"object\", \"league_name\": \"object\", \"league_level\": \"float64\", \"club_position\": \"object\", \"club_jersey_number\": \"float64\", \"club_loaned_from\": \"object\", \"club_joined\": \"object\", \"club_contract_valid_until\": \"float64\", \"nationality_id\": \"int64\", \"nationality_name\": \"object\", \"nation_team_id\": \"float64\", \"nation_position\": \"object\", \"nation_jersey_number\": \"float64\", \"preferred_foot\": \"object\", \"weak_foot\": \"int64\", \"skill_moves\": \"int64\", \"international_reputation\": \"int64\", \"work_rate\": \"object\", \"body_type\": \"object\", \"real_face\": \"object\", \"release_clause_eur\": \"float64\", \"player_tags\": \"object\", \"player_traits\": \"object\", \"pace\": \"float64\", \"shooting\": \"float64\", \"passing\": \"float64\", \"dribbling\": \"float64\", \"defending\": \"float64\", \"physic\": \"float64\", \"attacking_crossing\": \"int64\", \"attacking_finishing\": \"int64\", \"attacking_heading_accuracy\": \"int64\", \"attacking_short_passing\": \"int64\", \"attacking_volleys\": \"int64\", \"skill_dribbling\": \"int64\", \"skill_curve\": \"int64\", 
\"skill_fk_accuracy\": \"int64\", \"skill_long_passing\": \"int64\", \"skill_ball_control\": \"int64\", \"movement_acceleration\": \"int64\", \"movement_sprint_speed\": \"int64\", \"movement_agility\": \"int64\", \"movement_reactions\": \"int64\", \"movement_balance\": \"int64\", \"power_shot_power\": \"int64\", \"power_jumping\": \"int64\", \"power_stamina\": \"int64\", \"power_strength\": \"int64\", \"power_long_shots\": \"int64\", \"mentality_aggression\": \"int64\", \"mentality_interceptions\": \"int64\", \"mentality_positioning\": \"int64\", \"mentality_vision\": \"int64\", \"mentality_penalties\": \"int64\", \"mentality_composure\": \"int64\", \"defending_marking_awareness\": \"int64\", \"defending_standing_tackle\": \"int64\", \"defending_sliding_tackle\": \"int64\", \"goalkeeping_diving\": \"int64\", \"goalkeeping_handling\": \"int64\", \"goalkeeping_kicking\": \"int64\", \"goalkeeping_positioning\": \"int64\", \"goalkeeping_reflexes\": \"int64\", \"goalkeeping_speed\": \"float64\", \"ls\": \"object\", \"st\": \"object\", \"rs\": \"object\", \"lw\": \"object\", \"lf\": \"object\", \"cf\": \"object\", \"rf\": \"object\", \"rw\": \"object\", \"lam\": \"object\", \"cam\": \"object\", \"ram\": \"object\", \"lm\": \"object\", \"lcm\": \"object\", \"cm\": \"object\", \"rcm\": \"object\", \"rm\": \"object\", \"lwb\": \"object\", \"ldm\": \"object\", \"cdm\": \"object\", \"rdm\": \"object\", \"rwb\": \"object\", \"lb\": \"object\", \"lcb\": \"object\", \"cb\": \"object\", \"rcb\": \"object\", \"rb\": \"object\", \"gk\": \"object\", \"player_face_url\": \"object\", \"club_logo_url\": \"object\", \"club_flag_url\": \"object\", \"nation_logo_url\": \"object\", \"nation_flag_url\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 19239 entries, 0 to 19238\nColumns: 110 entries, sofifa_id to nation_flag_url\ndtypes: float64(16), int64(44), object(50)\nmemory usage: 16.1+ MB\n", "summary": "{\"sofifa_id\": {\"count\": 19239.0, \"mean\": 231468.08695878164, \"std\": 27039.717497127018, \"min\": 41.0, \"25%\": 214413.5, \"50%\": 236543.0, \"75%\": 253532.5, \"max\": 264640.0}, \"overall\": {\"count\": 19239.0, \"mean\": 65.77218150631529, \"std\": 6.880231506861689, \"min\": 47.0, \"25%\": 61.0, \"50%\": 66.0, \"75%\": 70.0, \"max\": 93.0}, \"potential\": {\"count\": 19239.0, \"mean\": 71.07937002962731, \"std\": 6.0862131012609, \"min\": 49.0, \"25%\": 67.0, \"50%\": 71.0, \"75%\": 75.0, \"max\": 95.0}, \"value_eur\": {\"count\": 19165.0, \"mean\": 2850451.813201148, \"std\": 7613699.947458978, \"min\": 9000.0, \"25%\": 475000.0, \"50%\": 975000.0, \"75%\": 2000000.0, \"max\": 194000000.0}, \"wage_eur\": {\"count\": 19178.0, \"mean\": 9017.989362811555, \"std\": 19470.176723602686, \"min\": 500.0, \"25%\": 1000.0, \"50%\": 3000.0, \"75%\": 8000.0, \"max\": 350000.0}, \"age\": {\"count\": 19239.0, \"mean\": 25.210821768283175, \"std\": 4.748235247092781, \"min\": 16.0, \"25%\": 21.0, \"50%\": 25.0, \"75%\": 29.0, \"max\": 54.0}, \"height_cm\": {\"count\": 19239.0, \"mean\": 181.29970372680492, \"std\": 6.863179177196187, \"min\": 155.0, \"25%\": 176.0, \"50%\": 181.0, \"75%\": 186.0, \"max\": 206.0}, \"weight_kg\": {\"count\": 19239.0, \"mean\": 74.94303238214044, \"std\": 7.069434064186432, \"min\": 49.0, \"25%\": 70.0, \"50%\": 75.0, \"75%\": 80.0, \"max\": 110.0}, \"club_team_id\": {\"count\": 19178.0, \"mean\": 50580.4981228491, \"std\": 54401.86853481683, \"min\": 1.0, \"25%\": 479.0, \"50%\": 1938.0, \"75%\": 111139.0, \"max\": 115820.0}, \"league_level\": 
{\"count\": 19178.0, \"mean\": 1.3543643758473252, \"std\": 0.7478651440250982, \"min\": 1.0, \"25%\": 1.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 5.0}, \"club_jersey_number\": {\"count\": 19178.0, \"mean\": 20.945249765356138, \"std\": 17.909369141981372, \"min\": 1.0, \"25%\": 9.0, \"50%\": 18.0, \"75%\": 27.0, \"max\": 99.0}, \"club_contract_valid_until\": {\"count\": 19178.0, \"mean\": 2022.7640004171446, \"std\": 1.2132025171866476, \"min\": 2021.0, \"25%\": 2022.0, \"50%\": 2022.0, \"75%\": 2024.0, \"max\": 2031.0}, \"nationality_id\": {\"count\": 19239.0, \"mean\": 58.60268205208171, \"std\": 50.29861391203941, \"min\": 1.0, \"25%\": 21.0, \"50%\": 45.0, \"75%\": 60.0, \"max\": 219.0}, \"nation_team_id\": {\"count\": 759.0, \"mean\": 14480.848484848484, \"std\": 35328.73021738153, \"min\": 1318.0, \"25%\": 1338.0, \"50%\": 1357.0, \"75%\": 1386.0, \"max\": 111473.0}, \"nation_jersey_number\": {\"count\": 759.0, \"mean\": 12.567852437417654, \"std\": 7.039115702087771, \"min\": 1.0, \"25%\": 7.0, \"50%\": 12.0, \"75%\": 19.0, \"max\": 28.0}, \"weak_foot\": {\"count\": 19239.0, \"mean\": 2.9461510473517336, \"std\": 0.671560478048015, \"min\": 1.0, \"25%\": 3.0, \"50%\": 3.0, \"75%\": 3.0, \"max\": 5.0}, \"skill_moves\": {\"count\": 19239.0, \"mean\": 2.352461146629243, \"std\": 0.7676590344787977, \"min\": 1.0, \"25%\": 2.0, \"50%\": 2.0, \"75%\": 3.0, \"max\": 5.0}, \"international_reputation\": {\"count\": 19239.0, \"mean\": 1.094183689380945, \"std\": 0.3710981752071121, \"min\": 1.0, \"25%\": 1.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 5.0}, \"release_clause_eur\": {\"count\": 18063.0, \"mean\": 5374044.123346066, \"std\": 14948367.962500881, \"min\": 16000.0, \"25%\": 806000.0, \"50%\": 1600000.0, \"75%\": 3700000.0, \"max\": 373500000.0}, \"pace\": {\"count\": 17107.0, \"mean\": 68.21307067282399, \"std\": 10.933154561507958, \"min\": 28.0, \"25%\": 62.0, \"50%\": 69.0, \"75%\": 76.0, \"max\": 97.0}, \"shooting\": {\"count\": 17107.0, \"mean\": 52.3452972467411, \"std\": 14.051622953688101, \"min\": 18.0, \"25%\": 42.0, \"50%\": 54.0, \"75%\": 63.0, \"max\": 94.0}, \"passing\": {\"count\": 17107.0, \"mean\": 57.31256210907816, \"std\": 10.06896519025166, \"min\": 25.0, \"25%\": 51.0, \"50%\": 58.0, \"75%\": 64.0, \"max\": 93.0}, \"dribbling\": {\"count\": 17107.0, \"mean\": 62.56117378850763, \"std\": 9.651312199567759, \"min\": 27.0, \"25%\": 57.0, \"50%\": 64.0, \"75%\": 69.0, \"max\": 95.0}, \"defending\": {\"count\": 17107.0, \"mean\": 51.70363009294441, \"std\": 16.189746154376728, \"min\": 14.0, \"25%\": 37.0, \"50%\": 56.0, \"75%\": 64.0, \"max\": 91.0}, \"physic\": {\"count\": 17107.0, \"mean\": 64.82328871222306, \"std\": 9.791885635868047, \"min\": 29.0, \"25%\": 59.0, \"50%\": 66.0, \"75%\": 72.0, \"max\": 90.0}, \"attacking_crossing\": {\"count\": 19239.0, \"mean\": 49.577420863870266, \"std\": 18.03466131695005, \"min\": 6.0, \"25%\": 38.0, \"50%\": 54.0, \"75%\": 63.0, \"max\": 94.0}, \"attacking_finishing\": {\"count\": 19239.0, \"mean\": 45.89443318259785, \"std\": 19.721022626464112, \"min\": 2.0, \"25%\": 30.0, \"50%\": 50.0, \"75%\": 62.0, \"max\": 95.0}, \"attacking_heading_accuracy\": {\"count\": 19239.0, \"mean\": 51.78387650085763, \"std\": 17.29418252684362, \"min\": 5.0, \"25%\": 44.0, \"50%\": 55.0, \"75%\": 64.0, \"max\": 93.0}, \"attacking_short_passing\": {\"count\": 19239.0, \"mean\": 58.867976506055406, \"std\": 14.490857807319856, \"min\": 7.0, \"25%\": 54.0, \"50%\": 62.0, \"75%\": 68.0, \"max\": 94.0}, \"attacking_volleys\": {\"count\": 19239.0, 
\"mean\": 42.4638494724258, \"std\": 17.653328721077067, \"min\": 3.0, \"25%\": 30.0, \"50%\": 43.0, \"75%\": 56.0, \"max\": 90.0}, \"skill_dribbling\": {\"count\": 19239.0, \"mean\": 55.660429336244086, \"std\": 18.784590140366028, \"min\": 4.0, \"25%\": 50.0, \"50%\": 61.0, \"75%\": 68.0, \"max\": 96.0}, \"skill_curve\": {\"count\": 19239.0, \"mean\": 47.2689328967202, \"std\": 18.181085023876008, \"min\": 6.0, \"25%\": 35.0, \"50%\": 49.0, \"75%\": 61.0, \"max\": 94.0}, \"skill_fk_accuracy\": {\"count\": 19239.0, \"mean\": 42.24902541712147, \"std\": 17.178590067641114, \"min\": 4.0, \"25%\": 31.0, \"50%\": 41.0, \"75%\": 55.0, \"max\": 94.0}, \"skill_long_passing\": {\"count\": 19239.0, \"mean\": 53.07224907739487, \"std\": 15.026568932185066, \"min\": 9.0, \"25%\": 44.0, \"50%\": 56.0, \"75%\": 64.0, \"max\": 93.0}, \"skill_ball_control\": {\"count\": 19239.0, \"mean\": 58.472009979728675, \"std\": 16.663721903687765, \"min\": 8.0, \"25%\": 55.0, \"50%\": 63.0, \"75%\": 69.0, \"max\": 96.0}, \"movement_acceleration\": {\"count\": 19239.0, \"mean\": 64.65289256198348, \"std\": 15.167399449385732, \"min\": 14.0, \"25%\": 57.0, \"50%\": 67.0, \"75%\": 75.0, \"max\": 97.0}, \"movement_sprint_speed\": {\"count\": 19239.0, \"mean\": 64.7149020219346, \"std\": 14.965426485553154, \"min\": 15.0, \"25%\": 58.0, \"50%\": 68.0, \"75%\": 75.0, \"max\": 97.0}, \"movement_agility\": {\"count\": 19239.0, \"mean\": 63.50007796663028, \"std\": 14.86228452470993, \"min\": 18.0, \"25%\": 55.0, \"50%\": 66.0, \"75%\": 74.0, \"max\": 96.0}, \"movement_reactions\": {\"count\": 19239.0, \"mean\": 61.450023389989084, \"std\": 9.042281207641352, \"min\": 25.0, \"25%\": 56.0, \"50%\": 62.0, \"75%\": 67.0, \"max\": 94.0}, \"movement_balance\": {\"count\": 19239.0, \"mean\": 64.06861063464837, \"std\": 14.324788847506273, \"min\": 15.0, \"25%\": 56.0, \"50%\": 66.0, \"75%\": 74.0, \"max\": 96.0}, \"power_shot_power\": {\"count\": 19239.0, \"mean\": 57.77685950413223, \"std\": 13.192224146510023, \"min\": 20.0, \"25%\": 48.0, \"50%\": 59.0, \"75%\": 68.0, \"max\": 95.0}, \"power_jumping\": {\"count\": 19239.0, \"mean\": 64.81350382036489, \"std\": 12.122976897565113, \"min\": 22.0, \"25%\": 57.0, \"50%\": 65.0, \"75%\": 73.0, \"max\": 95.0}, \"power_stamina\": {\"count\": 19239.0, \"mean\": 63.084879671500595, \"std\": 16.14527944693461, \"min\": 12.0, \"25%\": 56.0, \"50%\": 66.0, \"75%\": 74.0, \"max\": 97.0}, \"power_strength\": {\"count\": 19239.0, \"mean\": 65.0077446852747, \"std\": 12.663517950076693, \"min\": 19.0, \"25%\": 57.0, \"50%\": 66.0, \"75%\": 74.0, \"max\": 97.0}, \"power_long_shots\": {\"count\": 19239.0, \"mean\": 46.642704922293255, \"std\": 19.41158265576437, \"min\": 4.0, \"25%\": 32.0, \"50%\": 51.0, \"75%\": 62.0, \"max\": 94.0}, \"mentality_aggression\": {\"count\": 19239.0, \"mean\": 55.53895732626436, \"std\": 16.972180768163998, \"min\": 10.0, \"25%\": 44.0, \"50%\": 58.0, \"75%\": 68.0, \"max\": 95.0}, \"mentality_interceptions\": {\"count\": 19239.0, \"mean\": 46.6135454025677, \"std\": 20.677077097912825, \"min\": 3.0, \"25%\": 26.0, \"50%\": 53.0, \"75%\": 64.0, \"max\": 91.0}, \"mentality_positioning\": {\"count\": 19239.0, \"mean\": 50.330214668122046, \"std\": 19.621601180421887, \"min\": 2.0, \"25%\": 40.0, \"50%\": 56.0, \"75%\": 64.0, \"max\": 96.0}, \"mentality_vision\": {\"count\": 19239.0, \"mean\": 53.96460314985186, \"std\": 13.650481029216035, \"min\": 10.0, \"25%\": 45.0, \"50%\": 55.0, \"75%\": 64.0, \"max\": 95.0}, \"mentality_penalties\": {\"count\": 19239.0, 
\"mean\": 47.85872446592858, \"std\": 15.768582591947187, \"min\": 7.0, \"25%\": 38.0, \"50%\": 49.0, \"75%\": 60.0, \"max\": 93.0}, \"mentality_composure\": {\"count\": 19239.0, \"mean\": 57.92983003274598, \"std\": 12.159326410704017, \"min\": 12.0, \"25%\": 50.0, \"50%\": 59.0, \"75%\": 66.0, \"max\": 96.0}, \"defending_marking_awareness\": {\"count\": 19239.0, \"mean\": 46.60174645251832, \"std\": 20.200806789158282, \"min\": 4.0, \"25%\": 29.0, \"50%\": 52.0, \"75%\": 63.0, \"max\": 93.0}, \"defending_standing_tackle\": {\"count\": 19239.0, \"mean\": 48.04558448983835, \"std\": 21.232717907052557, \"min\": 5.0, \"25%\": 28.0, \"50%\": 56.0, \"75%\": 65.0, \"max\": 93.0}, \"defending_sliding_tackle\": {\"count\": 19239.0, \"mean\": 45.90669993242892, \"std\": 20.755683442737006, \"min\": 5.0, \"25%\": 25.0, \"50%\": 53.0, \"75%\": 63.0, \"max\": 92.0}, \"goalkeeping_diving\": {\"count\": 19239.0, \"mean\": 16.40610218826342, \"std\": 17.5740278866242, \"min\": 2.0, \"25%\": 8.0, \"50%\": 11.0, \"75%\": 14.0, \"max\": 91.0}, \"goalkeeping_handling\": {\"count\": 19239.0, \"mean\": 16.192473621290087, \"std\": 16.83952849205467, \"min\": 2.0, \"25%\": 8.0, \"50%\": 11.0, \"75%\": 14.0, \"max\": 92.0}, \"goalkeeping_kicking\": {\"count\": 19239.0, \"mean\": 16.05535630750039, \"std\": 16.564554017979486, \"min\": 2.0, \"25%\": 8.0, \"50%\": 11.0, \"75%\": 14.0, \"max\": 93.0}, \"goalkeeping_positioning\": {\"count\": 19239.0, \"mean\": 16.229273870783306, \"std\": 17.05977893866491, \"min\": 2.0, \"25%\": 8.0, \"50%\": 11.0, \"75%\": 14.0, \"max\": 92.0}, \"goalkeeping_reflexes\": {\"count\": 19239.0, \"mean\": 16.491813503820364, \"std\": 17.88483340321158, \"min\": 2.0, \"25%\": 8.0, \"50%\": 11.0, \"75%\": 14.0, \"max\": 90.0}, \"goalkeeping_speed\": {\"count\": 2132.0, \"mean\": 36.43996247654784, \"std\": 10.751563246633594, \"min\": 15.0, \"25%\": 27.0, \"50%\": 36.0, \"75%\": 45.0, \"max\": 65.0}}", "examples": "{\"sofifa_id\":{\"0\":158023,\"1\":188545,\"2\":20801,\"3\":190871},\"player_url\":{\"0\":\"https:\\/\\/sofifa.com\\/player\\/158023\\/lionel-messi\\/220002\",\"1\":\"https:\\/\\/sofifa.com\\/player\\/188545\\/robert-lewandowski\\/220002\",\"2\":\"https:\\/\\/sofifa.com\\/player\\/20801\\/c-ronaldo-dos-santos-aveiro\\/220002\",\"3\":\"https:\\/\\/sofifa.com\\/player\\/190871\\/neymar-da-silva-santos-jr\\/220002\"},\"short_name\":{\"0\":\"L. Messi\",\"1\":\"R. Lewandowski\",\"2\":\"Cristiano Ronaldo\",\"3\":\"Neymar Jr\"},\"long_name\":{\"0\":\"Lionel Andr\\u00e9s Messi Cuccittini\",\"1\":\"Robert Lewandowski\",\"2\":\"Cristiano Ronaldo dos Santos Aveiro\",\"3\":\"Neymar da Silva Santos J\\u00fanior\"},\"player_positions\":{\"0\":\"RW, ST, CF\",\"1\":\"ST\",\"2\":\"ST, LW\",\"3\":\"LW, CAM\"},\"overall\":{\"0\":93,\"1\":92,\"2\":91,\"3\":91},\"potential\":{\"0\":93,\"1\":92,\"2\":91,\"3\":91},\"value_eur\":{\"0\":78000000.0,\"1\":119500000.0,\"2\":45000000.0,\"3\":129000000.0},\"wage_eur\":{\"0\":320000.0,\"1\":270000.0,\"2\":270000.0,\"3\":270000.0},\"age\":{\"0\":34,\"1\":32,\"2\":36,\"3\":29},\"dob\":{\"0\":\"1987-06-24\",\"1\":\"1988-08-21\",\"2\":\"1985-02-05\",\"3\":\"1992-02-05\"},\"height_cm\":{\"0\":170,\"1\":185,\"2\":187,\"3\":175},\"weight_kg\":{\"0\":72,\"1\":81,\"2\":83,\"3\":68},\"club_team_id\":{\"0\":73.0,\"1\":21.0,\"2\":11.0,\"3\":73.0},\"club_name\":{\"0\":\"Paris Saint-Germain\",\"1\":\"FC Bayern M\\u00fcnchen\",\"2\":\"Manchester United\",\"3\":\"Paris Saint-Germain\"},\"league_name\":{\"0\":\"French Ligue 1\",\"1\":\"German 1. 
Bundesliga\",\"2\":\"English Premier League\",\"3\":\"French Ligue 1\"},\"league_level\":{\"0\":1.0,\"1\":1.0,\"2\":1.0,\"3\":1.0},\"club_position\":{\"0\":\"RW\",\"1\":\"ST\",\"2\":\"ST\",\"3\":\"LW\"},\"club_jersey_number\":{\"0\":30.0,\"1\":9.0,\"2\":7.0,\"3\":10.0},\"club_loaned_from\":{\"0\":null,\"1\":null,\"2\":null,\"3\":null},\"club_joined\":{\"0\":\"2021-08-10\",\"1\":\"2014-07-01\",\"2\":\"2021-08-27\",\"3\":\"2017-08-03\"},\"club_contract_valid_until\":{\"0\":2023.0,\"1\":2023.0,\"2\":2023.0,\"3\":2025.0},\"nationality_id\":{\"0\":52,\"1\":37,\"2\":38,\"3\":54},\"nationality_name\":{\"0\":\"Argentina\",\"1\":\"Poland\",\"2\":\"Portugal\",\"3\":\"Brazil\"},\"nation_team_id\":{\"0\":1369.0,\"1\":1353.0,\"2\":1354.0,\"3\":null},\"nation_position\":{\"0\":\"RW\",\"1\":\"RS\",\"2\":\"ST\",\"3\":null},\"nation_jersey_number\":{\"0\":10.0,\"1\":9.0,\"2\":7.0,\"3\":null},\"preferred_foot\":{\"0\":\"Left\",\"1\":\"Right\",\"2\":\"Right\",\"3\":\"Right\"},\"weak_foot\":{\"0\":4,\"1\":4,\"2\":4,\"3\":5},\"skill_moves\":{\"0\":4,\"1\":4,\"2\":5,\"3\":5},\"international_reputation\":{\"0\":5,\"1\":5,\"2\":5,\"3\":5},\"work_rate\":{\"0\":\"Medium\\/Low\",\"1\":\"High\\/Medium\",\"2\":\"High\\/Low\",\"3\":\"High\\/Medium\"},\"body_type\":{\"0\":\"Unique\",\"1\":\"Unique\",\"2\":\"Unique\",\"3\":\"Unique\"},\"real_face\":{\"0\":\"Yes\",\"1\":\"Yes\",\"2\":\"Yes\",\"3\":\"Yes\"},\"release_clause_eur\":{\"0\":144300000.0,\"1\":197200000.0,\"2\":83300000.0,\"3\":238700000.0},\"player_tags\":{\"0\":\"#Dribbler, #Distance Shooter, #FK Specialist, #Acrobat, #Clinical Finisher, #Complete Forward\",\"1\":\"#Aerial Threat, #Distance Shooter, #Clinical Finisher, #Complete Forward\",\"2\":\"#Aerial Threat, #Dribbler, #Distance Shooter, #Crosser, #Acrobat, #Clinical Finisher, #Complete Forward\",\"3\":\"#Speedster, #Dribbler, #Playmaker, #FK Specialist, #Acrobat, #Complete Midfielder\"},\"player_traits\":{\"0\":\"Finesse Shot, Long Shot Taker (AI), Playmaker (AI), Outside Foot Shot, One Club Player, Chip Shot (AI), Technical Dribbler (AI)\",\"1\":\"Solid Player, Finesse Shot, Outside Foot Shot, Chip Shot (AI)\",\"2\":\"Power Free-Kick, Flair, Long Shot Taker (AI), Speed Dribbler (AI), Outside Foot Shot\",\"3\":\"Injury Prone, Flair, Speed Dribbler (AI), Playmaker (AI), Outside Foot Shot, Technical Dribbler 
(AI)\"},\"pace\":{\"0\":85.0,\"1\":78.0,\"2\":87.0,\"3\":91.0},\"shooting\":{\"0\":92.0,\"1\":92.0,\"2\":94.0,\"3\":83.0},\"passing\":{\"0\":91.0,\"1\":79.0,\"2\":80.0,\"3\":86.0},\"dribbling\":{\"0\":95.0,\"1\":86.0,\"2\":88.0,\"3\":94.0},\"defending\":{\"0\":34.0,\"1\":44.0,\"2\":34.0,\"3\":37.0},\"physic\":{\"0\":65.0,\"1\":82.0,\"2\":75.0,\"3\":63.0},\"attacking_crossing\":{\"0\":85,\"1\":71,\"2\":87,\"3\":85},\"attacking_finishing\":{\"0\":95,\"1\":95,\"2\":95,\"3\":83},\"attacking_heading_accuracy\":{\"0\":70,\"1\":90,\"2\":90,\"3\":63},\"attacking_short_passing\":{\"0\":91,\"1\":85,\"2\":80,\"3\":86},\"attacking_volleys\":{\"0\":88,\"1\":89,\"2\":86,\"3\":86},\"skill_dribbling\":{\"0\":96,\"1\":85,\"2\":88,\"3\":95},\"skill_curve\":{\"0\":93,\"1\":79,\"2\":81,\"3\":88},\"skill_fk_accuracy\":{\"0\":94,\"1\":85,\"2\":84,\"3\":87},\"skill_long_passing\":{\"0\":91,\"1\":70,\"2\":77,\"3\":81},\"skill_ball_control\":{\"0\":96,\"1\":88,\"2\":88,\"3\":95},\"movement_acceleration\":{\"0\":91,\"1\":77,\"2\":85,\"3\":93},\"movement_sprint_speed\":{\"0\":80,\"1\":79,\"2\":88,\"3\":89},\"movement_agility\":{\"0\":91,\"1\":77,\"2\":86,\"3\":96},\"movement_reactions\":{\"0\":94,\"1\":93,\"2\":94,\"3\":89},\"movement_balance\":{\"0\":95,\"1\":82,\"2\":74,\"3\":84},\"power_shot_power\":{\"0\":86,\"1\":90,\"2\":94,\"3\":80},\"power_jumping\":{\"0\":68,\"1\":85,\"2\":95,\"3\":64},\"power_stamina\":{\"0\":72,\"1\":76,\"2\":77,\"3\":81},\"power_strength\":{\"0\":69,\"1\":86,\"2\":77,\"3\":53},\"power_long_shots\":{\"0\":94,\"1\":87,\"2\":93,\"3\":81},\"mentality_aggression\":{\"0\":44,\"1\":81,\"2\":63,\"3\":63},\"mentality_interceptions\":{\"0\":40,\"1\":49,\"2\":29,\"3\":37},\"mentality_positioning\":{\"0\":93,\"1\":95,\"2\":95,\"3\":86},\"mentality_vision\":{\"0\":95,\"1\":81,\"2\":76,\"3\":90},\"mentality_penalties\":{\"0\":75,\"1\":90,\"2\":88,\"3\":93},\"mentality_composure\":{\"0\":96,\"1\":88,\"2\":95,\"3\":93},\"defending_marking_awareness\":{\"0\":20,\"1\":35,\"2\":24,\"3\":35},\"defending_standing_tackle\":{\"0\":35,\"1\":42,\"2\":32,\"3\":32},\"defending_sliding_tackle\":{\"0\":24,\"1\":19,\"2\":24,\"3\":29},\"goalkeeping_diving\":{\"0\":6,\"1\":15,\"2\":7,\"3\":9},\"goalkeeping_handling\":{\"0\":11,\"1\":6,\"2\":11,\"3\":9},\"goalkeeping_kicking\":{\"0\":15,\"1\":12,\"2\":15,\"3\":15},\"goalkeeping_positioning\":{\"0\":14,\"1\":8,\"2\":14,\"3\":15},\"goalkeeping_reflexes\":{\"0\":8,\"1\":10,\"2\":11,\"3\":11},\"goalkeeping_speed\":{\"0\":null,\"1\":null,\"2\":null,\"3\":null},\"ls\":{\"0\":\"89+3\",\"1\":\"90+2\",\"2\":\"90+1\",\"3\":\"83+3\"},\"st\":{\"0\":\"89+3\",\"1\":\"90+2\",\"2\":\"90+1\",\"3\":\"83+3\"},\"rs\":{\"0\":\"89+3\",\"1\":\"90+2\",\"2\":\"90+1\",\"3\":\"83+3\"},\"lw\":{\"0\":\"92\",\"1\":\"85\",\"2\":\"88\",\"3\":\"90\"},\"lf\":{\"0\":\"93\",\"1\":\"88\",\"2\":\"89\",\"3\":\"88\"},\"cf\":{\"0\":\"93\",\"1\":\"88\",\"2\":\"89\",\"3\":\"88\"},\"rf\":{\"0\":\"93\",\"1\":\"88\",\"2\":\"89\",\"3\":\"88\"},\"rw\":{\"0\":\"92\",\"1\":\"85\",\"2\":\"88\",\"3\":\"90\"},\"lam\":{\"0\":\"93\",\"1\":\"86+3\",\"2\":\"86+3\",\"3\":\"89+2\"},\"cam\":{\"0\":\"93\",\"1\":\"86+3\",\"2\":\"86+3\",\"3\":\"89+2\"},\"ram\":{\"0\":\"93\",\"1\":\"86+3\",\"2\":\"86+3\",\"3\":\"89+2\"},\"lm\":{\"0\":\"91+2\",\"1\":\"84+3\",\"2\":\"86+3\",\"3\":\"89+2\"},\"lcm\":{\"0\":\"87+3\",\"1\":\"80+3\",\"2\":\"78+3\",\"3\":\"82+3\"},\"cm\":{\"0\":\"87+3\",\"1\":\"80+3\",\"2\":\"78+3\",\"3\":\"82+3\"},\"rcm\":{\"0\":\"87+3\",\"1\":\"80+3\",\"2\":\"78+3\",\"3\":\"82+3\"},\"rm\":{\"0\":\"91+2\",\"1\":\"84+3\",\"2
\":\"86+3\",\"3\":\"89+2\"},\"lwb\":{\"0\":\"66+3\",\"1\":\"64+3\",\"2\":\"63+3\",\"3\":\"67+3\"},\"ldm\":{\"0\":\"64+3\",\"1\":\"66+3\",\"2\":\"59+3\",\"3\":\"63+3\"},\"cdm\":{\"0\":\"64+3\",\"1\":\"66+3\",\"2\":\"59+3\",\"3\":\"63+3\"},\"rdm\":{\"0\":\"64+3\",\"1\":\"66+3\",\"2\":\"59+3\",\"3\":\"63+3\"},\"rwb\":{\"0\":\"66+3\",\"1\":\"64+3\",\"2\":\"63+3\",\"3\":\"67+3\"},\"lb\":{\"0\":\"61+3\",\"1\":\"61+3\",\"2\":\"60+3\",\"3\":\"62+3\"},\"lcb\":{\"0\":\"50+3\",\"1\":\"60+3\",\"2\":\"53+3\",\"3\":\"50+3\"},\"cb\":{\"0\":\"50+3\",\"1\":\"60+3\",\"2\":\"53+3\",\"3\":\"50+3\"},\"rcb\":{\"0\":\"50+3\",\"1\":\"60+3\",\"2\":\"53+3\",\"3\":\"50+3\"},\"rb\":{\"0\":\"61+3\",\"1\":\"61+3\",\"2\":\"60+3\",\"3\":\"62+3\"},\"gk\":{\"0\":\"19+3\",\"1\":\"19+3\",\"2\":\"20+3\",\"3\":\"20+3\"},\"player_face_url\":{\"0\":\"https:\\/\\/cdn.sofifa.net\\/players\\/158\\/023\\/22_120.png\",\"1\":\"https:\\/\\/cdn.sofifa.net\\/players\\/188\\/545\\/22_120.png\",\"2\":\"https:\\/\\/cdn.sofifa.net\\/players\\/020\\/801\\/22_120.png\",\"3\":\"https:\\/\\/cdn.sofifa.net\\/players\\/190\\/871\\/22_120.png\"},\"club_logo_url\":{\"0\":\"https:\\/\\/cdn.sofifa.net\\/teams\\/73\\/60.png\",\"1\":\"https:\\/\\/cdn.sofifa.net\\/teams\\/21\\/60.png\",\"2\":\"https:\\/\\/cdn.sofifa.net\\/teams\\/11\\/60.png\",\"3\":\"https:\\/\\/cdn.sofifa.net\\/teams\\/73\\/60.png\"},\"club_flag_url\":{\"0\":\"https:\\/\\/cdn.sofifa.net\\/flags\\/fr.png\",\"1\":\"https:\\/\\/cdn.sofifa.net\\/flags\\/de.png\",\"2\":\"https:\\/\\/cdn.sofifa.net\\/flags\\/gb-eng.png\",\"3\":\"https:\\/\\/cdn.sofifa.net\\/flags\\/fr.png\"},\"nation_logo_url\":{\"0\":\"https:\\/\\/cdn.sofifa.net\\/teams\\/1369\\/60.png\",\"1\":\"https:\\/\\/cdn.sofifa.net\\/teams\\/1353\\/60.png\",\"2\":\"https:\\/\\/cdn.sofifa.net\\/teams\\/1354\\/60.png\",\"3\":null},\"nation_flag_url\":{\"0\":\"https:\\/\\/cdn.sofifa.net\\/flags\\/ar.png\",\"1\":\"https:\\/\\/cdn.sofifa.net\\/flags\\/pl.png\",\"2\":\"https:\\/\\/cdn.sofifa.net\\/flags\\/pt.png\",\"3\":\"https:\\/\\/cdn.sofifa.net\\/flags\\/br.png\"}}"}}]
| true | 1 |
<start_data_description><data_path>fifa-22-complete-player-dataset/players_22.csv:
<column_names>
['sofifa_id', 'player_url', 'short_name', 'long_name', 'player_positions', 'overall', 'potential', 'value_eur', 'wage_eur', 'age', 'dob', 'height_cm', 'weight_kg', 'club_team_id', 'club_name', 'league_name', 'league_level', 'club_position', 'club_jersey_number', 'club_loaned_from', 'club_joined', 'club_contract_valid_until', 'nationality_id', 'nationality_name', 'nation_team_id', 'nation_position', 'nation_jersey_number', 'preferred_foot', 'weak_foot', 'skill_moves', 'international_reputation', 'work_rate', 'body_type', 'real_face', 'release_clause_eur', 'player_tags', 'player_traits', 'pace', 'shooting', 'passing', 'dribbling', 'defending', 'physic', 'attacking_crossing', 'attacking_finishing', 'attacking_heading_accuracy', 'attacking_short_passing', 'attacking_volleys', 'skill_dribbling', 'skill_curve', 'skill_fk_accuracy', 'skill_long_passing', 'skill_ball_control', 'movement_acceleration', 'movement_sprint_speed', 'movement_agility', 'movement_reactions', 'movement_balance', 'power_shot_power', 'power_jumping', 'power_stamina', 'power_strength', 'power_long_shots', 'mentality_aggression', 'mentality_interceptions', 'mentality_positioning', 'mentality_vision', 'mentality_penalties', 'mentality_composure', 'defending_marking_awareness', 'defending_standing_tackle', 'defending_sliding_tackle', 'goalkeeping_diving', 'goalkeeping_handling', 'goalkeeping_kicking', 'goalkeeping_positioning', 'goalkeeping_reflexes', 'goalkeeping_speed', 'ls', 'st', 'rs', 'lw', 'lf', 'cf', 'rf', 'rw', 'lam', 'cam', 'ram', 'lm', 'lcm', 'cm', 'rcm', 'rm', 'lwb', 'ldm', 'cdm', 'rdm', 'rwb', 'lb', 'lcb', 'cb', 'rcb', 'rb', 'gk', 'player_face_url', 'club_logo_url', 'club_flag_url', 'nation_logo_url', 'nation_flag_url']
<column_types>
{'sofifa_id': 'int64', 'player_url': 'object', 'short_name': 'object', 'long_name': 'object', 'player_positions': 'object', 'overall': 'int64', 'potential': 'int64', 'value_eur': 'float64', 'wage_eur': 'float64', 'age': 'int64', 'dob': 'object', 'height_cm': 'int64', 'weight_kg': 'int64', 'club_team_id': 'float64', 'club_name': 'object', 'league_name': 'object', 'league_level': 'float64', 'club_position': 'object', 'club_jersey_number': 'float64', 'club_loaned_from': 'object', 'club_joined': 'object', 'club_contract_valid_until': 'float64', 'nationality_id': 'int64', 'nationality_name': 'object', 'nation_team_id': 'float64', 'nation_position': 'object', 'nation_jersey_number': 'float64', 'preferred_foot': 'object', 'weak_foot': 'int64', 'skill_moves': 'int64', 'international_reputation': 'int64', 'work_rate': 'object', 'body_type': 'object', 'real_face': 'object', 'release_clause_eur': 'float64', 'player_tags': 'object', 'player_traits': 'object', 'pace': 'float64', 'shooting': 'float64', 'passing': 'float64', 'dribbling': 'float64', 'defending': 'float64', 'physic': 'float64', 'attacking_crossing': 'int64', 'attacking_finishing': 'int64', 'attacking_heading_accuracy': 'int64', 'attacking_short_passing': 'int64', 'attacking_volleys': 'int64', 'skill_dribbling': 'int64', 'skill_curve': 'int64', 'skill_fk_accuracy': 'int64', 'skill_long_passing': 'int64', 'skill_ball_control': 'int64', 'movement_acceleration': 'int64', 'movement_sprint_speed': 'int64', 'movement_agility': 'int64', 'movement_reactions': 'int64', 'movement_balance': 'int64', 'power_shot_power': 'int64', 'power_jumping': 'int64', 'power_stamina': 'int64', 'power_strength': 'int64', 'power_long_shots': 'int64', 'mentality_aggression': 'int64', 'mentality_interceptions': 'int64', 'mentality_positioning': 'int64', 'mentality_vision': 'int64', 'mentality_penalties': 'int64', 'mentality_composure': 'int64', 'defending_marking_awareness': 'int64', 'defending_standing_tackle': 'int64', 'defending_sliding_tackle': 'int64', 'goalkeeping_diving': 'int64', 'goalkeeping_handling': 'int64', 'goalkeeping_kicking': 'int64', 'goalkeeping_positioning': 'int64', 'goalkeeping_reflexes': 'int64', 'goalkeeping_speed': 'float64', 'ls': 'object', 'st': 'object', 'rs': 'object', 'lw': 'object', 'lf': 'object', 'cf': 'object', 'rf': 'object', 'rw': 'object', 'lam': 'object', 'cam': 'object', 'ram': 'object', 'lm': 'object', 'lcm': 'object', 'cm': 'object', 'rcm': 'object', 'rm': 'object', 'lwb': 'object', 'ldm': 'object', 'cdm': 'object', 'rdm': 'object', 'rwb': 'object', 'lb': 'object', 'lcb': 'object', 'cb': 'object', 'rcb': 'object', 'rb': 'object', 'gk': 'object', 'player_face_url': 'object', 'club_logo_url': 'object', 'club_flag_url': 'object', 'nation_logo_url': 'object', 'nation_flag_url': 'object'}
<dataframe_Summary>
{'sofifa_id': {'count': 19239.0, 'mean': 231468.08695878164, 'std': 27039.717497127018, 'min': 41.0, '25%': 214413.5, '50%': 236543.0, '75%': 253532.5, 'max': 264640.0}, 'overall': {'count': 19239.0, 'mean': 65.77218150631529, 'std': 6.880231506861689, 'min': 47.0, '25%': 61.0, '50%': 66.0, '75%': 70.0, 'max': 93.0}, 'potential': {'count': 19239.0, 'mean': 71.07937002962731, 'std': 6.0862131012609, 'min': 49.0, '25%': 67.0, '50%': 71.0, '75%': 75.0, 'max': 95.0}, 'value_eur': {'count': 19165.0, 'mean': 2850451.813201148, 'std': 7613699.947458978, 'min': 9000.0, '25%': 475000.0, '50%': 975000.0, '75%': 2000000.0, 'max': 194000000.0}, 'wage_eur': {'count': 19178.0, 'mean': 9017.989362811555, 'std': 19470.176723602686, 'min': 500.0, '25%': 1000.0, '50%': 3000.0, '75%': 8000.0, 'max': 350000.0}, 'age': {'count': 19239.0, 'mean': 25.210821768283175, 'std': 4.748235247092781, 'min': 16.0, '25%': 21.0, '50%': 25.0, '75%': 29.0, 'max': 54.0}, 'height_cm': {'count': 19239.0, 'mean': 181.29970372680492, 'std': 6.863179177196187, 'min': 155.0, '25%': 176.0, '50%': 181.0, '75%': 186.0, 'max': 206.0}, 'weight_kg': {'count': 19239.0, 'mean': 74.94303238214044, 'std': 7.069434064186432, 'min': 49.0, '25%': 70.0, '50%': 75.0, '75%': 80.0, 'max': 110.0}, 'club_team_id': {'count': 19178.0, 'mean': 50580.4981228491, 'std': 54401.86853481683, 'min': 1.0, '25%': 479.0, '50%': 1938.0, '75%': 111139.0, 'max': 115820.0}, 'league_level': {'count': 19178.0, 'mean': 1.3543643758473252, 'std': 0.7478651440250982, 'min': 1.0, '25%': 1.0, '50%': 1.0, '75%': 1.0, 'max': 5.0}, 'club_jersey_number': {'count': 19178.0, 'mean': 20.945249765356138, 'std': 17.909369141981372, 'min': 1.0, '25%': 9.0, '50%': 18.0, '75%': 27.0, 'max': 99.0}, 'club_contract_valid_until': {'count': 19178.0, 'mean': 2022.7640004171446, 'std': 1.2132025171866476, 'min': 2021.0, '25%': 2022.0, '50%': 2022.0, '75%': 2024.0, 'max': 2031.0}, 'nationality_id': {'count': 19239.0, 'mean': 58.60268205208171, 'std': 50.29861391203941, 'min': 1.0, '25%': 21.0, '50%': 45.0, '75%': 60.0, 'max': 219.0}, 'nation_team_id': {'count': 759.0, 'mean': 14480.848484848484, 'std': 35328.73021738153, 'min': 1318.0, '25%': 1338.0, '50%': 1357.0, '75%': 1386.0, 'max': 111473.0}, 'nation_jersey_number': {'count': 759.0, 'mean': 12.567852437417654, 'std': 7.039115702087771, 'min': 1.0, '25%': 7.0, '50%': 12.0, '75%': 19.0, 'max': 28.0}, 'weak_foot': {'count': 19239.0, 'mean': 2.9461510473517336, 'std': 0.671560478048015, 'min': 1.0, '25%': 3.0, '50%': 3.0, '75%': 3.0, 'max': 5.0}, 'skill_moves': {'count': 19239.0, 'mean': 2.352461146629243, 'std': 0.7676590344787977, 'min': 1.0, '25%': 2.0, '50%': 2.0, '75%': 3.0, 'max': 5.0}, 'international_reputation': {'count': 19239.0, 'mean': 1.094183689380945, 'std': 0.3710981752071121, 'min': 1.0, '25%': 1.0, '50%': 1.0, '75%': 1.0, 'max': 5.0}, 'release_clause_eur': {'count': 18063.0, 'mean': 5374044.123346066, 'std': 14948367.962500881, 'min': 16000.0, '25%': 806000.0, '50%': 1600000.0, '75%': 3700000.0, 'max': 373500000.0}, 'pace': {'count': 17107.0, 'mean': 68.21307067282399, 'std': 10.933154561507958, 'min': 28.0, '25%': 62.0, '50%': 69.0, '75%': 76.0, 'max': 97.0}, 'shooting': {'count': 17107.0, 'mean': 52.3452972467411, 'std': 14.051622953688101, 'min': 18.0, '25%': 42.0, '50%': 54.0, '75%': 63.0, 'max': 94.0}, 'passing': {'count': 17107.0, 'mean': 57.31256210907816, 'std': 10.06896519025166, 'min': 25.0, '25%': 51.0, '50%': 58.0, '75%': 64.0, 'max': 93.0}, 'dribbling': {'count': 17107.0, 'mean': 62.56117378850763, 'std': 
9.651312199567759, 'min': 27.0, '25%': 57.0, '50%': 64.0, '75%': 69.0, 'max': 95.0}, 'defending': {'count': 17107.0, 'mean': 51.70363009294441, 'std': 16.189746154376728, 'min': 14.0, '25%': 37.0, '50%': 56.0, '75%': 64.0, 'max': 91.0}, 'physic': {'count': 17107.0, 'mean': 64.82328871222306, 'std': 9.791885635868047, 'min': 29.0, '25%': 59.0, '50%': 66.0, '75%': 72.0, 'max': 90.0}, 'attacking_crossing': {'count': 19239.0, 'mean': 49.577420863870266, 'std': 18.03466131695005, 'min': 6.0, '25%': 38.0, '50%': 54.0, '75%': 63.0, 'max': 94.0}, 'attacking_finishing': {'count': 19239.0, 'mean': 45.89443318259785, 'std': 19.721022626464112, 'min': 2.0, '25%': 30.0, '50%': 50.0, '75%': 62.0, 'max': 95.0}, 'attacking_heading_accuracy': {'count': 19239.0, 'mean': 51.78387650085763, 'std': 17.29418252684362, 'min': 5.0, '25%': 44.0, '50%': 55.0, '75%': 64.0, 'max': 93.0}, 'attacking_short_passing': {'count': 19239.0, 'mean': 58.867976506055406, 'std': 14.490857807319856, 'min': 7.0, '25%': 54.0, '50%': 62.0, '75%': 68.0, 'max': 94.0}, 'attacking_volleys': {'count': 19239.0, 'mean': 42.4638494724258, 'std': 17.653328721077067, 'min': 3.0, '25%': 30.0, '50%': 43.0, '75%': 56.0, 'max': 90.0}, 'skill_dribbling': {'count': 19239.0, 'mean': 55.660429336244086, 'std': 18.784590140366028, 'min': 4.0, '25%': 50.0, '50%': 61.0, '75%': 68.0, 'max': 96.0}, 'skill_curve': {'count': 19239.0, 'mean': 47.2689328967202, 'std': 18.181085023876008, 'min': 6.0, '25%': 35.0, '50%': 49.0, '75%': 61.0, 'max': 94.0}, 'skill_fk_accuracy': {'count': 19239.0, 'mean': 42.24902541712147, 'std': 17.178590067641114, 'min': 4.0, '25%': 31.0, '50%': 41.0, '75%': 55.0, 'max': 94.0}, 'skill_long_passing': {'count': 19239.0, 'mean': 53.07224907739487, 'std': 15.026568932185066, 'min': 9.0, '25%': 44.0, '50%': 56.0, '75%': 64.0, 'max': 93.0}, 'skill_ball_control': {'count': 19239.0, 'mean': 58.472009979728675, 'std': 16.663721903687765, 'min': 8.0, '25%': 55.0, '50%': 63.0, '75%': 69.0, 'max': 96.0}, 'movement_acceleration': {'count': 19239.0, 'mean': 64.65289256198348, 'std': 15.167399449385732, 'min': 14.0, '25%': 57.0, '50%': 67.0, '75%': 75.0, 'max': 97.0}, 'movement_sprint_speed': {'count': 19239.0, 'mean': 64.7149020219346, 'std': 14.965426485553154, 'min': 15.0, '25%': 58.0, '50%': 68.0, '75%': 75.0, 'max': 97.0}, 'movement_agility': {'count': 19239.0, 'mean': 63.50007796663028, 'std': 14.86228452470993, 'min': 18.0, '25%': 55.0, '50%': 66.0, '75%': 74.0, 'max': 96.0}, 'movement_reactions': {'count': 19239.0, 'mean': 61.450023389989084, 'std': 9.042281207641352, 'min': 25.0, '25%': 56.0, '50%': 62.0, '75%': 67.0, 'max': 94.0}, 'movement_balance': {'count': 19239.0, 'mean': 64.06861063464837, 'std': 14.324788847506273, 'min': 15.0, '25%': 56.0, '50%': 66.0, '75%': 74.0, 'max': 96.0}, 'power_shot_power': {'count': 19239.0, 'mean': 57.77685950413223, 'std': 13.192224146510023, 'min': 20.0, '25%': 48.0, '50%': 59.0, '75%': 68.0, 'max': 95.0}, 'power_jumping': {'count': 19239.0, 'mean': 64.81350382036489, 'std': 12.122976897565113, 'min': 22.0, '25%': 57.0, '50%': 65.0, '75%': 73.0, 'max': 95.0}, 'power_stamina': {'count': 19239.0, 'mean': 63.084879671500595, 'std': 16.14527944693461, 'min': 12.0, '25%': 56.0, '50%': 66.0, '75%': 74.0, 'max': 97.0}, 'power_strength': {'count': 19239.0, 'mean': 65.0077446852747, 'std': 12.663517950076693, 'min': 19.0, '25%': 57.0, '50%': 66.0, '75%': 74.0, 'max': 97.0}, 'power_long_shots': {'count': 19239.0, 'mean': 46.642704922293255, 'std': 19.41158265576437, 'min': 4.0, '25%': 32.0, '50%': 51.0, 
'75%': 62.0, 'max': 94.0}, 'mentality_aggression': {'count': 19239.0, 'mean': 55.53895732626436, 'std': 16.972180768163998, 'min': 10.0, '25%': 44.0, '50%': 58.0, '75%': 68.0, 'max': 95.0}, 'mentality_interceptions': {'count': 19239.0, 'mean': 46.6135454025677, 'std': 20.677077097912825, 'min': 3.0, '25%': 26.0, '50%': 53.0, '75%': 64.0, 'max': 91.0}, 'mentality_positioning': {'count': 19239.0, 'mean': 50.330214668122046, 'std': 19.621601180421887, 'min': 2.0, '25%': 40.0, '50%': 56.0, '75%': 64.0, 'max': 96.0}, 'mentality_vision': {'count': 19239.0, 'mean': 53.96460314985186, 'std': 13.650481029216035, 'min': 10.0, '25%': 45.0, '50%': 55.0, '75%': 64.0, 'max': 95.0}, 'mentality_penalties': {'count': 19239.0, 'mean': 47.85872446592858, 'std': 15.768582591947187, 'min': 7.0, '25%': 38.0, '50%': 49.0, '75%': 60.0, 'max': 93.0}, 'mentality_composure': {'count': 19239.0, 'mean': 57.92983003274598, 'std': 12.159326410704017, 'min': 12.0, '25%': 50.0, '50%': 59.0, '75%': 66.0, 'max': 96.0}, 'defending_marking_awareness': {'count': 19239.0, 'mean': 46.60174645251832, 'std': 20.200806789158282, 'min': 4.0, '25%': 29.0, '50%': 52.0, '75%': 63.0, 'max': 93.0}, 'defending_standing_tackle': {'count': 19239.0, 'mean': 48.04558448983835, 'std': 21.232717907052557, 'min': 5.0, '25%': 28.0, '50%': 56.0, '75%': 65.0, 'max': 93.0}, 'defending_sliding_tackle': {'count': 19239.0, 'mean': 45.90669993242892, 'std': 20.755683442737006, 'min': 5.0, '25%': 25.0, '50%': 53.0, '75%': 63.0, 'max': 92.0}, 'goalkeeping_diving': {'count': 19239.0, 'mean': 16.40610218826342, 'std': 17.5740278866242, 'min': 2.0, '25%': 8.0, '50%': 11.0, '75%': 14.0, 'max': 91.0}, 'goalkeeping_handling': {'count': 19239.0, 'mean': 16.192473621290087, 'std': 16.83952849205467, 'min': 2.0, '25%': 8.0, '50%': 11.0, '75%': 14.0, 'max': 92.0}, 'goalkeeping_kicking': {'count': 19239.0, 'mean': 16.05535630750039, 'std': 16.564554017979486, 'min': 2.0, '25%': 8.0, '50%': 11.0, '75%': 14.0, 'max': 93.0}, 'goalkeeping_positioning': {'count': 19239.0, 'mean': 16.229273870783306, 'std': 17.05977893866491, 'min': 2.0, '25%': 8.0, '50%': 11.0, '75%': 14.0, 'max': 92.0}, 'goalkeeping_reflexes': {'count': 19239.0, 'mean': 16.491813503820364, 'std': 17.88483340321158, 'min': 2.0, '25%': 8.0, '50%': 11.0, '75%': 14.0, 'max': 90.0}, 'goalkeeping_speed': {'count': 2132.0, 'mean': 36.43996247654784, 'std': 10.751563246633594, 'min': 15.0, '25%': 27.0, '50%': 36.0, '75%': 45.0, 'max': 65.0}}
<dataframe_info>
RangeIndex: 19239 entries, 0 to 19238
Columns: 110 entries, sofifa_id to nation_flag_url
dtypes: float64(16), int64(44), object(50)
memory usage: 16.1+ MB
<some_examples>
{'sofifa_id': {'0': 158023, '1': 188545, '2': 20801, '3': 190871}, 'player_url': {'0': 'https://sofifa.com/player/158023/lionel-messi/220002', '1': 'https://sofifa.com/player/188545/robert-lewandowski/220002', '2': 'https://sofifa.com/player/20801/c-ronaldo-dos-santos-aveiro/220002', '3': 'https://sofifa.com/player/190871/neymar-da-silva-santos-jr/220002'}, 'short_name': {'0': 'L. Messi', '1': 'R. Lewandowski', '2': 'Cristiano Ronaldo', '3': 'Neymar Jr'}, 'long_name': {'0': 'Lionel Andrés Messi Cuccittini', '1': 'Robert Lewandowski', '2': 'Cristiano Ronaldo dos Santos Aveiro', '3': 'Neymar da Silva Santos Júnior'}, 'player_positions': {'0': 'RW, ST, CF', '1': 'ST', '2': 'ST, LW', '3': 'LW, CAM'}, 'overall': {'0': 93, '1': 92, '2': 91, '3': 91}, 'potential': {'0': 93, '1': 92, '2': 91, '3': 91}, 'value_eur': {'0': 78000000.0, '1': 119500000.0, '2': 45000000.0, '3': 129000000.0}, 'wage_eur': {'0': 320000.0, '1': 270000.0, '2': 270000.0, '3': 270000.0}, 'age': {'0': 34, '1': 32, '2': 36, '3': 29}, 'dob': {'0': '1987-06-24', '1': '1988-08-21', '2': '1985-02-05', '3': '1992-02-05'}, 'height_cm': {'0': 170, '1': 185, '2': 187, '3': 175}, 'weight_kg': {'0': 72, '1': 81, '2': 83, '3': 68}, 'club_team_id': {'0': 73.0, '1': 21.0, '2': 11.0, '3': 73.0}, 'club_name': {'0': 'Paris Saint-Germain', '1': 'FC Bayern München', '2': 'Manchester United', '3': 'Paris Saint-Germain'}, 'league_name': {'0': 'French Ligue 1', '1': 'German 1. Bundesliga', '2': 'English Premier League', '3': 'French Ligue 1'}, 'league_level': {'0': 1.0, '1': 1.0, '2': 1.0, '3': 1.0}, 'club_position': {'0': 'RW', '1': 'ST', '2': 'ST', '3': 'LW'}, 'club_jersey_number': {'0': 30.0, '1': 9.0, '2': 7.0, '3': 10.0}, 'club_loaned_from': {'0': None, '1': None, '2': None, '3': None}, 'club_joined': {'0': '2021-08-10', '1': '2014-07-01', '2': '2021-08-27', '3': '2017-08-03'}, 'club_contract_valid_until': {'0': 2023.0, '1': 2023.0, '2': 2023.0, '3': 2025.0}, 'nationality_id': {'0': 52, '1': 37, '2': 38, '3': 54}, 'nationality_name': {'0': 'Argentina', '1': 'Poland', '2': 'Portugal', '3': 'Brazil'}, 'nation_team_id': {'0': 1369.0, '1': 1353.0, '2': 1354.0, '3': None}, 'nation_position': {'0': 'RW', '1': 'RS', '2': 'ST', '3': None}, 'nation_jersey_number': {'0': 10.0, '1': 9.0, '2': 7.0, '3': None}, 'preferred_foot': {'0': 'Left', '1': 'Right', '2': 'Right', '3': 'Right'}, 'weak_foot': {'0': 4, '1': 4, '2': 4, '3': 5}, 'skill_moves': {'0': 4, '1': 4, '2': 5, '3': 5}, 'international_reputation': {'0': 5, '1': 5, '2': 5, '3': 5}, 'work_rate': {'0': 'Medium/Low', '1': 'High/Medium', '2': 'High/Low', '3': 'High/Medium'}, 'body_type': {'0': 'Unique', '1': 'Unique', '2': 'Unique', '3': 'Unique'}, 'real_face': {'0': 'Yes', '1': 'Yes', '2': 'Yes', '3': 'Yes'}, 'release_clause_eur': {'0': 144300000.0, '1': 197200000.0, '2': 83300000.0, '3': 238700000.0}, 'player_tags': {'0': '#Dribbler, #Distance Shooter, #FK Specialist, #Acrobat, #Clinical Finisher, #Complete Forward', '1': '#Aerial Threat, #Distance Shooter, #Clinical Finisher, #Complete Forward', '2': '#Aerial Threat, #Dribbler, #Distance Shooter, #Crosser, #Acrobat, #Clinical Finisher, #Complete Forward', '3': '#Speedster, #Dribbler, #Playmaker, #FK Specialist, #Acrobat, #Complete Midfielder'}, 'player_traits': {'0': 'Finesse Shot, Long Shot Taker (AI), Playmaker (AI), Outside Foot Shot, One Club Player, Chip Shot (AI), Technical Dribbler (AI)', '1': 'Solid Player, Finesse Shot, Outside Foot Shot, Chip Shot (AI)', '2': 'Power Free-Kick, Flair, Long Shot Taker (AI), Speed Dribbler (AI), Outside Foot 
Shot', '3': 'Injury Prone, Flair, Speed Dribbler (AI), Playmaker (AI), Outside Foot Shot, Technical Dribbler (AI)'}, 'pace': {'0': 85.0, '1': 78.0, '2': 87.0, '3': 91.0}, 'shooting': {'0': 92.0, '1': 92.0, '2': 94.0, '3': 83.0}, 'passing': {'0': 91.0, '1': 79.0, '2': 80.0, '3': 86.0}, 'dribbling': {'0': 95.0, '1': 86.0, '2': 88.0, '3': 94.0}, 'defending': {'0': 34.0, '1': 44.0, '2': 34.0, '3': 37.0}, 'physic': {'0': 65.0, '1': 82.0, '2': 75.0, '3': 63.0}, 'attacking_crossing': {'0': 85, '1': 71, '2': 87, '3': 85}, 'attacking_finishing': {'0': 95, '1': 95, '2': 95, '3': 83}, 'attacking_heading_accuracy': {'0': 70, '1': 90, '2': 90, '3': 63}, 'attacking_short_passing': {'0': 91, '1': 85, '2': 80, '3': 86}, 'attacking_volleys': {'0': 88, '1': 89, '2': 86, '3': 86}, 'skill_dribbling': {'0': 96, '1': 85, '2': 88, '3': 95}, 'skill_curve': {'0': 93, '1': 79, '2': 81, '3': 88}, 'skill_fk_accuracy': {'0': 94, '1': 85, '2': 84, '3': 87}, 'skill_long_passing': {'0': 91, '1': 70, '2': 77, '3': 81}, 'skill_ball_control': {'0': 96, '1': 88, '2': 88, '3': 95}, 'movement_acceleration': {'0': 91, '1': 77, '2': 85, '3': 93}, 'movement_sprint_speed': {'0': 80, '1': 79, '2': 88, '3': 89}, 'movement_agility': {'0': 91, '1': 77, '2': 86, '3': 96}, 'movement_reactions': {'0': 94, '1': 93, '2': 94, '3': 89}, 'movement_balance': {'0': 95, '1': 82, '2': 74, '3': 84}, 'power_shot_power': {'0': 86, '1': 90, '2': 94, '3': 80}, 'power_jumping': {'0': 68, '1': 85, '2': 95, '3': 64}, 'power_stamina': {'0': 72, '1': 76, '2': 77, '3': 81}, 'power_strength': {'0': 69, '1': 86, '2': 77, '3': 53}, 'power_long_shots': {'0': 94, '1': 87, '2': 93, '3': 81}, 'mentality_aggression': {'0': 44, '1': 81, '2': 63, '3': 63}, 'mentality_interceptions': {'0': 40, '1': 49, '2': 29, '3': 37}, 'mentality_positioning': {'0': 93, '1': 95, '2': 95, '3': 86}, 'mentality_vision': {'0': 95, '1': 81, '2': 76, '3': 90}, 'mentality_penalties': {'0': 75, '1': 90, '2': 88, '3': 93}, 'mentality_composure': {'0': 96, '1': 88, '2': 95, '3': 93}, 'defending_marking_awareness': {'0': 20, '1': 35, '2': 24, '3': 35}, 'defending_standing_tackle': {'0': 35, '1': 42, '2': 32, '3': 32}, 'defending_sliding_tackle': {'0': 24, '1': 19, '2': 24, '3': 29}, 'goalkeeping_diving': {'0': 6, '1': 15, '2': 7, '3': 9}, 'goalkeeping_handling': {'0': 11, '1': 6, '2': 11, '3': 9}, 'goalkeeping_kicking': {'0': 15, '1': 12, '2': 15, '3': 15}, 'goalkeeping_positioning': {'0': 14, '1': 8, '2': 14, '3': 15}, 'goalkeeping_reflexes': {'0': 8, '1': 10, '2': 11, '3': 11}, 'goalkeeping_speed': {'0': None, '1': None, '2': None, '3': None}, 'ls': {'0': '89+3', '1': '90+2', '2': '90+1', '3': '83+3'}, 'st': {'0': '89+3', '1': '90+2', '2': '90+1', '3': '83+3'}, 'rs': {'0': '89+3', '1': '90+2', '2': '90+1', '3': '83+3'}, 'lw': {'0': '92', '1': '85', '2': '88', '3': '90'}, 'lf': {'0': '93', '1': '88', '2': '89', '3': '88'}, 'cf': {'0': '93', '1': '88', '2': '89', '3': '88'}, 'rf': {'0': '93', '1': '88', '2': '89', '3': '88'}, 'rw': {'0': '92', '1': '85', '2': '88', '3': '90'}, 'lam': {'0': '93', '1': '86+3', '2': '86+3', '3': '89+2'}, 'cam': {'0': '93', '1': '86+3', '2': '86+3', '3': '89+2'}, 'ram': {'0': '93', '1': '86+3', '2': '86+3', '3': '89+2'}, 'lm': {'0': '91+2', '1': '84+3', '2': '86+3', '3': '89+2'}, 'lcm': {'0': '87+3', '1': '80+3', '2': '78+3', '3': '82+3'}, 'cm': {'0': '87+3', '1': '80+3', '2': '78+3', '3': '82+3'}, 'rcm': {'0': '87+3', '1': '80+3', '2': '78+3', '3': '82+3'}, 'rm': {'0': '91+2', '1': '84+3', '2': '86+3', '3': '89+2'}, 'lwb': {'0': '66+3', '1': '64+3', '2': '63+3', 
'3': '67+3'}, 'ldm': {'0': '64+3', '1': '66+3', '2': '59+3', '3': '63+3'}, 'cdm': {'0': '64+3', '1': '66+3', '2': '59+3', '3': '63+3'}, 'rdm': {'0': '64+3', '1': '66+3', '2': '59+3', '3': '63+3'}, 'rwb': {'0': '66+3', '1': '64+3', '2': '63+3', '3': '67+3'}, 'lb': {'0': '61+3', '1': '61+3', '2': '60+3', '3': '62+3'}, 'lcb': {'0': '50+3', '1': '60+3', '2': '53+3', '3': '50+3'}, 'cb': {'0': '50+3', '1': '60+3', '2': '53+3', '3': '50+3'}, 'rcb': {'0': '50+3', '1': '60+3', '2': '53+3', '3': '50+3'}, 'rb': {'0': '61+3', '1': '61+3', '2': '60+3', '3': '62+3'}, 'gk': {'0': '19+3', '1': '19+3', '2': '20+3', '3': '20+3'}, 'player_face_url': {'0': 'https://cdn.sofifa.net/players/158/023/22_120.png', '1': 'https://cdn.sofifa.net/players/188/545/22_120.png', '2': 'https://cdn.sofifa.net/players/020/801/22_120.png', '3': 'https://cdn.sofifa.net/players/190/871/22_120.png'}, 'club_logo_url': {'0': 'https://cdn.sofifa.net/teams/73/60.png', '1': 'https://cdn.sofifa.net/teams/21/60.png', '2': 'https://cdn.sofifa.net/teams/11/60.png', '3': 'https://cdn.sofifa.net/teams/73/60.png'}, 'club_flag_url': {'0': 'https://cdn.sofifa.net/flags/fr.png', '1': 'https://cdn.sofifa.net/flags/de.png', '2': 'https://cdn.sofifa.net/flags/gb-eng.png', '3': 'https://cdn.sofifa.net/flags/fr.png'}, 'nation_logo_url': {'0': 'https://cdn.sofifa.net/teams/1369/60.png', '1': 'https://cdn.sofifa.net/teams/1353/60.png', '2': 'https://cdn.sofifa.net/teams/1354/60.png', '3': None}, 'nation_flag_url': {'0': 'https://cdn.sofifa.net/flags/ar.png', '1': 'https://cdn.sofifa.net/flags/pl.png', '2': 'https://cdn.sofifa.net/flags/pt.png', '3': 'https://cdn.sofifa.net/flags/br.png'}}
<end_description>
| 5,135 | 3 | 7,130 | 5,135 |
129277278
|
# Running on GPU:
# Install detectron2
# Helper function, used these for debugging purposes
# the detectron2 build only succeeds if the installed CUDA version is correct
#!nvidia-smi
#!nvcc --version
# import torch
# torch.__version__
# import torchvision
# torchvision.__version__
# Base setup:
# detectron2 logger
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
# common libraries
import numpy as np
import os, json, cv2, random
import matplotlib.pyplot as plt
# detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.structures import BoxMode, Boxes, Instances
import detectron2.structures.boxes as box_ops
import torch
import math
# this will filter out all the extra classes
def get_persons_objects(instances):
pred_classes = instances.pred_classes
pred_boxes = instances.pred_boxes
pred_scores = instances.scores
new_boxes = Boxes(torch.tensor([]))
new_classes = torch.tensor([])
new_scores = torch.tensor([])
for i, t in enumerate(pred_classes):
if t.item() == 0:
new_classes = torch.cat((new_classes, t.unsqueeze(0).to("cpu:0")))
new_boxes = Boxes.cat((new_boxes, pred_boxes[i].to("cpu:0")))
new_scores = torch.cat(
(new_scores, pred_scores[i].unsqueeze(0).to("cpu:0"))
)
pred_classes = new_classes
pred_boxes = new_boxes
scores = new_scores
return pred_classes, pred_boxes, scores
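# A more compact alternative (the same boolean-mask idiom is used later in
# PersonPredictor): detectron2's Instances supports mask indexing, so the
# per-element loop above can be avoided. This is only a sketch and assumes
# `instances` is an Instances object with pred_classes/pred_boxes/scores set.
def get_persons_objects_vectorized(instances):
    instances = instances.to("cpu")
    persons = instances[instances.pred_classes == 0]  # COCO thing class 0 == person
    return persons.pred_classes, persons.pred_boxes, persons.scores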
# # Doing it over a single frame
im = cv2.imread("/kaggle/working/input.jpg")
plt.figure(figsize=(15, 7.5))
plt.imshow(im[..., ::-1]) # bgr to rgb
# This is detection
#
cfg = get_cfg()
cfg.merge_from_file(
model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
)
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this model
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
)
predictor = DefaultPredictor(cfg)
outputs = predictor(im[..., ::-1])
pred_classes, pred_boxes, pred_scores = get_persons_objects(
outputs["instances"].to("cpu")
)
instances = Instances(
image_size=im.shape[:2],
pred_boxes=pred_boxes,
pred_classes=pred_classes.int(),
)
v = Visualizer(im[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)
out = v.draw_instance_predictions(instances.to("cpu"))
plt.figure(figsize=(15, 7.5))
plt.imshow(out.get_image())
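# Optionally persist the annotated frame to disk as well (the output path is
# illustrative). Visualizer returns an RGB array while cv2.imwrite expects BGR,
# hence the channel flip.
cv2.imwrite("/kaggle/working/input_annotated.jpg", out.get_image()[:, :, ::-1])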
# #### Running the same code over 10 frames sampled from the video (every 100th frame)
#
input_file = "/kaggle/working/video5.mp4"
print("Execution starts....")
# Open the input video file
input_video = cv2.VideoCapture(input_file)
detections = np.empty((0, 5))
frame_count = 0
# Loop over the frames in the input video
while True:
# Read the next frame from the input video
ret, im = input_video.read()
if not ret:
break
print(f"Processing frame:{frame_count}", end=" | ")
outputs = predictor(im)
instances = outputs["instances"].to("cpu")
pred_classes, pred_boxes, scores = get_persons_objects(instances)
instances = Instances(
image_size=im.shape[:2],
pred_boxes=pred_boxes,
pred_classes=pred_classes.int(),
scores=scores,
)
v = Visualizer(
im[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2
)
out = v.draw_instance_predictions(instances.to("cpu"))
plt.figure(figsize=(15, 7.5))
plt.imshow(out.get_image())
print("Total Person objects found: ", pred_classes.shape[0])
    frame_count += 100  # advance 100 frames; the set() call below seeks the capture to this index
input_video.set(cv2.CAP_PROP_POS_FRAMES, frame_count)
print("ALL DONE!")
input_video.release()
# ## Running it over the whole video using the Roboflow supervision library
from IPython import display
display.clear_output()
import supervision as sv
print("supervision", sv.__version__)
# Without the polygon zone yet: detection + box annotation only
#
input_video = "/kaggle/working/video5.mp4"
video_info = sv.VideoInfo.from_video_path(input_video)
# extract video frame
generator = sv.get_video_frames_generator(input_video)
iterator = iter(generator)
frame = next(iterator)
# detect
outputs = predictor(frame)
detections = sv.Detections(
xyxy=outputs["instances"].pred_boxes.tensor.cpu().numpy(),
confidence=outputs["instances"].scores.cpu().numpy(),
class_id=outputs["instances"].pred_classes.cpu().numpy().astype(int),
)
detections = detections[detections.class_id == 0]
box_annotator = sv.BoxAnnotator()
labels = [f"Person-{detection[1]:0.2f}%" for detection in detections]
frame = box_annotator.annotate(scene=frame, detections=detections, labels=labels)
sv.show_frame_in_notebook(frame, (16, 16))
# Running it over the video
input_video = "/kaggle/working/video5.mp4"
output_video = "/kaggle/working/full-video-output.mp4"
video_info = sv.VideoInfo.from_video_path(input_video)
# initiate annotators
box_annotator = sv.BoxAnnotator()
def process_frame(frame: np.ndarray, i: int) -> np.ndarray:
print(i)
outputs = predictor(frame)
detections = sv.Detections(
xyxy=outputs["instances"].pred_boxes.tensor.cpu().numpy(),
confidence=outputs["instances"].scores.cpu().numpy(),
class_id=outputs["instances"].pred_classes.cpu().numpy().astype(int),
)
detections = detections[detections.class_id == 0]
box_annotator = sv.BoxAnnotator()
labels = [f"Person-{detection[1]:0.2f}%" for detection in detections]
frame = box_annotator.annotate(scene=frame, detections=detections, labels=labels)
return frame
sv.process_video(
source_path=input_video, target_path=output_video, callback=process_frame
)
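# To preview the annotated clip inline (sketch; embedding a large mp4 in the
# notebook can be slow, so this is left commented out):
# from IPython.display import Video
# Video(output_video, embed=True, width=960)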
# # All fine above this point; ByteTrack setup follows
HOME = "/kaggle/working/"
# workaround related to https://github.com/roboflow/notebooks/issues/80
# workaround related to https://github.com/roboflow/notebooks/issues/112
from IPython import display
display.clear_output()
import sys
sys.path.append(f"{HOME}/ByteTrack")
import yolox
print("yolox.__version__:", yolox.__version__)
from yolox.tracker.byte_tracker import BYTETracker, STrack
from onemetric.cv.utils.iou import box_iou_batch
from dataclasses import dataclass
@dataclass(frozen=True)
class BYTETrackerArgs:
track_thresh: float = 0.25
track_buffer: int = 30
match_thresh: float = 0.8
aspect_ratio_thresh: float = 3.0
min_box_area: float = 1.0
mot20: bool = False
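# Quick reference for the knobs above (per the ByteTrack implementation):
# track_thresh        - detection confidence needed to start / keep a track
# track_buffer        - number of frames a lost track is kept before removal
# match_thresh        - matching threshold used during association
# aspect_ratio_thresh - drop boxes too wide/flat to be a person
# min_box_area        - drop tiny boxes
# mot20               - toggles MOT20-specific matching behaviour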
from IPython import display
display.clear_output()
import supervision
print("supervision.__version__:", supervision.__version__)
from supervision.draw.color import ColorPalette
from supervision.geometry.dataclasses import Point
from supervision.video.dataclasses import VideoInfo
from supervision.video.source import get_video_frames_generator
from supervision.video.sink import VideoSink
from supervision.notebook.utils import show_frame_in_notebook
from supervision.tools.detections import Detections, BoxAnnotator
from supervision.tools.line_counter import LineCounter, LineCounterAnnotator
from typing import List
import numpy as np
# converts Detections into format that can be consumed by match_detections_with_tracks function
def detections2boxes(detections: Detections) -> np.ndarray:
return np.hstack((detections.xyxy, detections.confidence[:, np.newaxis]))
# converts List[STrack] into format that can be consumed by match_detections_with_tracks function
def tracks2boxes(tracks: List[STrack]) -> np.ndarray:
return np.array([track.tlbr for track in tracks], dtype=float)
# matches our bounding boxes with predictions
def match_detections_with_tracks(
detections: Detections, tracks: List[STrack]
) -> Detections:
if not np.any(detections.xyxy) or len(tracks) == 0:
return np.empty((0,))
tracks_boxes = tracks2boxes(tracks=tracks)
iou = box_iou_batch(tracks_boxes, detections.xyxy)
track2detection = np.argmax(iou, axis=1)
tracker_ids = [None] * len(detections)
for tracker_index, detection_index in enumerate(track2detection):
if iou[tracker_index, detection_index] != 0:
tracker_ids[detection_index] = tracks[tracker_index].track_id
return tracker_ids
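# The three helpers above are defined but never exercised in this notebook, so
# here is a minimal sketch of how they are usually wired together per frame.
# Assumptions: `predictor` is the detectron2 DefaultPredictor built earlier,
# `sv` is the supervision alias imported above, and BYTETracker.update follows
# the yolox/ByteTrack signature (output_results, img_info, img_size).
byte_tracker = BYTETracker(BYTETrackerArgs())

def detect_and_track(frame: np.ndarray) -> sv.Detections:
    outputs = predictor(frame)
    detections = sv.Detections(
        xyxy=outputs["instances"].pred_boxes.tensor.cpu().numpy(),
        confidence=outputs["instances"].scores.cpu().numpy(),
        class_id=outputs["instances"].pred_classes.cpu().numpy().astype(int),
    )
    detections = detections[detections.class_id == 0]  # persons only
    tracks = byte_tracker.update(
        output_results=detections2boxes(detections=detections),
        img_info=frame.shape,
        img_size=frame.shape,
    )
    detections.tracker_id = np.array(
        match_detections_with_tracks(detections=detections, tracks=tracks)
    )
    return detections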
# Working copy of video
import numpy as np
import supervision as sv
input_video = "/kaggle/working/video5.mp4"
output_video = "/kaggle/working/full-video-output.mp4"
video_info = sv.VideoInfo.from_video_path(input_video)
# initiate annotators
box_annotator = sv.BoxAnnotator(thickness=4, text_thickness=4, text_scale=2)
print("Frame processed :", end=" ")
def process_frame(frame: np.ndarray, i: int) -> np.ndarray:
print(i)
outputs = predictor(frame)
detections = sv.Detections(
xyxy=outputs["instances"].pred_boxes.tensor.cpu().numpy(),
confidence=outputs["instances"].scores.cpu().numpy(),
class_id=outputs["instances"].pred_classes.cpu().numpy().astype(int),
)
detections = detections[detections.class_id == 0]
box_annotator = sv.BoxAnnotator(thickness=4, text_thickness=4, text_scale=2)
frame = box_annotator.annotate(scene=frame, detections=detections)
return frame
sv.process_video(
source_path=input_video, target_path=output_video, callback=process_frame
)
# # Segmentation
# #### On a single frame
im2 = cv2.imread("/kaggle/working/input2.jpg")
from detectron2.engine import DefaultPredictor
class PersonPredictor(DefaultPredictor):
def __init__(self, cfg):
super().__init__(cfg)
self.category_mapper = self.metadata.get("thing_dataset_id_to_contiguous_id")
def __call__(self, original_image):
outputs = super().__call__(original_image)
instances = outputs["instances"]
person_instances = instances[instances.pred_classes == 0]
outputs["instances"] = person_instances
return outputs
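# PersonPredictor is a drop-in replacement for DefaultPredictor that keeps only
# "person" instances; it is not instantiated anywhere in this notebook. Usage
# would look like this (sketch, left commented out to avoid loading the model twice):
# person_predictor = PersonPredictor(cfg)
# person_outputs = person_predictor(im2)
# print(len(person_outputs["instances"]), "person detections")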
from detectron2.structures import Boxes, Instances
import detectron2.structures.boxes as box_ops
import torch
import math
def get_persons_objects(instances):
pred_classes = instances.pred_classes
pred_boxes = instances.pred_boxes
pred_scores = instances.scores
new_boxes = Boxes(torch.tensor([]))
new_classes = torch.tensor([])
new_scores = torch.tensor([])
for i, t in enumerate(pred_classes):
if t.item() == 0:
new_classes = torch.cat((new_classes, t.unsqueeze(0).to("cpu:0")))
new_boxes = Boxes.cat((new_boxes, pred_boxes[i].to("cpu:0")))
new_scores = torch.cat(
(new_scores, pred_scores[i].unsqueeze(0).to("cpu:0"))
)
pred_classes = new_classes
pred_boxes = new_boxes
scores = new_scores
return pred_classes, pred_boxes, scores
cfg = get_cfg()
cfg.merge_from_file(
model_zoo.get_config_file("COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml")
)
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
"COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml"
)
predictor = DefaultPredictor(cfg)
panoptic_seg, segments_info = predictor(im2)["panoptic_seg"]
v = Visualizer(im2[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)
out = v.draw_panoptic_seg_predictions(panoptic_seg.to("cpu"), segments_info)
plt.figure(figsize=(25, 15))
plt.imshow(out.get_image())  # Visualizer output is already RGB
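# A quick person count from the panoptic output (sketch; assumes the COCO
# metadata used above, where contiguous thing class 0 is "person").
person_segments = [
    s for s in segments_info if s.get("isthing") and s.get("category_id") == 0
]
print("Person segments found:", len(person_segments))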
SUBWAY_VIDEO_PATH = "/kaggle/working/video-clip-new.mp4"
import numpy as np
import supervision as sv
# initiate polygon zone
polygon = np.array(
[
[300, 600],
[1600, 600],
[300, 600],
[1600, 600],
]
)
video_info = sv.VideoInfo.from_video_path(SUBWAY_VIDEO_PATH)
zone = sv.PolygonZone(polygon=polygon, frame_resolution_wh=video_info.resolution_wh)
# initiate annotators
box_annotator = sv.BoxAnnotator(thickness=4, text_thickness=4, text_scale=2)
zone_annotator = sv.PolygonZoneAnnotator(
zone=zone, color=sv.Color.white(), thickness=6, text_thickness=6, text_scale=4
)
def process_frame(frame: np.ndarray, i: int) -> np.ndarray:
print("frame", i)
outputs = predictor(frame)
detections = sv.Detections(
xyxy=outputs["instances"].pred_boxes.tensor.cpu().numpy(),
confidence=outputs["instances"].scores.cpu().numpy(),
class_id=outputs["instances"].pred_classes.cpu().numpy().astype(int),
)
detections = detections[detections.class_id == 0]
zone.trigger(detections=detections)
# annotate
box_annotator = sv.BoxAnnotator(thickness=4, text_thickness=4, text_scale=2)
frame = box_annotator.annotate(scene=frame, detections=detections, skip_label=True)
frame = zone_annotator.annotate(scene=frame)
return frame
sv.process_video(
source_path=SUBWAY_VIDEO_PATH,
target_path=f"/kaggle/working/video-clip-new-output.mp4",
callback=process_frame,
)
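# After processing, the zone keeps the count from the most recently triggered
# frame (sketch; attribute name per supervision's PolygonZone API).
print("People inside the zone on the last processed frame:", zone.current_count)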
# from IPython import display
# display.clear_output()
print("ap")
# initiate polygon zone
polygon = np.array(
[
[300, 600],
[1600, 600],
[300, 600],
[1600, 600],
]
)
video_info = sv.VideoInfo.from_video_path(SUBWAY_VIDEO_PATH)
zone = sv.PolygonZone(polygon=polygon, frame_resolution_wh=video_info.resolution_wh)
# initiate annotators
box_annotator = sv.BoxAnnotator(thickness=4, text_thickness=4, text_scale=2)
zone_annotator = sv.PolygonZoneAnnotator(
zone=zone, color=sv.Color.white(), thickness=6, text_thickness=6, text_scale=4
)
# extract video frame
generator = sv.get_video_frames_generator(SUBWAY_VIDEO_PATH)
iterator = iter(generator)
frame = next(iterator)
# detect
outputs = predictor(frame)
detections = sv.Detections(
xyxy=outputs["instances"].pred_boxes.tensor.cpu().numpy(),
confidence=outputs["instances"].scores.cpu().numpy(),
class_id=outputs["instances"].pred_classes.cpu().numpy().astype(int),
)
detections = detections[detections.class_id == 0]
zone.trigger(detections=detections)
# annotate
box_annotator = sv.BoxAnnotator(thickness=4, text_thickness=4, text_scale=2)
frame = box_annotator.annotate(scene=frame, detections=detections, skip_label=True)
frame = zone_annotator.annotate(scene=frame)
sv.show_frame_in_notebook(frame, (16, 16))
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/277/129277278.ipynb
| null | null |
[{"Id": 129277278, "ScriptId": 38420584, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1780352, "CreationDate": "05/12/2023 11:40:57", "VersionNumber": 3.0, "Title": "Detectron2 over video frames", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 552.0, "LinesInsertedFromPrevious": 320.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 232.0, "LinesInsertedFromFork": 498.0, "LinesDeletedFromFork": 368.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 54.0, "TotalVotes": 0}]
| null | null | null | null |
# Running on GPU:
# Install detectron2
# Helper function, used these for debugging purposes
# the detectron2 build only succeeds if the installed CUDA version is correct
#!nvidia-smi
#!nvcc --version
# import torch
# torch.__version__
# import torchvision
# torchvision.__version__
# Base setup:
# detectron2 logger
import detectron2
from detectron2.utils.logger import setup_logger
setup_logger()
# common libraries
import numpy as np
import os, json, cv2, random
import matplotlib.pyplot as plt
# detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog
from detectron2.structures import BoxMode, Boxes, Instances
import detectron2.structures.boxes as box_ops
import torch
import math
# this will filter out all the extra classes
def get_persons_objects(instances):
pred_classes = instances.pred_classes
pred_boxes = instances.pred_boxes
pred_scores = instances.scores
new_boxes = Boxes(torch.tensor([]))
new_classes = torch.tensor([])
new_scores = torch.tensor([])
for i, t in enumerate(pred_classes):
if t.item() == 0:
new_classes = torch.cat((new_classes, t.unsqueeze(0).to("cpu:0")))
new_boxes = Boxes.cat((new_boxes, pred_boxes[i].to("cpu:0")))
new_scores = torch.cat(
(new_scores, pred_scores[i].unsqueeze(0).to("cpu:0"))
)
pred_classes = new_classes
pred_boxes = new_boxes
scores = new_scores
return pred_classes, pred_boxes, scores
# # DOing it over a single frame
im = cv2.imread("/kaggle/working/input.jpg")
plt.figure(figsize=(15, 7.5))
plt.imshow(im[..., ::-1]) # bgr to rgb
# This is detection
#
cfg = get_cfg()
cfg.merge_from_file(
model_zoo.get_config_file("COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml")
)
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5 # set threshold for this model
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml"
)
predictor = DefaultPredictor(cfg)
outputs = predictor(im[..., ::-1])
pred_classes, pred_boxes, pred_scores = get_persons_objects(
outputs["instances"].to("cpu")
)
instances = Instances(
image_size=im.shape[:2],
pred_boxes=pred_boxes,
pred_classes=pred_classes.int(),
)
v = Visualizer(im[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)
out = v.draw_instance_predictions(instances.to("cpu"))
plt.figure(figsize=(15, 7.5))
plt.imshow(out.get_image())
# #### Running the same code over 10 different frames from the video
#
input_file = "/kaggle/working/video5.mp4"
print("Execution starts....")
# Open the input video file
input_video = cv2.VideoCapture(input_file)
detections = np.empty((0, 5))
frame_count = 0
# Loop over the frames in the input video
while True:
# Read the next frame from the input video
ret, im = input_video.read()
if not ret:
break
print(f"Processing frame:{frame_count}", end=" | ")
outputs = predictor(im)
instances = outputs["instances"].to("cpu")
pred_classes, pred_boxes, scores = get_persons_objects(instances)
instances = Instances(
image_size=im.shape[:2],
pred_boxes=pred_boxes,
pred_classes=pred_classes.int(),
scores=scores,
)
v = Visualizer(
im[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2
)
out = v.draw_instance_predictions(instances.to("cpu"))
plt.figure(figsize=(15, 7.5))
plt.imshow(out.get_image())
print("Total Person objects found: ", pred_classes.shape[0])
frame_count += 100 # each Nth frame
input_video.set(cv2.CAP_PROP_POS_FRAMES, frame_count)
print("ALL DONE!")
input_video.release()
# ## Running it over the whole video using ROBOLEX supervision library
from IPython import display
display.clear_output()
import supervision as sv
print("supervision", sv.__version__)
# Without polygon stuff
#
input_video = "/kaggle/working/video5.mp4"
video_info = sv.VideoInfo.from_video_path(input_video)
# extract video frame
generator = sv.get_video_frames_generator(input_video)
iterator = iter(generator)
frame = next(iterator)
# detect
outputs = predictor(frame)
detections = sv.Detections(
xyxy=outputs["instances"].pred_boxes.tensor.cpu().numpy(),
confidence=outputs["instances"].scores.cpu().numpy(),
class_id=outputs["instances"].pred_classes.cpu().numpy().astype(int),
)
detections = detections[detections.class_id == 0]
box_annotator = sv.BoxAnnotator()
labels = [f"Person-{detection[1]:0.2f}%" for detection in detections]
frame = box_annotator.annotate(scene=frame, detections=detections, labels=labels)
sv.show_frame_in_notebook(frame, (16, 16))
# Running it over the video
input_video = "/kaggle/working/video5.mp4"
output_video = "/kaggle/working/full-video-output.mp4"
video_info = sv.VideoInfo.from_video_path(input_video)
# initiate annotators
box_annotator = sv.BoxAnnotator()
def process_frame(frame: np.ndarray, i: int) -> np.ndarray:
print(i)
outputs = predictor(frame)
detections = sv.Detections(
xyxy=outputs["instances"].pred_boxes.tensor.cpu().numpy(),
confidence=outputs["instances"].scores.cpu().numpy(),
class_id=outputs["instances"].pred_classes.cpu().numpy().astype(int),
)
detections = detections[detections.class_id == 0]
box_annotator = sv.BoxAnnotator()
labels = [f"Person-{detection[1]:0.2f}%" for detection in detections]
frame = box_annotator.annotate(scene=frame, detections=detections, labels=labels)
return frame
sv.process_video(
source_path=input_video, target_path=output_video, callback=process_frame
)
# # All fine above it
HOME = "/kaggle/working/"
# workaround related to https://github.com/roboflow/notebooks/issues/80
# workaround related to https://github.com/roboflow/notebooks/issues/112
from IPython import display
display.clear_output()
import sys
sys.path.append(f"{HOME}/ByteTrack")
import yolox
print("yolox.__version__:", yolox.__version__)
from yolox.tracker.byte_tracker import BYTETracker, STrack
from onemetric.cv.utils.iou import box_iou_batch
from dataclasses import dataclass
@dataclass(frozen=True)
class BYTETrackerArgs:
track_thresh: float = 0.25
track_buffer: int = 30
match_thresh: float = 0.8
aspect_ratio_thresh: float = 3.0
min_box_area: float = 1.0
mot20: bool = False
from IPython import display
display.clear_output()
import supervision
print("supervision.__version__:", supervision.__version__)
from supervision.draw.color import ColorPalette
from supervision.geometry.dataclasses import Point
from supervision.video.dataclasses import VideoInfo
from supervision.video.source import get_video_frames_generator
from supervision.video.sink import VideoSink
from supervision.notebook.utils import show_frame_in_notebook
from supervision.tools.detections import Detections, BoxAnnotator
from supervision.tools.line_counter import LineCounter, LineCounterAnnotator
from typing import List
import numpy as np
# converts Detections into format that can be consumed by match_detections_with_tracks function
def detections2boxes(detections: Detections) -> np.ndarray:
return np.hstack((detections.xyxy, detections.confidence[:, np.newaxis]))
# converts List[STrack] into format that can be consumed by match_detections_with_tracks function
def tracks2boxes(tracks: List[STrack]) -> np.ndarray:
return np.array([track.tlbr for track in tracks], dtype=float)
# matches our bounding boxes with predictions
def match_detections_with_tracks(
detections: Detections, tracks: List[STrack]
) -> Detections:
if not np.any(detections.xyxy) or len(tracks) == 0:
return np.empty((0,))
tracks_boxes = tracks2boxes(tracks=tracks)
iou = box_iou_batch(tracks_boxes, detections.xyxy)
track2detection = np.argmax(iou, axis=1)
tracker_ids = [None] * len(detections)
for tracker_index, detection_index in enumerate(track2detection):
if iou[tracker_index, detection_index] != 0:
tracker_ids[detection_index] = tracks[tracker_index].track_id
return tracker_ids
# Wokring copy of video
import numpy as np
import supervision as sv
input_video = "/kaggle/working/video5.mp4"
output_video = "/kaggle/working/full-video-output.mp4"
import numpy as np
import supervision as sv
video_info = sv.VideoInfo.from_video_path(input_video)
# initiate annotators
box_annotator = sv.BoxAnnotator(thickness=4, text_thickness=4, text_scale=2)
print("Frame processed :", end=" ")
def process_frame(frame: np.ndarray, i: int) -> np.ndarray:
print(i)
outputs = predictor(frame)
detections = sv.Detections(
xyxy=outputs["instances"].pred_boxes.tensor.cpu().numpy(),
confidence=outputs["instances"].scores.cpu().numpy(),
class_id=outputs["instances"].pred_classes.cpu().numpy().astype(int),
)
detections = detections[detections.class_id == 0]
box_annotator = sv.BoxAnnotator(thickness=4, text_thickness=4, text_scale=2)
frame = box_annotator.annotate(scene=frame, detections=detections)
return frame
sv.process_video(
source_path=input_video, target_path=output_video, callback=process_frame
)
# # Segmentation
# #### On a single frame
im2 = cv2.imread("/kaggle/working/input2.jpg")
from detectron2.engine import DefaultPredictor
class PersonPredictor(DefaultPredictor):
def __init__(self, cfg):
super().__init__(cfg)
self.category_mapper = self.metadata.get("thing_dataset_id_to_contiguous_id")
def __call__(self, original_image):
outputs = super().__call__(original_image)
instances = outputs["instances"]
person_instances = instances[instances.pred_classes == 0]
outputs["instances"] = person_instances
return outputs
from detectron2.structures import Boxes
import detectron2.structures.boxes as box_ops
from detectron2.structures import Boxes, Instances
import torch
from detectron2.structures import Boxes
import detectron2.structures.boxes as box_ops
from detectron2.structures import Boxes, Instances
import math
def get_persons_objects(instances):
pred_classes = instances.pred_classes
pred_boxes = instances.pred_boxes
pred_scores = instances.scores
new_boxes = Boxes(torch.tensor([]))
new_classes = torch.tensor([])
new_scores = torch.tensor([])
for i, t in enumerate(pred_classes):
if t.item() == 0:
new_classes = torch.cat((new_classes, t.unsqueeze(0).to("cpu:0")))
new_boxes = Boxes.cat((new_boxes, pred_boxes[i].to("cpu:0")))
new_scores = torch.cat(
(new_scores, pred_scores[i].unsqueeze(0).to("cpu:0"))
)
pred_classes = new_classes
pred_boxes = new_boxes
scores = new_scores
return pred_classes, pred_boxes, scores
cfg = get_cfg()
cfg.merge_from_file(
model_zoo.get_config_file("COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml")
)
cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(
"COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml"
)
predictor = DefaultPredictor(cfg)
panoptic_seg, segments_info = predictor(im2)["panoptic_seg"]
v = Visualizer(im2[:, :, ::-1], MetadataCatalog.get(cfg.DATASETS.TRAIN[0]), scale=1.2)
out = v.draw_panoptic_seg_predictions(panoptic_seg.to("cpu"), segments_info)
plt.figure(figsize=(25, 15))
plt.imshow(out.get_image()[:, :, ::-1][..., ::-1])
SUBWAY_VIDEO_PATH = "/kaggle/working/video-clip-new.mp4"
import numpy as np
import supervision as sv
# initiate polygon zone
polygon = np.array(
[
[300, 600],
[1600, 600],
[300, 600],
[1600, 600],
]
)
video_info = sv.VideoInfo.from_video_path(SUBWAY_VIDEO_PATH)
zone = sv.PolygonZone(polygon=polygon, frame_resolution_wh=video_info.resolution_wh)
# initiate annotators
box_annotator = sv.BoxAnnotator(thickness=4, text_thickness=4, text_scale=2)
zone_annotator = sv.PolygonZoneAnnotator(
zone=zone, color=sv.Color.white(), thickness=6, text_thickness=6, text_scale=4
)
def process_frame(frame: np.ndarray, i: int) -> np.ndarray:
print("frame", i)
outputs = predictor(frame)
detections = sv.Detections(
xyxy=outputs["instances"].pred_boxes.tensor.cpu().numpy(),
confidence=outputs["instances"].scores.cpu().numpy(),
class_id=outputs["instances"].pred_classes.cpu().numpy().astype(int),
)
detections = detections[detections.class_id == 0]
zone.trigger(detections=detections)
# annotate
box_annotator = sv.BoxAnnotator(thickness=4, text_thickness=4, text_scale=2)
frame = box_annotator.annotate(scene=frame, detections=detections, skip_label=True)
frame = zone_annotator.annotate(scene=frame)
return frame
sv.process_video(
source_path=SUBWAY_VIDEO_PATH,
target_path=f"/kaggle/working/video-clip-new-output.mp4",
callback=process_frame,
)
# from IPython import display
# display.clear_output()
print("ap")
# initiate polygon zone
polygon = np.array(
[
[300, 600],
[1600, 600],
[300, 600],
[1600, 600],
]
)
video_info = sv.VideoInfo.from_video_path(SUBWAY_VIDEO_PATH)
zone = sv.PolygonZone(polygon=polygon, frame_resolution_wh=video_info.resolution_wh)
# initiate annotators
box_annotator = sv.BoxAnnotator(thickness=4, text_thickness=4, text_scale=2)
zone_annotator = sv.PolygonZoneAnnotator(
zone=zone, color=sv.Color.white(), thickness=6, text_thickness=6, text_scale=4
)
# extract video frame
generator = sv.get_video_frames_generator(SUBWAY_VIDEO_PATH)
iterator = iter(generator)
frame = next(iterator)
# detect
outputs = predictor(frame)
detections = sv.Detections(
xyxy=outputs["instances"].pred_boxes.tensor.cpu().numpy(),
confidence=outputs["instances"].scores.cpu().numpy(),
class_id=outputs["instances"].pred_classes.cpu().numpy().astype(int),
)
detections = detections[detections.class_id == 0]
zone.trigger(detections=detections)
# annotate
box_annotator = sv.BoxAnnotator(thickness=4, text_thickness=4, text_scale=2)
frame = box_annotator.annotate(scene=frame, detections=detections, skip_label=True)
frame = zone_annotator.annotate(scene=frame)
sv.show_frame_in_notebook(frame, (16, 16))
| false | 0 | 4,448 | 0 | 4,448 | 4,448 |
||
129200830
|
<jupyter_start><jupyter_text>🎹 Spotify Tracks Dataset
# Content
This is a dataset of Spotify tracks over a range of **125** different genres. Each track has some audio features associated with it. The data is in `CSV` format which is tabular and can be loaded quickly.
# Usage
The dataset can be used for:
- Building a **Recommendation System** based on some user input or preference
- **Classification** purposes based on audio features and available genres
- Any other application that you can think of. Feel free to discuss!
# Column Description
- **track_id**: The Spotify ID for the track
- **artists**: The artists' names who performed the track. If there is more than one artist, they are separated by a `;`
- **album_name**: The album name in which the track appears
- **track_name**: Name of the track
- **popularity**: **The popularity of a track is a value between 0 and 100, with 100 being the most popular**. The popularity is calculated by algorithm and is based, in the most part, on the total number of plays the track has had and how recent those plays are. Generally speaking, songs that are being played a lot now will have a higher popularity than songs that were played a lot in the past. Duplicate tracks (e.g. the same track from a single and an album) are rated independently. Artist and album popularity is derived mathematically from track popularity.
- **duration_ms**: The track length in milliseconds
- **explicit**: Whether or not the track has explicit lyrics (true = yes it does; false = no it does not OR unknown)
- **danceability**: Danceability describes how suitable a track is for dancing based on a combination of musical elements including tempo, rhythm stability, beat strength, and overall regularity. A value of 0.0 is least danceable and 1.0 is most danceable
- **energy**: Energy is a measure from 0.0 to 1.0 and represents a perceptual measure of intensity and activity. Typically, energetic tracks feel fast, loud, and noisy. For example, death metal has high energy, while a Bach prelude scores low on the scale
- **key**: The key the track is in. Integers map to pitches using standard Pitch Class notation. E.g. `0 = C`, `1 = C♯/D♭`, `2 = D`, and so on. If no key was detected, the value is -1
- **loudness**: The overall loudness of a track in decibels (dB)
- **mode**: Mode indicates the modality (major or minor) of a track, the type of scale from which its melodic content is derived. Major is represented by 1 and minor is 0
- **speechiness**: Speechiness detects the presence of spoken words in a track. The more exclusively speech-like the recording (e.g. talk show, audio book, poetry), the closer to 1.0 the attribute value. Values above 0.66 describe tracks that are probably made entirely of spoken words. Values between 0.33 and 0.66 describe tracks that may contain both music and speech, either in sections or layered, including such cases as rap music. Values below 0.33 most likely represent music and other non-speech-like tracks
- **acousticness**: A confidence measure from 0.0 to 1.0 of whether the track is acoustic. 1.0 represents high confidence the track is acoustic
- **instrumentalness**: Predicts whether a track contains no vocals. "Ooh" and "aah" sounds are treated as instrumental in this context. Rap or spoken word tracks are clearly "vocal". The closer the instrumentalness value is to 1.0, the greater likelihood the track contains no vocal content
- **liveness**: Detects the presence of an audience in the recording. Higher liveness values represent an increased probability that the track was performed live. A value above 0.8 provides strong likelihood that the track is live
- **valence**: A measure from 0.0 to 1.0 describing the musical positiveness conveyed by a track. Tracks with high valence sound more positive (e.g. happy, cheerful, euphoric), while tracks with low valence sound more negative (e.g. sad, depressed, angry)
- **tempo**: The overall estimated tempo of a track in beats per minute (BPM). In musical terminology, tempo is the speed or pace of a given piece and derives directly from the average beat duration
- **time_signature**: An estimated time signature. The time signature (meter) is a notational convention to specify how many beats are in each bar (or measure). The time signature ranges from 3 to 7, indicating time signatures from `3/4` to `7/4`.
- **track_genre**: The genre in which the track belongs
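As a quick, hedged illustration of the classification use case listed under Usage (the scikit-learn model, split, and feature selection below are assumptions for illustration only, not part of this dataset):

import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

df = pd.read_csv("/kaggle/input/-spotify-tracks-dataset/dataset.csv")
audio_features = ["danceability", "energy", "loudness", "speechiness", "acousticness",
                  "instrumentalness", "liveness", "valence", "tempo"]
X_train, X_test, y_train, y_test = train_test_split(
    df[audio_features], df["track_genre"], test_size=0.2, random_state=0
)
clf = RandomForestClassifier(n_estimators=100, random_state=0).fit(X_train, y_train)
print("genre classification accuracy:", clf.score(X_test, y_test))  # rough baseline only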
# Acknowledgement
Image credits: [BPR world](https://www.bprworld.com/news/spotify-vs-radio-the-battle-continues/)
Kaggle dataset identifier: spotify-tracks-dataset
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
spt_music = pd.read_csv("/kaggle/input/-spotify-tracks-dataset/dataset.csv")
spt_music.describe()
spt_music_a = spt_music[
    ["popularity", "duration_ms", "tempo", "key"]
]  # keep only the columns that are not "subjective measures"; still needs review
popul = spt_music_a.sort_values("popularity")
popul
popul.head(
    100
)  # key??? but weren't those just 0, 1?? [some ideas: which tempos are most popular, which duration is the most popular...?]
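# A minimal sketch of the first idea in the comment above (an illustrative assumption,
# not part of the original analysis): bin tempo and compare mean popularity per bin.
tempo_bins = pd.cut(spt_music_a["tempo"], bins=[0, 90, 110, 130, 150, 250])
spt_music_a.groupby(tempo_bins)["popularity"].mean()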
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/200/129200830.ipynb
|
spotify-tracks-dataset
|
maharshipandya
|
[{"Id": 129200830, "ScriptId": 37797898, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8895412, "CreationDate": "05/11/2023 19:05:34", "VersionNumber": 3.0, "Title": "Quantifying_Music", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 17.0, "LinesInsertedFromPrevious": 9.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 8.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185034542, "KernelVersionId": 129200830, "SourceDatasetVersionId": 4372070}]
|
[{"Id": 4372070, "DatasetId": 2570056, "DatasourceVersionId": 4430808, "CreatorUserId": 7899307, "LicenseName": "Database: Open Database, Contents: \u00a9 Original Authors", "CreationDate": "10/22/2022 14:40:15", "VersionNumber": 1.0, "Title": "\ud83c\udfb9 Spotify Tracks Dataset", "Slug": "spotify-tracks-dataset", "Subtitle": "A dataset of Spotify songs with different genres and their audio features", "Description": "# Content\n\nThis is a dataset of Spotify tracks over a range of **125** different genres. Each track has some audio features associated with it. The data is in `CSV` format which is tabular and can be loaded quickly.\n\n# Usage\n\nThe dataset can be used for:\n\n- Building a **Recommendation System** based on some user input or preference\n- **Classification** purposes based on audio features and available genres\n- Any other application that you can think of. Feel free to discuss!\n\n# Column Description\n\n- **track_id**: The Spotify ID for the track\n- **artists**: The artists' names who performed the track. If there is more than one artist, they are separated by a `;`\n- **album_name**: The album name in which the track appears\n- **track_name**: Name of the track\n- **popularity**: **The popularity of a track is a value between 0 and 100, with 100 being the most popular**. The popularity is calculated by algorithm and is based, in the most part, on the total number of plays the track has had and how recent those plays are. Generally speaking, songs that are being played a lot now will have a higher popularity than songs that were played a lot in the past. Duplicate tracks (e.g. the same track from a single and an album) are rated independently. Artist and album popularity is derived mathematically from track popularity.\n- **duration_ms**: The track length in milliseconds\n- **explicit**: Whether or not the track has explicit lyrics (true = yes it does; false = no it does not OR unknown)\n- **danceability**: Danceability describes how suitable a track is for dancing based on a combination of musical elements including tempo, rhythm stability, beat strength, and overall regularity. A value of 0.0 is least danceable and 1.0 is most danceable\n- **energy**: Energy is a measure from 0.0 to 1.0 and represents a perceptual measure of intensity and activity. Typically, energetic tracks feel fast, loud, and noisy. For example, death metal has high energy, while a Bach prelude scores low on the scale\n- **key**: The key the track is in. Integers map to pitches using standard Pitch Class notation. E.g. `0 = C`, `1 = C\u266f/D\u266d`, `2 = D`, and so on. If no key was detected, the value is -1\n- **loudness**: The overall loudness of a track in decibels (dB)\n- **mode**: Mode indicates the modality (major or minor) of a track, the type of scale from which its melodic content is derived. Major is represented by 1 and minor is 0\n- **speechiness**: Speechiness detects the presence of spoken words in a track. The more exclusively speech-like the recording (e.g. talk show, audio book, poetry), the closer to 1.0 the attribute value. Values above 0.66 describe tracks that are probably made entirely of spoken words. Values between 0.33 and 0.66 describe tracks that may contain both music and speech, either in sections or layered, including such cases as rap music. Values below 0.33 most likely represent music and other non-speech-like tracks\n- **acousticness**: A confidence measure from 0.0 to 1.0 of whether the track is acoustic. 
1.0 represents high confidence the track is acoustic\n- **instrumentalness**: Predicts whether a track contains no vocals. \"Ooh\" and \"aah\" sounds are treated as instrumental in this context. Rap or spoken word tracks are clearly \"vocal\". The closer the instrumentalness value is to 1.0, the greater likelihood the track contains no vocal content\n- **liveness**: Detects the presence of an audience in the recording. Higher liveness values represent an increased probability that the track was performed live. A value above 0.8 provides strong likelihood that the track is live\n- **valence**: A measure from 0.0 to 1.0 describing the musical positiveness conveyed by a track. Tracks with high valence sound more positive (e.g. happy, cheerful, euphoric), while tracks with low valence sound more negative (e.g. sad, depressed, angry)\n- **tempo**: The overall estimated tempo of a track in beats per minute (BPM). In musical terminology, tempo is the speed or pace of a given piece and derives directly from the average beat duration\n- **time_signature**: An estimated time signature. The time signature (meter) is a notational convention to specify how many beats are in each bar (or measure). The time signature ranges from 3 to 7 indicating time signatures of `3/4`, to `7/4`.\n- **track_genre**: The genre in which the track belongs\n\n# Acknowledgement\n\nImage credits: [BPR world](https://www.bprworld.com/news/spotify-vs-radio-the-battle-continues/)", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 2570056, "CreatorUserId": 7899307, "OwnerUserId": 7899307.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4372070.0, "CurrentDatasourceVersionId": 4430808.0, "ForumId": 2599627, "Type": 2, "CreationDate": "10/22/2022 14:40:15", "LastActivityDate": "10/22/2022", "TotalViews": 74784, "TotalDownloads": 11612, "TotalVotes": 194, "TotalKernels": 29}]
|
[{"Id": 7899307, "UserName": "maharshipandya", "DisplayName": "MaharshiPandya", "RegisterDate": "07/14/2021", "PerformanceTier": 2}]
|
| false | 1 | 201 | 0 | 1,395 | 201 |
||
129200354
|
import warnings
warnings.filterwarnings("ignore")
from datasets import load_dataset
dataset = load_dataset("few_rel", "default")
dataset
dataset["train_wiki"][0]
import numpy as np
def train_val_test(dataset):
indexes = np.arange(0, len(dataset["train_wiki"]), 1)
train = []
val = []
test = []
while len(indexes) > 0:
        pos = np.random.randint(0, len(indexes))
        instance = int(indexes[pos])  # use the remaining dataset index itself, not its position in the shrinking array
        if len(train) == int(len(dataset["train_wiki"]) * 0.6):
            if len(val) == int(len(dataset["train_wiki"]) * 0.2):
                test.append(instance)
            else:
                val.append(instance)
        else:
            train.append(instance)
        indexes = np.delete(indexes, pos)
train = np.sort(train)
val = np.sort(val)
test = np.sort(test)
return train, val, test
train, val, test = train_val_test(dataset)
print("Train instances: ", len(train))
print("Validation instances: ", len(val))
print("Test instances: ", len(test))
def pairs_with_markers(instance, conjunto):
new_instances = []
for i in conjunto:
i = int(i)
        # Define the entity markers and sort them by token index (descending, so each insertion does not shift the positions still to be inserted)
markers = [
("[E1]", instance["train_wiki"][i]["head"]["indices"][0][0]),
("[/E1]", instance["train_wiki"][i]["head"]["indices"][0][-1] + 1),
("[E2]", instance["train_wiki"][i]["tail"]["indices"][0][0]),
("[/E2]", instance["train_wiki"][i]["tail"]["indices"][0][-1] + 1),
]
markers = sorted(markers, key=lambda x: x[-1], reverse=True)
        # Insert the entity marker strings into the token sequence.
tokens = instance["train_wiki"][i]["tokens"]
for marker, idx in markers:
tokens.insert(idx, marker)
        # Append the new example to the list.
new_instances.append(
{
"text": " ".join(tokens),
"head": instance["train_wiki"][i]["head"]["type"],
"tail": instance["train_wiki"][i]["tail"]["type"],
"label": instance["train_wiki"][i]["relation"],
}
)
return new_instances
train_data = pairs_with_markers(dataset, train)
val_data = pairs_with_markers(dataset, val)
test_data = pairs_with_markers(dataset, test)
print(train_data[0])
from datasets import Dataset, Value, ClassLabel, Sequence, Features
label_names = sorted(
list(set([inst["label"] for inst in train_data + val_data + test_data]))
)
label_to_id = {label: i for i, label in enumerate(label_names)}
num_labels = len(label_names)
features = Features(
{
"text": Value(dtype="string", id=None),
"head": Value(dtype="string", id=None),
"tail": Value(dtype="string", id=None),
"label": ClassLabel(names=label_names, id=None),
}
)
features
train_dataset = Dataset.from_list(train_data, features=features, split="train")
val_dataset = Dataset.from_list(val_data, features=features, split="dev")
from transformers.models.xlm_roberta.modeling_xlm_roberta import (
XLMRobertaModel,
XLMRobertaPreTrainedModel,
)
from transformers.modeling_outputs import SequenceClassifierOutput
from typing import Optional, Union, Tuple
import torch.nn as nn
import torch
class XLMRobertaForEntityPairClassification(XLMRobertaPreTrainedModel):
def __init__(self, config):
super().__init__(config)
self.num_labels = config.num_labels
self.config = config
self.roberta = XLMRobertaModel(config, add_pooling_layer=False)
self.dense = nn.Linear(2 * config.hidden_size, config.hidden_size)
classifier_dropout = (
config.classifier_dropout
if config.classifier_dropout is not None
else config.hidden_dropout_prob
)
self.dropout = nn.Dropout(classifier_dropout)
self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
# Initialize weights and apply final processing
self.post_init()
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
head_pos: Optional[torch.LongTensor] = None,
tail_pos: Optional[torch.LongTensor] = None,
attention_mask: Optional[torch.FloatTensor] = None,
token_type_ids: Optional[torch.LongTensor] = None,
position_ids: Optional[torch.LongTensor] = None,
head_mask: Optional[torch.FloatTensor] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
labels: Optional[torch.LongTensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
) -> Union[Tuple, SequenceClassifierOutput]:
r"""
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
return_dict = (
return_dict if return_dict is not None else self.config.use_return_dict
)
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
sequence_output = outputs[0]
head_pos = (
head_pos.unsqueeze(dim=-1).repeat(1, sequence_output.size(-1)).unsqueeze(1)
)
tail_pos = (
tail_pos.unsqueeze(dim=-1).repeat(1, sequence_output.size(-1)).unsqueeze(1)
)
        ### Build the entity-pair representation from the [E1]/[E2] marker embeddings ###
h_head = sequence_output.gather(dim=1, index=head_pos).squeeze(1)
h_tail = sequence_output.gather(dim=1, index=tail_pos).squeeze(1)
entity_pair_repr = torch.cat([h_head, h_tail], dim=-1)
entity_pair_repr = self.dropout(entity_pair_repr)
entity_pair_repr = self.dense(entity_pair_repr)
entity_pair_repr = torch.tanh(entity_pair_repr)
#######################
entity_pair_repr = self.dropout(entity_pair_repr)
logits = self.out_proj(entity_pair_repr)
loss = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
self.config.problem_type = "regression"
elif self.num_labels > 1 and (
labels.dtype == torch.long or labels.dtype == torch.int
):
self.config.problem_type = "single_label_classification"
else:
self.config.problem_type = "multi_label_classification"
if self.config.problem_type == "regression":
loss_fct = nn.MSELoss()
if self.num_labels == 1:
loss = loss_fct(logits.squeeze(), labels.squeeze())
else:
loss = loss_fct(logits, labels)
elif self.config.problem_type == "single_label_classification":
loss_fct = nn.CrossEntropyLoss()
loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
elif self.config.problem_type == "multi_label_classification":
loss_fct = nn.BCEWithLogitsLoss()
loss = loss_fct(logits, labels)
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# @title Tokenizer configuration
model_name = "xlm-roberta-base" # @param {type:"string"}
use_fast_tokenizer = True # @param {type:"boolean"}
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained(
model_name,
use_fast=True,
add_prefix_space=True,
)
tokenizer.add_tokens(["[E1]", "[/E1]", "[E2]", "[/E2]"])
head_token_id, tail_token_id = tokenizer.convert_tokens_to_ids(["[E1]", "[E2]"])
def preprocess_function(examples):
# Tokenize the texts
result = tokenizer(examples["text"], padding=False, truncation=True)
result["head_pos"] = [
next((i for i, token in enumerate(tokens) if token == head_token_id), -1)
for tokens in result["input_ids"]
]
result["tail_pos"] = [
next((i for i, token in enumerate(tokens) if token == tail_token_id), -1)
for tokens in result["input_ids"]
]
result["label"] = examples["label"]
return result
print(preprocess_function(val_dataset[:1]))
train_dataset = train_dataset.map(
preprocess_function,
batched=True,
desc="Running tokenizer on train dataset",
)
val_dataset = val_dataset.map(
preprocess_function,
batched=True,
desc="Running tokenizer on validation dataset",
)
from transformers import DataCollatorWithPadding
data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
# @title Model configuration
# model_name = "xlm-roberta-base" #@param {type:"string"}
# use_fast_tokenizer = True #@param {type:"boolean"}
from transformers import AutoConfig, AutoModelForSequenceClassification
config = AutoConfig.from_pretrained(model_name, num_labels=num_labels)
# tokenizer = AutoTokenizer.from_pretrained(
# model_name,
# use_fast=True,
# add_prefix_space=True,
# )
# tokenizer.add_tokens(['[E1]', '[/E1]', '[E2]', '[/E2]'])
model = XLMRobertaForEntityPairClassification.from_pretrained(model_name, config=config)
model.resize_token_embeddings(len(tokenizer))
# Set the correspondences label/ID inside the model config
model.config.label2id = {l: i for i, l in enumerate(label_names)}
model.config.id2label = {i: l for i, l in enumerate(label_names)}
# head_token_id, tail_token_id = tokenizer.convert_tokens_to_ids(['[E1]', '[E2]'])
model.config.head_token = "[E1]"
model.config.head_token_id = head_token_id
model.config.tail_token = "[E2]"
model.config.tail_token_id = tail_token_id
config = model.config
from sklearn.metrics import precision_recall_fscore_support
def compute_metrics(p):
predictions, labels = p
predictions = np.argmax(predictions, axis=-1)
positive_labels = [
i for i, label in enumerate(label_names) if label != "no_relation"
]
precision, recall, f1, _ = precision_recall_fscore_support(
labels, predictions, labels=positive_labels, average="micro"
)
return {"precision": precision, "recall": recall, "f1": f1}
# @title Defining a Trainer instance
learning_rate = 1e-5 # @param {type:"number"}
batch_size = 32 # @param {type:"integer"}
gradient_accumulation_steps = 1 # @param {type:"integer"}
epochs = 3 # @param {type:"integer"}
scheduler_type = "constant" # @param {type:"string"}
weight_decay = 0.01 # @param {type: "number"}
evaluation_strategy = "steps" # @param {type: "string"}
metric_for_best_model = "f1" # @param {type: "string"}
seed = 42 # @param {type: "integer"}
fp16 = True # @param {type: "boolean"}
from transformers import Trainer, TrainingArguments
train_args = TrainingArguments(
do_train=True,
do_eval=True,
evaluation_strategy=evaluation_strategy,
save_strategy=evaluation_strategy,
per_device_train_batch_size=batch_size,
per_device_eval_batch_size=batch_size,
gradient_accumulation_steps=gradient_accumulation_steps,
learning_rate=learning_rate,
weight_decay=weight_decay,
num_train_epochs=epochs,
lr_scheduler_type=scheduler_type,
seed=seed,
fp16=fp16,
load_best_model_at_end=True,
save_total_limit=1,
metric_for_best_model=metric_for_best_model,
output_dir="tmp/",
)
trainer = Trainer(
model=model,
args=train_args,
train_dataset=train_dataset,
eval_dataset=val_dataset,
tokenizer=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics,
)
train_result = trainer.train()
metrics = train_result.metrics
test_dataset = Dataset.from_list(test_data, features=features, split="test")
test_dataset = test_dataset.map(
preprocess_function,
batched=True,
desc="Running tokenizer on test dataset",
)
test_results = trainer.evaluate(test_dataset)
test_results
from tabulate import tabulate
# Get predictions on the development (validation) set
val_predictions = trainer.predict(val_dataset)
# Compute precision, recall, and F1 for each class
scores = precision_recall_fscore_support(
val_predictions.label_ids, val_predictions.predictions.argmax(-1), average=None
)
# Display the table (sorted by F1)
table = tabulate(
sorted(list(zip(label_names, *scores)), key=lambda x: x[-2], reverse=True),
headers=["Class", "Precision", "Recall", "F1", "Freq"],
tablefmt="orgtbl",
)
print(table)
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(val_predictions.label_ids, val_predictions.predictions.argmax(-1))
table = tabulate(
list(zip(label_names, *cm.T.tolist())),
headers=["", *label_names],
tablefmt="orgtbl",
)
print(table)
# 1. Restrict train to the 16 classes present in val, then split train into train and val, keeping the original val as test.
# 2. Apply entity markers as in the lab.
# 3. Compute the metrics.
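# A minimal sketch of one reading of step 1 (an assumption, not part of the original
# notebook): keep only training examples whose relation also occurs in the val split.
val_relations = {inst["label"] for inst in val_data}
train_data_filtered = [inst for inst in train_data if inst["label"] in val_relations]
print(len(train_data), "->", len(train_data_filtered), "training examples after filtering")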
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/200/129200354.ipynb
| null | null |
[{"Id": 129200354, "ScriptId": 38026999, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12126094, "CreationDate": "05/11/2023 18:59:59", "VersionNumber": 3.0, "Title": "Extracci\u00f3n de Relaciones MDT", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 405.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 405.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 3,863 | 0 | 3,863 | 3,863 |
||
129200350
|
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
veriler = pd.read_csv("maaslar.csv")
x = veriler.iloc[:, 1:2]
y = veriler.iloc[:, 2:]
X = x.values
Y = y.values
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X, Y)
plt.scatter(X, Y, color="red")
plt.plot(x, lin_reg.predict(X), color="blue")
plt.show()
from sklearn.preprocessing import PolynomialFeatures
poly_reg = PolynomialFeatures(degree=2)
x_poly = poly_reg.fit_transform(X)
print(x_poly)
lin_reg2 = LinearRegression()
lin_reg2.fit(x_poly, y)
plt.scatter(X, Y, color="red")
plt.plot(X, lin_reg2.predict(poly_reg.fit_transform(X)), color="blue")
plt.show()
from sklearn.preprocessing import PolynomialFeatures
poly_reg = PolynomialFeatures(degree=4)
x_poly = poly_reg.fit_transform(X)
print(x_poly)
lin_reg2 = LinearRegression()
lin_reg2.fit(x_poly, y)
plt.scatter(X, Y, color="red")
plt.plot(X, lin_reg2.predict(poly_reg.fit_transform(X)), color="blue")
plt.show()
print(lin_reg.predict([[11]]))
print(lin_reg.predict([[6.6]]))
print(lin_reg2.predict(poly_reg.fit_transform([[6.6]])))
print(lin_reg2.predict(poly_reg.fit_transform([[11]])))
from sklearn.preprocessing import StandardScaler
sc1 = StandardScaler()
x_olcekli = sc1.fit_transform(X)
sc2 = StandardScaler()
y_olcekli = np.ravel(sc2.fit_transform(Y.reshape(-1, 1)))
from sklearn.svm import SVR
svr_reg = SVR(kernel="rbf")
svr_reg.fit(x_olcekli, y_olcekli)
plt.scatter(x_olcekli, y_olcekli, color="red")
plt.plot(x_olcekli, svr_reg.predict(x_olcekli), color="blue")
plt.show()
print(svr_reg.predict([[11]]))
print(svr_reg.predict([[6.6]]))
from sklearn.tree import DecisionTreeRegressor
r_dt = DecisionTreeRegressor(random_state=0)
r_dt.fit(X, Y)
Z = X + 0.5
K = X - 0.4
plt.scatter(X, Y, color="red")
plt.plot(x, r_dt.predict(X), color="blue")
plt.plot(x, r_dt.predict(Z), color="green")
plt.plot(x, r_dt.predict(K), color="yellow")
plt.show()
print(r_dt.predict([[11]]))
print(r_dt.predict([[6.6]]))
from sklearn.ensemble import RandomForestRegressor
rf_reg = RandomForestRegressor(n_estimators=10, random_state=0)
rf_reg.fit(X, Y.ravel())
print(rf_reg.predict([[6.6]]))
plt.scatter(X, Y, color="red")
plt.plot(X, rf_reg.predict(X), color="blue")
plt.plot(X, rf_reg.predict(Z), color="green")
plt.plot(x, r_dt.predict(K), color="yellow")
# # Example 2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import pydot
kyphosis_df = pd.read_csv("Kyphosis_Data.csv")
kyphosis_df.head()
kyphosis_df.shape
kyphosis_df.columns
kyphosis_df.select_dtypes(include="number").columns
kyphosis_df.select_dtypes(exclude="number").columns
kyphosis_df.dtypes
kyphosis_df.info()
kyphosis_df.describe()
kyphosis_df.describe(exclude="number")
kyphosis_df["Kyphosis"].describe()
kyphosis_df["Kyphosis"].nunique()
kyphosis_df["Kyphosis"].value_counts()
kyphosis_df["Kyphosis"].value_counts(normalize=True, dropna=False)
sns.set_style("whitegrid")
plt.figure(figsize=(12, 8))
sns.countplot(x="Kyphosis", data=kyphosis_df)
plt.show()
kyphosis_df["Kyphosis"].value_counts()
kyphosis_df["Kyphosis"].value_counts(normalize=True, dropna=False)
kyphosis_df[["Age", "Number", "Start"]].plot(
kind="hist", subplots=True, layout=(1, 3), figsize=(16, 8)
)
plt.show()
plt.figure(figsize=(12, 8))
sns.pairplot(data=kyphosis_df, hue="Kyphosis", palette="Dark2")
plt.show()
plt.figure(figsize=(12, 8))
sns.distplot(kyphosis_df["Age"].dropna(), kde=False, bins=30)
plt.show()
kyphosis_df["Age"].mean()
plt.figure(figsize=(12, 8))
for i in kyphosis_df["Kyphosis"].unique():
sns.distplot(
kyphosis_df["Age"][kyphosis_df["Kyphosis"] == i],
kde=True,
label="{}".format(i),
bins=20,
)
plt.legend()
plt.show()
g = sns.FacetGrid(kyphosis_df, col="Kyphosis", height=7)
g.map(plt.hist, "Age", bins=30)
plt.show()
kyphosis_df["Age"][kyphosis_df["Kyphosis"] == "absent"].mean()
kyphosis_df["Age"][kyphosis_df["Kyphosis"] == "present"].mean()
plt.figure(figsize=(12, 8))
sns.distplot(kyphosis_df["Number"].dropna(), kde=False, bins=30)
plt.show()
kyphosis_df["Number"].mean()
plt.figure(figsize=(12, 8))
for i in kyphosis_df["Kyphosis"].unique():
sns.distplot(
kyphosis_df["Number"][kyphosis_df["Kyphosis"] == i],
kde=True,
label="{}".format(i),
bins=20,
)
plt.legend()
plt.show()
g = sns.FacetGrid(kyphosis_df, col="Kyphosis", height=7)
g.map(plt.hist, "Number", bins=30)
plt.show()
kyphosis_df["Number"][kyphosis_df["Kyphosis"] == "absent"].mean()
kyphosis_df["Number"][kyphosis_df["Kyphosis"] == "present"].mean()
plt.figure(figsize=(12, 8))
sns.distplot(kyphosis_df["Start"].dropna(), kde=False, bins=30)
plt.show()
kyphosis_df["Start"].mean()
plt.figure(figsize=(12, 8))
for i in kyphosis_df["Kyphosis"].unique():
sns.distplot(
kyphosis_df["Start"][kyphosis_df["Kyphosis"] == i],
kde=True,
label="{}".format(i),
bins=20,
)
plt.legend()
plt.show()
g = sns.FacetGrid(kyphosis_df, col="Kyphosis", height=7)
g.map(plt.hist, "Start", bins=30)
plt.show()
kyphosis_df["Start"][kyphosis_df["Kyphosis"] == "absent"].mean()
kyphosis_df["Start"][kyphosis_df["Kyphosis"] == "present"].mean()
kyphosis_df.corr()
plt.figure(figsize=(12, 8))
ax = sns.heatmap(data=kyphosis_df.corr(), annot=True, cmap="coolwarm")
plt.ylim(3, 0)
plt.show()
plt.figure(figsize=(12, 8))
plt.subplot(1, 3, 1)
sns.boxplot(x="Kyphosis", y="Age", data=kyphosis_df)
plt.subplot(1, 3, 2)
sns.boxplot(x="Kyphosis", y="Number", data=kyphosis_df)
plt.subplot(1, 3, 3)
sns.boxplot(x="Kyphosis", y="Start", data=kyphosis_df)
plt.show()
kyphosis_df.columns
X = kyphosis_df.drop("Kyphosis", axis=1)
y = kyphosis_df["Kyphosis"]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=101
)
X_train.shape
y_train.shape
X_test.shape
y_test.shape
from sklearn.ensemble import RandomForestClassifier
rfc_model = RandomForestClassifier(n_estimators=200)
rfc_model.fit(X_train, y_train)
predictions = rfc_model.predict(X_test)
predictions
from sklearn.metrics import classification_report
print(classification_report(y_test, predictions))
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/200/129200350.ipynb
| null | null |
[{"Id": 129200350, "ScriptId": 38410891, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11781124, "CreationDate": "05/11/2023 18:59:56", "VersionNumber": 1.0, "Title": "hafta10", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 295.0, "LinesInsertedFromPrevious": 295.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
veriler = pd.read_csv("maaslar.csv")
x = veriler.iloc[:, 1:2]
y = veriler.iloc[:, 2:]
X = x.values
Y = y.values
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X, Y)
plt.scatter(X, Y, color="red")
plt.plot(x, lin_reg.predict(X), color="blue")
plt.show()
from sklearn.preprocessing import PolynomialFeatures
poly_reg = PolynomialFeatures(degree=2)
x_poly = poly_reg.fit_transform(X)
print(x_poly)
lin_reg2 = LinearRegression()
lin_reg2.fit(x_poly, y)
plt.scatter(X, Y, color="red")
plt.plot(X, lin_reg2.predict(poly_reg.fit_transform(X)), color="blue")
plt.show()
from sklearn.preprocessing import PolynomialFeatures
poly_reg = PolynomialFeatures(degree=4)
x_poly = poly_reg.fit_transform(X)
print(x_poly)
lin_reg2 = LinearRegression()
lin_reg2.fit(x_poly, y)
plt.scatter(X, Y, color="red")
plt.plot(X, lin_reg2.predict(poly_reg.fit_transform(X)), color="blue")
plt.show()
print(lin_reg.predict([[11]]))
print(lin_reg.predict([[6.6]]))
print(lin_reg2.predict(poly_reg.fit_transform([[6.6]])))
print(lin_reg2.predict(poly_reg.fit_transform([[11]])))
from sklearn.preprocessing import StandardScaler
sc1 = StandardScaler()
x_olcekli = sc1.fit_transform(X)
sc2 = StandardScaler()
y_olcekli = np.ravel(sc2.fit_transform(Y.reshape(-1, 1)))
from sklearn.svm import SVR
svr_reg = SVR(kernel="rbf")
svr_reg.fit(x_olcekli, y_olcekli)
plt.scatter(x_olcekli, y_olcekli, color="red")
plt.plot(x_olcekli, svr_reg.predict(x_olcekli), color="blue")
plt.show()
print(svr_reg.predict([[11]]))
print(svr_reg.predict([[6.6]]))
from sklearn.tree import DecisionTreeRegressor
r_dt = DecisionTreeRegressor(random_state=0)
r_dt.fit(X, Y)
Z = X + 0.5
K = X - 0.4
plt.scatter(X, Y, color="red")
plt.plot(x, r_dt.predict(X), color="blue")
plt.plot(x, r_dt.predict(Z), color="green")
plt.plot(x, r_dt.predict(K), color="yellow")
plt.show()
print(r_dt.predict([[11]]))
print(r_dt.predict([[6.6]]))
from sklearn.ensemble import RandomForestRegressor
rf_reg = RandomForestRegressor(n_estimators=10, random_state=0)
rf_reg.fit(X, Y.ravel())
print(rf_reg.predict([[6.6]]))
plt.scatter(X, Y, color="red")
plt.plot(X, rf_reg.predict(X), color="blue")
plt.plot(X, rf_reg.predict(Z), color="green")
plt.plot(X, rf_reg.predict(K), color="yellow")  # plot the forest's predictions on the shifted inputs as well
# # Example 2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import pydot
kyphosis_df = pd.read_csv("Kyphosis_Data.csv")
kyphosis_df.head()
kyphosis_df.shape
kyphosis_df.columns
kyphosis_df.select_dtypes(include="number").columns
kyphosis_df.select_dtypes(exclude="number").columns
kyphosis_df.dtypes
kyphosis_df.info()
kyphosis_df.describe()
kyphosis_df.describe(exclude="number")
kyphosis_df["Kyphosis"].describe()
kyphosis_df["Kyphosis"].nunique()
kyphosis_df["Kyphosis"].value_counts()
kyphosis_df["Kyphosis"].value_counts(normalize=True, dropna=False)
sns.set_style("whitegrid")
plt.figure(figsize=(12, 8))
sns.countplot(x="Kyphosis", data=kyphosis_df)
plt.show()
kyphosis_df["Kyphosis"].value_counts()
kyphosis_df["Kyphosis"].value_counts(normalize=True, dropna=False)
kyphosis_df[["Age", "Number", "Start"]].plot(
kind="hist", subplots=True, layout=(1, 3), figsize=(16, 8)
)
plt.show()
plt.figure(figsize=(12, 8))
sns.pairplot(data=kyphosis_df, hue="Kyphosis", palette="Dark2")
plt.show()
plt.figure(figsize=(12, 8))
sns.distplot(kyphosis_df["Age"].dropna(), kde=False, bins=30)
plt.show()
kyphosis_df["Age"].mean()
plt.figure(figsize=(12, 8))
for i in kyphosis_df["Kyphosis"].unique():
sns.distplot(
kyphosis_df["Age"][kyphosis_df["Kyphosis"] == i],
kde=True,
label="{}".format(i),
bins=20,
)
plt.legend()
plt.show()
g = sns.FacetGrid(kyphosis_df, col="Kyphosis", height=7)
g.map(plt.hist, "Age", bins=30)
plt.show()
kyphosis_df["Age"][kyphosis_df["Kyphosis"] == "absent"].mean()
kyphosis_df["Age"][kyphosis_df["Kyphosis"] == "present"].mean()
plt.figure(figsize=(12, 8))
sns.distplot(kyphosis_df["Number"].dropna(), kde=False, bins=30)
plt.show()
kyphosis_df["Number"].mean()
plt.figure(figsize=(12, 8))
for i in kyphosis_df["Kyphosis"].unique():
sns.distplot(
kyphosis_df["Number"][kyphosis_df["Kyphosis"] == i],
kde=True,
label="{}".format(i),
bins=20,
)
plt.legend()
plt.show()
g = sns.FacetGrid(kyphosis_df, col="Kyphosis", height=7)
g.map(plt.hist, "Number", bins=30)
plt.show()
kyphosis_df["Number"][kyphosis_df["Kyphosis"] == "absent"].mean()
kyphosis_df["Number"][kyphosis_df["Kyphosis"] == "present"].mean()
plt.figure(figsize=(12, 8))
sns.distplot(kyphosis_df["Start"].dropna(), kde=False, bins=30)
plt.show()
kyphosis_df["Start"].mean()
plt.figure(figsize=(12, 8))
for i in kyphosis_df["Kyphosis"].unique():
sns.distplot(
kyphosis_df["Start"][kyphosis_df["Kyphosis"] == i],
kde=True,
label="{}".format(i),
bins=20,
)
plt.legend()
plt.show()
g = sns.FacetGrid(kyphosis_df, col="Kyphosis", height=7)
g.map(plt.hist, "Start", bins=30)
plt.show()
kyphosis_df["Start"][kyphosis_df["Kyphosis"] == "absent"].mean()
kyphosis_df["Start"][kyphosis_df["Kyphosis"] == "present"].mean()
kyphosis_df.corr()
plt.figure(figsize=(12, 8))
ax = sns.heatmap(data=kyphosis_df.corr(), annot=True, cmap="coolwarm")
plt.ylim(3, 0)
plt.show()
plt.figure(figsize=(12, 8))
plt.subplot(1, 3, 1)
sns.boxplot(x="Kyphosis", y="Age", data=kyphosis_df)
plt.subplot(1, 3, 2)
sns.boxplot(x="Kyphosis", y="Number", data=kyphosis_df)
plt.subplot(1, 3, 3)
sns.boxplot(x="Kyphosis", y="Start", data=kyphosis_df)
plt.show()
kyphosis_df.columns
X = kyphosis_df.drop("Kyphosis", axis=1)
y = kyphosis_df["Kyphosis"]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=101
)
X_train.shape
y_train.shape
X_test.shape
y_test.shape
from sklearn.ensemble import RandomForestClassifier
rfc_model = RandomForestClassifier(n_estimators=200)
rfc_model.fit(X_train, y_train)
predictions = rfc_model.predict(X_test)
predictions
from sklearn.metrics import classification_report
print(classification_report(y_test, predictions))
| false | 0 | 2,430 | 0 | 2,430 | 2,430 |
||
129394735
|
<jupyter_start><jupyter_text>Annotated CT Scans Lung Masks
This is a dataset for CT scans of Lung regions. The dataset is in NIfTI (Neuroimaging Informatics Technology Initiative) format. It consists of CT scans of 7 patients with an overall total of 1775 slices. Along with the CT scans are given annotated lung masks where the left lung and the right lung are annotated separately. The dimensions of the slices are 512 x 512.
The png format of the dataset can be found at https://www.kaggle.com/datasets/nervegear/ct-scans-lung-masks.
The code for the conversion from NIfTI format to PNG format can be found here: https://www.kaggle.com/code/nervegear/nifti-to-png
Kaggle dataset identifier: annotated-ct-scans-lung-masks
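A minimal sketch (not part of the original notebook) of loading one volume from this dataset with nibabel; `ct_path` is a placeholder and should point at one of the .nii files described above:
import nibabel as nib
ct_path = "..."  # placeholder: path to one CT volume (or lung-mask volume) from this dataset
volume = nib.load(ct_path).get_fdata()
print(volume.shape)  # expected to be (512, 512, number_of_slices) for this dataset
middle_slice = volume[:, :, volume.shape[2] // 2]  # a single axial slice as a 2D array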
<jupyter_script># # Import libraries
import numpy as np
import shutil, os, nibabel
import sys, getopt
import argparse
import tqdm
import imageio, cv2
import matplotlib.pyplot as plt
from skimage.color import rgb2gray
from skimage import io, exposure
from scipy import ndimage
# Get list of nii or nii.gz source files
dir_path = "/kaggle/input/annotated-ct-scans-lung-masks/COVID-19-CT-Seg_20cases/COVID-19-CT-Seg_20cases/"
source_files = os.listdir(dir_path)
slice_counter = 0
# Identify sample ids and get source ids
source_ids = [
files[0:]
for files in source_files
if files.endswith(".nii") and files.startswith("corona")
]
print(source_ids)
sample_ids = list(set(source_ids))
print(sample_ids)
def process_ct(image):
# Process the CT image
min_value = np.min(image)
max_value = np.max(image)
# Normalize the image
norm_image = (image - min_value) / (max_value - min_value)
p1, p2 = np.percentile(norm_image, (3, 97))
# print(p1, p2)
    # Stretch contrast by rescaling intensities between the chosen percentiles
hist_image = exposure.rescale_intensity(norm_image, in_range=(p1, p2))
# Rotate the image
rotate_image = ndimage.rotate(hist_image, angle=90, reshape=False)
# Crop the image
# cropped_image = rotate_image[40:360, 60:450]
cropped_image = rotate_image
# plt.imshow(cropped_image, cmap='gray')
# plt.show()
return cropped_image
def process_mask(image):
# Process mask image
binary_image = cv2.threshold(image, 0.5, 1, cv2.THRESH_BINARY)[1]
rotate_image = ndimage.rotate(binary_image, angle=90, reshape=False)
# cropped_image = rotate_image[40:360, 60:450]
cropped_image = rotate_image
# plt.imshow(cropped_image, cmap='gray')
# plt.show()
return cropped_image
# Clear any previous output (ignore errors if the directory cannot be fully removed)
shutil.rmtree("/kaggle/working/", ignore_errors=True)
# dir_path = '/kaggle/input/annotated-ct-scans-lung-masks/COVID-19-CT-Seg_20cases/'
ct_dir_path = "/kaggle/input/annotated-ct-scans-lung-masks/COVID-19-CT-Seg_20cases/COVID-19-CT-Seg_20cases/"
mask_dir_path = "/kaggle/input/annotated-ct-scans-lung-masks/Lung_Mask/Lung_Mask/"
output_ct_path = "/kaggle/working/COVID-19-CT-Seg_20cases/"
os.makedirs(output_ct_path, exist_ok=True)  # makedirs recreates /kaggle/working/ if it was removed above
output_mask_path = "/kaggle/working/Lung_Mask/"
os.makedirs(output_mask_path, exist_ok=True)
for file in sample_ids:
fname = os.path.basename(file)
# print(fname)
ct_file_name = ct_dir_path + fname + "/" + fname[:11] + "_org_" + fname[-7:-4]
mask_file_name = mask_dir_path + fname[:-4]
# print(ct_file_name)
# print(mask_file_name)
ct_image_array = nibabel.load(ct_file_name + ".nii").get_fdata()
mask_image_array = nibabel.load(mask_file_name + ".nii").get_fdata()
# print(ct_image_array.shape)
# print(mask_image_array.shape)
# print(len(image_array.shape))
# print(fname[:-4] + "_z" + "{:0>3}".format(str(0+1))+".png")
# For 3D image inputted
if len(ct_image_array.shape) == 3:
nx, ny, nz = ct_image_array.shape
total_slices = ct_image_array.shape[2]
# iterate through the slices
for current_slice in range(0, total_slices):
# alternate slices
if (slice_counter % 1) == 0:
ct_data = ct_image_array[:, :, current_slice]
mask_data = mask_image_array[:, :, current_slice]
# print("Saving image... ")
# print(ct_data.shape)
# print(mask_data.shape)
processed_ct_image = process_ct(ct_data)
processed_mask_image = process_mask(mask_data)
ct_image_name = (
fname[:-4] + "_z" + "{:0>3}".format(str(current_slice + 1)) + ".png"
)
imageio.imwrite(output_ct_path + ct_image_name, processed_ct_image)
# plt.imshow(processed_ct_image, cmap='gray')
# plt.show()
mask_image_name = (
fname[:-4] + "_z" + "{:0>3}".format(str(current_slice + 1)) + ".png"
)
imageio.imwrite(
output_mask_path + mask_image_name, processed_mask_image
)
# plt.imshow(processed_mask_image, cmap='gray')
# plt.show()
# print("Saved.")
# print("Saved" + str(fname) + str(current_slice))
print("Saved" + str(fname))
# # Zip the output folders
import zipfile

# Zip each output folder (both the CT slices and the lung masks)
for folder_path in ("/kaggle/working/COVID-19-CT-Seg_20cases", "/kaggle/working/Lung_Mask"):
    zip_path = folder_path + ".zip"
    # Create a zip file and add every file in the folder to it
    with zipfile.ZipFile(zip_path, "w") as myzip:
        for file_name in os.listdir(folder_path):
            file_path = os.path.join(folder_path, file_name)
            myzip.write(file_path, file_name)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/394/129394735.ipynb
|
annotated-ct-scans-lung-masks
|
nervegear
|
[{"Id": 129394735, "ScriptId": 38223303, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3311238, "CreationDate": "05/13/2023 12:21:59", "VersionNumber": 2.0, "Title": "NifTI to png", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 129.0, "LinesInsertedFromPrevious": 97.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 32.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185402139, "KernelVersionId": 129394735, "SourceDatasetVersionId": 5674613}]
|
[{"Id": 5674613, "DatasetId": 3233996, "DatasourceVersionId": 5750158, "CreatorUserId": 3311238, "LicenseName": "Unknown", "CreationDate": "05/13/2023 06:42:22", "VersionNumber": 2.0, "Title": "Annotated CT Scans Lung Masks", "Slug": "annotated-ct-scans-lung-masks", "Subtitle": "Annotated CT Scans of Lungs in NIfTI Format", "Description": "This is a dataset for CT scans of Lung regions. The dataset is in NIfTI (Neuroimaging Informatics Technology Initiative) format. It consists of CT scans of 7 patients with an overall total of 1775 slices. Along with the CT scans are given annotated lung masks where the left lung and the right lung are annotated separately. The dimensions of the slices are 512 x 512.\n\nThe png format of the dataset can be found at https://www.kaggle.com/datasets/nervegear/ct-scans-lung-masks. \nThe code for the conversion from NIfTI format to PNG format can be found here: https://www.kaggle.com/code/nervegear/nifti-to-png", "VersionNotes": "Data Update 2023-05-13", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3233996, "CreatorUserId": 3311238, "OwnerUserId": 3311238.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5674613.0, "CurrentDatasourceVersionId": 5750158.0, "ForumId": 3299154, "Type": 2, "CreationDate": "05/07/2023 10:31:49", "LastActivityDate": "05/07/2023", "TotalViews": 84, "TotalDownloads": 1, "TotalVotes": 1, "TotalKernels": 1}]
|
[{"Id": 3311238, "UserName": "nervegear", "DisplayName": "Vivek N. Soren", "RegisterDate": "06/04/2019", "PerformanceTier": 0}]
|
# # Import libraries
import numpy as np
import shutil, os, nibabel
import sys, getopt
import argparse
import tqdm
import imageio, cv2
import matplotlib.pyplot as plt
from skimage.color import rgb2gray
from skimage import io, exposure
from scipy import ndimage
# Get list of nii or nii.gz source files
dir_path = "/kaggle/input/annotated-ct-scans-lung-masks/COVID-19-CT-Seg_20cases/COVID-19-CT-Seg_20cases/"
source_files = os.listdir(dir_path)
slice_counter = 0
# Identify sample ids and get source ids
source_ids = [
files[0:]
for files in source_files
if files.endswith(".nii") and files.startswith("corona")
]
print(source_ids)
sample_ids = list(set(source_ids))
print(sample_ids)
def process_ct(image):
# Process the CT image
min_value = np.min(image)
max_value = np.max(image)
# Normalize the image
norm_image = (image - min_value) / (max_value - min_value)
p1, p2 = np.percentile(norm_image, (3, 97))
# print(p1, p2)
    # Stretch contrast by rescaling intensities between the chosen percentiles
hist_image = exposure.rescale_intensity(norm_image, in_range=(p1, p2))
# Rotate the image
rotate_image = ndimage.rotate(hist_image, angle=90, reshape=False)
# Crop the image
# cropped_image = rotate_image[40:360, 60:450]
cropped_image = rotate_image
# plt.imshow(cropped_image, cmap='gray')
# plt.show()
return cropped_image
def process_mask(image):
# Process mask image
binary_image = cv2.threshold(image, 0.5, 1, cv2.THRESH_BINARY)[1]
rotate_image = ndimage.rotate(binary_image, angle=90, reshape=False)
# cropped_image = rotate_image[40:360, 60:450]
cropped_image = rotate_image
# plt.imshow(cropped_image, cmap='gray')
# plt.show()
return cropped_image
# Clear any previous output (ignore errors if the directory cannot be fully removed)
shutil.rmtree("/kaggle/working/", ignore_errors=True)
# dir_path = '/kaggle/input/annotated-ct-scans-lung-masks/COVID-19-CT-Seg_20cases/'
ct_dir_path = "/kaggle/input/annotated-ct-scans-lung-masks/COVID-19-CT-Seg_20cases/COVID-19-CT-Seg_20cases/"
mask_dir_path = "/kaggle/input/annotated-ct-scans-lung-masks/Lung_Mask/Lung_Mask/"
output_ct_path = "/kaggle/working/COVID-19-CT-Seg_20cases/"
os.makedirs(output_ct_path, exist_ok=True)  # makedirs recreates /kaggle/working/ if it was removed above
output_mask_path = "/kaggle/working/Lung_Mask/"
os.makedirs(output_mask_path, exist_ok=True)
for file in sample_ids:
fname = os.path.basename(file)
# print(fname)
ct_file_name = ct_dir_path + fname + "/" + fname[:11] + "_org_" + fname[-7:-4]
mask_file_name = mask_dir_path + fname[:-4]
# print(ct_file_name)
# print(mask_file_name)
ct_image_array = nibabel.load(ct_file_name + ".nii").get_fdata()
mask_image_array = nibabel.load(mask_file_name + ".nii").get_fdata()
# print(ct_image_array.shape)
# print(mask_image_array.shape)
# print(len(image_array.shape))
# print(fname[:-4] + "_z" + "{:0>3}".format(str(0+1))+".png")
# For 3D image inputted
if len(ct_image_array.shape) == 3:
nx, ny, nz = ct_image_array.shape
total_slices = ct_image_array.shape[2]
# iterate through the slices
for current_slice in range(0, total_slices):
# alternate slices
if (slice_counter % 1) == 0:
ct_data = ct_image_array[:, :, current_slice]
mask_data = mask_image_array[:, :, current_slice]
# print("Saving image... ")
# print(ct_data.shape)
# print(mask_data.shape)
processed_ct_image = process_ct(ct_data)
processed_mask_image = process_mask(mask_data)
ct_image_name = (
fname[:-4] + "_z" + "{:0>3}".format(str(current_slice + 1)) + ".png"
)
imageio.imwrite(output_ct_path + ct_image_name, processed_ct_image)
# plt.imshow(processed_ct_image, cmap='gray')
# plt.show()
mask_image_name = (
fname[:-4] + "_z" + "{:0>3}".format(str(current_slice + 1)) + ".png"
)
imageio.imwrite(
output_mask_path + mask_image_name, processed_mask_image
)
# plt.imshow(processed_mask_image, cmap='gray')
# plt.show()
# print("Saved.")
# print("Saved" + str(fname) + str(current_slice))
print("Saved" + str(fname))
# # Zip the output folders
import zipfile

# Zip each output folder (both the CT slices and the lung masks)
for folder_path in ("/kaggle/working/COVID-19-CT-Seg_20cases", "/kaggle/working/Lung_Mask"):
    zip_path = folder_path + ".zip"
    # Create a zip file and add every file in the folder to it
    with zipfile.ZipFile(zip_path, "w") as myzip:
        for file_name in os.listdir(folder_path):
            file_path = os.path.join(folder_path, file_name)
            myzip.write(file_path, file_name)
| false | 0 | 1,611 | 0 | 1,819 | 1,611 |
||
129394350
|
<jupyter_start><jupyter_text>Market Sales Dataset
**This dataset was specifically created for the final project of a CS 302 course, with the intention of being used as a source of data for the project**
The dataset provided contains information on the sales of a particular product in different markets. The dataset consists of 11 columns:
**Sales**: the sales of the product (in thousands of units)
**CompPrice**: the price charged by the competitor for a similar product
**Income**: the median income of households in the market area (in thousands of dollars)
**Advertising**: the level of advertising expenditure for the product in the market area (in thousands of dollars)
**Population**: the population of the market area (in thousands of people)
**Price**: the price charged for the product (in dollars)
**ShelveLoc**: the quality of the shelving location for the product at the store (categorical: Bad, Medium, Good)
**Age**: the average age of the people in the market area
**Education**: the education level of the people in the market area (measured as the average number of years of education)
**Urban**: whether the market area is an urban or rural area (categorical: Yes, No)
**US**: whether the market area is in the United States or not (categorical: Yes, No)
This dataset can be used to build models that predict the sales of the product based on the other features. The dataset can also be used for exploratory data analysis to gain insights into the relationships between the different features and the sales of the product.
Kaggle dataset identifier: cs-302-artificial-intelligence
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
features = pd.read_csv(
"/kaggle/input/cs-302-artificial-intelligence/CS 302 Artificial Intelligence.csv"
)
features.head(5)
features.info()
print("The shape of our features is:", features.shape)
features.isnull().any()
import seaborn as sns
sns.pairplot(data=features, hue="ShelveLoc")
df = pd.get_dummies(features, columns=["Urban", "US"], drop_first=True)
print(df.head())
df["ShelveLoc"] = df["ShelveLoc"].map({"Good": 1, "Medium": 2, "Bad": 3})
print(df.head())
x = df.iloc[:, 0:6]
y = df["ShelveLoc"]
x
y
df["ShelveLoc"].unique()
df.ShelveLoc.value_counts()
colnames = list(df.columns)
colnames
df.describe()
df.head()
labels = np.array(df["Income"])  # target variable: median household income
features = df.drop("Income", axis=1)
feature_list = list(df.columns)  # note: taken from the full df, so "Income" is still in this list
features = np.array(df)  # note: built from the full df, so the Income target remains in the feature matrix
from sklearn.model_selection import train_test_split
train_features, test_features, train_labels, test_labels = train_test_split(
features, labels, test_size=0.25, random_state=42
)
print("Training Features Shape:", train_features.shape)
print("Training Labels Shape:", train_labels.shape)
print("Testing Features Shape:", test_features.shape)
print("Testing Labels Shape:", test_labels.shape)
baseline_preds = test_features[:, feature_list.index("Sales")]
baseline_errors = abs(baseline_preds - test_labels)
print("Average baseline error: ", round(np.mean(baseline_errors), 2))
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor(n_estimators=1000, random_state=42)
rf.fit(train_features, train_labels)
predictions = rf.predict(test_features)
errors = abs(predictions - test_labels)
print("Mean Absolute Error:", round(np.mean(errors), 2), "degrees.")
mape = 100 * (errors / test_labels)
accuracy = 100 - np.mean(mape)
print("Accuracy:", round(accuracy, 2), "%.")
from sklearn.tree import export_graphviz
import pydot

tree = rf.estimators_[5]
export_graphviz(
tree, out_file="tree.dot", feature_names=feature_list, rounded=True, precision=1
)
(graph,) = pydot.graph_from_dot_file("tree.dot")
graph.write_png("tree.png")
rf_small = RandomForestRegressor(n_estimators=10, max_depth=3)
rf_small.fit(train_features, train_labels)
tree_small = rf_small.estimators_[5]
export_graphviz(
tree_small,
out_file="small_tree.dot",
feature_names=feature_list,
rounded=True,
precision=1,
)
(graph,) = pydot.graph_from_dot_file("small_tree.dot")
graph.write_png("small_tree.png")
importances = list(rf.feature_importances_)
feature_importances = [
(feature, round(importance, 2))
for feature, importance in zip(feature_list, importances)
]
feature_importances = sorted(feature_importances, key=lambda x: x[1], reverse=True)
[print("Variable: {:20} Importance: {}".format(*pair)) for pair in feature_importances]
rf_most_important = RandomForestRegressor(n_estimators=1000, random_state=42)
important_indices = [feature_list.index("Sales"), feature_list.index("Income")]
train_important = train_features[:, important_indices]
test_important = test_features[:, important_indices]
rf_most_important.fit(train_important, train_labels)
predictions = rf_most_important.predict(test_important)
errors = abs(predictions - test_labels)
print("Mean Absolute Error:", round(np.mean(errors), 2), "degrees.")
mape = np.mean(100 * (errors / test_labels))
accuracy = 100 - mape
print("Accuracy:", round(accuracy, 2), "%.")
import matplotlib.pyplot as plt
plt.style.use("fivethirtyeight")
x_values = list(range(len(importances)))
plt.bar(x_values, importances, orientation="vertical")
plt.xticks(x_values, feature_list, rotation="vertical")
plt.ylabel("Importance")
plt.xlabel("Variable")
plt.title("Variable Importances")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/394/129394350.ipynb
|
cs-302-artificial-intelligence
|
zhanbolotbakytbek
|
[{"Id": 129394350, "ScriptId": 38472585, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14682753, "CreationDate": "05/13/2023 12:18:01", "VersionNumber": 1.0, "Title": "notebookb5cecc27a7", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 128.0, "LinesInsertedFromPrevious": 128.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
|
[{"Id": 185401351, "KernelVersionId": 129394350, "SourceDatasetVersionId": 5676108}]
|
[{"Id": 5676108, "DatasetId": 3262980, "DatasourceVersionId": 5751660, "CreatorUserId": 14682753, "LicenseName": "Unknown", "CreationDate": "05/13/2023 11:27:37", "VersionNumber": 1.0, "Title": "Market Sales Dataset", "Slug": "cs-302-artificial-intelligence", "Subtitle": NaN, "Description": "**This dataset was specifically created for the final project of a CS 302 course, with the intention of being used as a source of data for the project**\n\nThe dataset provided contains information on the sales of a particular product in different markets. The dataset consists of 11 columns:\n\n**Sales**: the sales of the product (in thousands of units)\n**CompPrice**: the price charged by the competitor for a similar product\n**Income**: the median income of households in the market area (in thousands of dollars)\n**Advertising**: the level of advertising expenditure for the product in the market area (in thousands of dollars)\n**Population**: the population of the market area (in thousands of people)\n**Price**: the price charged for the product (in dollars)\n**ShelveLoc**: the quality of the shelving location for the product at the store (categorical: Bad, Medium, Good)\n**Age**: the average age of the people in the market area\n**Education**: the education level of the people in the market area (measured as the average number of years of education)\n**Urban**: whether the market area is an urban or rural area (categorical: Yes, No)\n**US**: whether the market area is in the United States or not (categorical: Yes, No)\n\n\nThis dataset can be used to build models that predict the sales of the product based on the other features. The dataset can also be used for exploratory data analysis to gain insights into the relationships between the different features and the sales of the product.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3262980, "CreatorUserId": 14682753, "OwnerUserId": 14682753.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5676108.0, "CurrentDatasourceVersionId": 5751660.0, "ForumId": 3328582, "Type": 2, "CreationDate": "05/13/2023 11:27:37", "LastActivityDate": "05/13/2023", "TotalViews": 226, "TotalDownloads": 22, "TotalVotes": 1, "TotalKernels": 1}]
|
[{"Id": 14682753, "UserName": "zhanbolotbakytbek", "DisplayName": "Zhanbolot Bakytbek", "RegisterDate": "04/18/2023", "PerformanceTier": 1}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
features = pd.read_csv(
"/kaggle/input/cs-302-artificial-intelligence/CS 302 Artificial Intelligence.csv"
)
features.head(5)
features.info()
print("The shape of our features is:", features.shape)
features.isnull().any()
import seaborn as sns
sns.pairplot(data=features, hue="ShelveLoc")
df = pd.get_dummies(features, columns=["Urban", "US"], drop_first=True)
print(df.head())
df["ShelveLoc"] = df["ShelveLoc"].map({"Good": 1, "Medium": 2, "Bad": 3})
print(df.head())
x = df.iloc[:, 0:6]
y = df["ShelveLoc"]
x
y
df["ShelveLoc"].unique()
df.ShelveLoc.value_counts()
colnames = list(df.columns)
colnames
df.describe()
df.head()
labels = np.array(df["Income"])  # target variable: median household income
features = df.drop("Income", axis=1)
feature_list = list(df.columns)  # note: taken from the full df, so "Income" is still in this list
features = np.array(df)  # note: built from the full df, so the Income target remains in the feature matrix
from sklearn.model_selection import train_test_split
train_features, test_features, train_labels, test_labels = train_test_split(
features, labels, test_size=0.25, random_state=42
)
print("Training Features Shape:", train_features.shape)
print("Training Labels Shape:", train_labels.shape)
print("Testing Features Shape:", test_features.shape)
print("Testing Labels Shape:", test_labels.shape)
baseline_preds = test_features[:, feature_list.index("Sales")]
baseline_errors = abs(baseline_preds - test_labels)
print("Average baseline error: ", round(np.mean(baseline_errors), 2))
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor(n_estimators=1000, random_state=42)
rf.fit(train_features, train_labels)
predictions = rf.predict(test_features)
errors = abs(predictions - test_labels)
print("Mean Absolute Error:", round(np.mean(errors), 2), "degrees.")
mape = 100 * (errors / test_labels)
accuracy = 100 - np.mean(mape)
print("Accuracy:", round(accuracy, 2), "%.")
from sklearn.tree import export_graphviz
import pydot

tree = rf.estimators_[5]
export_graphviz(
tree, out_file="tree.dot", feature_names=feature_list, rounded=True, precision=1
)
(graph,) = pydot.graph_from_dot_file("tree.dot")
graph.write_png("tree.png")
rf_small = RandomForestRegressor(n_estimators=10, max_depth=3)
rf_small.fit(train_features, train_labels)
tree_small = rf_small.estimators_[5]
export_graphviz(
tree_small,
out_file="small_tree.dot",
feature_names=feature_list,
rounded=True,
precision=1,
)
(graph,) = pydot.graph_from_dot_file("small_tree.dot")
graph.write_png("small_tree.png")
importances = list(rf.feature_importances_)
feature_importances = [
(feature, round(importance, 2))
for feature, importance in zip(feature_list, importances)
]
feature_importances = sorted(feature_importances, key=lambda x: x[1], reverse=True)
[print("Variable: {:20} Importance: {}".format(*pair)) for pair in feature_importances]
rf_most_important = RandomForestRegressor(n_estimators=1000, random_state=42)
important_indices = [feature_list.index("Sales"), feature_list.index("Income")]
train_important = train_features[:, important_indices]
test_important = test_features[:, important_indices]
rf_most_important.fit(train_important, train_labels)
predictions = rf_most_important.predict(test_important)
errors = abs(predictions - test_labels)
print("Mean Absolute Error:", round(np.mean(errors), 2), "degrees.")
mape = np.mean(100 * (errors / test_labels))
accuracy = 100 - mape
print("Accuracy:", round(accuracy, 2), "%.")
import matplotlib.pyplot as plt
plt.style.use("fivethirtyeight")
x_values = list(range(len(importances)))
plt.bar(x_values, importances, orientation="vertical")
plt.xticks(x_values, feature_list, rotation="vertical")
plt.ylabel("Importance")
plt.xlabel("Variable")
plt.title("Variable Importances")
| false | 1 | 1,382 | 1 | 1,753 | 1,382 |
||
129299489
|
<jupyter_start><jupyter_text>Pakistan House Price Prediction
### Context
There's a story behind every dataset and here's your opportunity to share yours.
Pakistan is the 5th most populous country and 33rd largest country. The real estate sector in Pakistan is one of the most expanding sector,, so it is of due importance to study the pricing of houses in different provinces, cities a and sectors of Pakistan to see what's the trend.
### Content
The data was created between May 15, 2020, 6:13 AM (UTC-07:00) to April 4, 2021, 12:41 PM (UTC-07:00).
Kaggle dataset identifier: pakistan-house-price-prediction
<jupyter_script># # Import Libraries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
import warnings
warnings.filterwarnings("ignore")
import warnings
warnings.simplefilter(action="ignore", category=FutureWarning)
# # Data Importing
data = pd.read_csv("/kaggle/input/pakistan-house-price-prediction/Entities.csv")
data
# # Data Describe
data.head(5)
data.info()
data.describe()
# # Data Preprocessing & Cleaning
df = data.copy()
df
df.info()
df = df.drop(["Unnamed: 0"], axis=1)
df.columns
df.isnull().sum()
sns.heatmap(df.isnull(), cmap=sns.cubehelix_palette(as_cmap=True))
# # filling the null values
fill_list = df["agency"].dropna()
df["agency"] = df["agency"].fillna(
pd.Series(np.random.choice(fill_list, size=len(df.index)))
)
fill_list = df["agent"].dropna()
df["agent"] = df["agent"].fillna(
pd.Series(np.random.choice(fill_list, size=len(df.index)))
)
df.isnull().sum()
# now we don't have null values
sns.heatmap(df.isnull(), cmap=sns.cubehelix_palette(as_cmap=True))
# # Data Visualization
import plotly.graph_objects as go
labels = [
"House",
"Flat",
"Upper Portion",
"Lower Portion",
"Room",
"Farm House",
"Penthouse",
]
values = [105468, 38238, 13774, 9229, 685, 657, 395]
# Use `hole` to create a donut-like pie chart
fig = go.Figure(data=[go.Pie(labels=labels, values=values, hole=0.3)])
fig.show()
# The house property type is most frequent
plt.figure(figsize=(10, 5))
sns.barplot(x="property_type", y="price", data=df)
# The most expensive property type is the farm house
fig = px.pie(
df,
names="province_name",
title="province_name",
color_discrete_sequence=px.colors.sequential.RdBu,
)
fig.show()
# Punjab is the most in-demand province
sns.barplot(x="province_name", y="price", data=df)
# The most expensive province is Sindh
sns.histplot(data=df, x="baths", kde=True)
fig = px.pie(
df,
names="purpose",
title="purpose",
color_discrete_sequence=px.colors.sequential.Aggrnyl,
)
fig.show()
# The percentage of homes sold is much higher than homes for rent
sns.histplot(data=df, x="bedrooms", kde=True)
sns.scatterplot(data=df, x="bedrooms", y="price")
# As the number of bedrooms increases, the price of the house increases
px.scatter(
df.sample(2000),
title="Total_Area vs price.",
x="Total_Area",
y="price",
)
# As the area of the house increases, the price increases
px.scatter(
df.sample(2000),
title="Total_Area vs price.",
x="Total_Area",
y="price",
color="purpose",
)
# This graph shows how price varies with total area for listings for sale versus for rent
plt.figure(figsize=(12, 5))
sns.countplot(data=df, x="property_type", hue="province_name")
# The most common province is Punjab and the most common property type is the house
plt.figure(figsize=(12, 5))
sns.countplot(data=df, x="purpose", hue="province_name")
# Punjab province comes first in home sales, followed by Sindh province
# Islamabad comes first in home rentals, followed by Sindh province
plt.figure(figsize=(10, 5))
sns.countplot(x=df["city"], palette="Set2")
# The most in-demand city is Karachi
plt.figure(figsize=(12, 5))
sns.countplot(data=df, x="purpose", hue="city")
# Karachi comes first in home sales, followed by Lahore
# Islamabad comes first in home rentals, followed by Karachi
sns.barplot(x="city", y="price", data=df)
# The most expensive city for renting or buying a house is Lahore, the capital of Punjab province
import folium
from folium.plugins import FastMarkerCluster
latitudes = np.array(df["latitude"])
longitudes = np.array(df["longitude"])
la_mean = latitudes.mean()
lo_mean = longitudes.mean()
locations = list(zip(latitudes, longitudes))
m = folium.Map(location=[la_mean, lo_mean], zoom_start=11.5)
FastMarkerCluster(data=locations).add_to(m)
m
# # Encoding the String Dataset
df.describe(include=object)
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
cols = [
"page_url",
"property_type",
"location",
"city",
"province_name",
"purpose",
"date_added",
"agency",
"agent",
]
df[cols] = df[cols].apply(LabelEncoder().fit_transform)
df.info()
plt.figure(figsize=(20, 10))
sns.heatmap(df.corr(), annot=True, cmap="coolwarm")
# # train test split
from sklearn.model_selection import train_test_split
x = df.drop("price", axis=1).values
y = df["price"].values
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.25, random_state=42
)
# # modeling
# # RandomForestRegressor
from sklearn.ensemble import RandomForestRegressor
random_forest = RandomForestRegressor()
random_forest.fit(x_train, y_train)
print("Random Forest Training Accuracy:", random_forest.score(x_train, y_train))
print("Random Forest Testing Accuracy:", random_forest.score(x_test, y_test))
# # Model Evaluation
y_pred = random_forest.predict(x_test)
y_pred
y_pred = random_forest.predict(x_test)
df4 = pd.DataFrame({"Y_test": y_test, "Y_pred": y_pred})
df4.head(20)
plt.figure(figsize=(20, 6))
plt.plot(df4[:500])
plt.legend(["Actual", "Predicted"])
from sklearn.metrics import r2_score
random_forest_r2 = r2_score(y_test, y_pred)
random_forest_r2
from sklearn.metrics import (
mean_absolute_error,
mean_absolute_percentage_error,
mean_squared_error,
)
mse = mean_squared_error(y_test, y_pred)
print(mse)
mae = mean_absolute_error(y_test, y_pred)
print(mae)
mape = mean_absolute_percentage_error(y_test, y_pred)
print(mape)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/299/129299489.ipynb
|
pakistan-house-price-prediction
|
ebrahimhaquebhatti
|
[{"Id": 129299489, "ScriptId": 38329418, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10938564, "CreationDate": "05/12/2023 14:55:43", "VersionNumber": 1.0, "Title": "Pakistan House Price Prediction|EDA|ML model", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 217.0, "LinesInsertedFromPrevious": 217.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 4}]
|
[{"Id": 185216383, "KernelVersionId": 129299489, "SourceDatasetVersionId": 2863985}]
|
[{"Id": 2863985, "DatasetId": 1753715, "DatasourceVersionId": 2910842, "CreatorUserId": 8014035, "LicenseName": "CC0: Public Domain", "CreationDate": "11/29/2021 00:12:58", "VersionNumber": 1.0, "Title": "Pakistan House Price Prediction", "Slug": "pakistan-house-price-prediction", "Subtitle": "Apply Feature Engineering to predict rental and House Price with minimum RMSE", "Description": "### Context\nThere's a story behind every dataset and here's your opportunity to share yours.\nPakistan is the 5th most populous country and 33rd largest country. The real estate sector in Pakistan is one of the most expanding sector,, so it is of due importance to study the pricing of houses in different provinces, cities a and sectors of Pakistan to see what's the trend.\n\n### Content\nThe data was created between May 15, 2020, 6:13 AM (UTC-07:00) to \tApril 4, 2021, 12:41 PM (UTC-07:00).\n\n### Acknowledgements\nThe data was web scraped by @huzzefakhan from [Zameen.com](https://www.zameen.com/) using 'beautiful soup' python library. The dataset was originally uploaded on Open Data Pakistan (https://opendata.com.pk/dataset/property-data-for-pakistan). The author made a few changes (converting areas in marlas and kanals to cubic feet and dropping redundant columns) and reuploaded the dataset for easy usability.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1753715, "CreatorUserId": 8014035, "OwnerUserId": 8014035.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2863985.0, "CurrentDatasourceVersionId": 2910842.0, "ForumId": 1775838, "Type": 2, "CreationDate": "11/29/2021 00:12:58", "LastActivityDate": "11/29/2021", "TotalViews": 6866, "TotalDownloads": 721, "TotalVotes": 23, "TotalKernels": 7}]
|
[{"Id": 8014035, "UserName": "ebrahimhaquebhatti", "DisplayName": "Ebrahim Haque Bhatti", "RegisterDate": "07/30/2021", "PerformanceTier": 2}]
|
# # Import Libraries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
import warnings
warnings.filterwarnings("ignore")
import warnings
warnings.simplefilter(action="ignore", category=FutureWarning)
# # Data Importing
data = pd.read_csv("/kaggle/input/pakistan-house-price-prediction/Entities.csv")
data
# # Data Describe
data.head(5)
data.info()
data.describe()
# # Data Preprocessing & Cleaning
df = data.copy()
df
df.info()
df = df.drop(["Unnamed: 0"], axis=1)
df.columns
df.isnull().sum()
sns.heatmap(df.isnull(), cmap=sns.cubehelix_palette(as_cmap=True))
# # filling the null values
fill_list = df["agency"].dropna()
df["agency"] = df["agency"].fillna(
pd.Series(np.random.choice(fill_list, size=len(df.index)))
)
fill_list = df["agent"].dropna()
df["agent"] = df["agent"].fillna(
pd.Series(np.random.choice(fill_list, size=len(df.index)))
)
df.isnull().sum()
# now we don't have null values
sns.heatmap(df.isnull(), cmap=sns.cubehelix_palette(as_cmap=True))
# # Data Visualization
import plotly.graph_objects as go
labels = [
"House",
"Flat",
"Upper Portion",
"Lower Portion",
"Room",
"Farm House",
"Penthouse",
]
values = [105468, 38238, 13774, 9229, 685, 657, 395]
# Use `hole` to create a donut-like pie chart
fig = go.Figure(data=[go.Pie(labels=labels, values=values, hole=0.3)])
fig.show()
# The house property type is most frequent
plt.figure(figsize=(10, 5))
sns.barplot(x="property_type", y="price", data=df)
# The most expensive property type is the farm house
fig = px.pie(
df,
names="province_name",
title="province_name",
color_discrete_sequence=px.colors.sequential.RdBu,
)
fig.show()
# Punjab is the most in-demand province
sns.barplot(x="province_name", y="price", data=df)
# The most expensive province is Sindh
sns.histplot(data=df, x="baths", kde=True)
fig = px.pie(
df,
names="purpose",
title="purpose",
color_discrete_sequence=px.colors.sequential.Aggrnyl,
)
fig.show()
# The percentage of homes sold is much higher than homes for rent
sns.histplot(data=df, x="bedrooms", kde=True)
sns.scatterplot(data=df, x="bedrooms", y="price")
# As the number of bedrooms increases, the price of the house increases
px.scatter(
df.sample(2000),
title="Total_Area vs price.",
x="Total_Area",
y="price",
)
# As the area of the house increases, the price increases
px.scatter(
df.sample(2000),
title="Total_Area vs price.",
x="Total_Area",
y="price",
color="purpose",
)
# This graph shows how price varies with total area for listings for sale versus for rent
plt.figure(figsize=(12, 5))
sns.countplot(data=df, x="property_type", hue="province_name")
# The most common province is Punjab and the most common property type is the house
plt.figure(figsize=(12, 5))
sns.countplot(data=df, x="purpose", hue="province_name")
# Punjab province comes first in home sales, followed by Sindh province
# Islamabad comes first in home rentals, followed by Sindh province
plt.figure(figsize=(10, 5))
sns.countplot(x=df["city"], palette="Set2")
# The most in-demand city is Karachi
plt.figure(figsize=(12, 5))
sns.countplot(data=df, x="purpose", hue="city")
# Karachi comes first in home sales, followed by Lahore
# Islamabad comes first in home rentals, followed by Karachi
sns.barplot(x="city", y="price", data=df)
# The most expensive city for renting or buying a house is Lahore, the capital of Punjab province
import folium
from folium.plugins import FastMarkerCluster
latitudes = np.array(df["latitude"])
longitudes = np.array(df["longitude"])
la_mean = latitudes.mean()
lo_mean = longitudes.mean()
locations = list(zip(latitudes, longitudes))
m = folium.Map(location=[la_mean, lo_mean], zoom_start=11.5)
FastMarkerCluster(data=locations).add_to(m)
m
# # Encoding the String Dataset
df.describe(include=object)
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
cols = [
"page_url",
"property_type",
"location",
"city",
"province_name",
"purpose",
"date_added",
"agency",
"agent",
]
df[cols] = df[cols].apply(LabelEncoder().fit_transform)
df.info()
plt.figure(figsize=(20, 10))
sns.heatmap(df.corr(), annot=True, cmap="coolwarm")
# # train test split
from sklearn.model_selection import train_test_split
x = df.drop("price", axis=1).values
y = df["price"].values
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.25, random_state=42
)
# # modeling
# # RandomForestRegressor
from sklearn.ensemble import RandomForestRegressor
random_forest = RandomForestRegressor()
random_forest.fit(x_train, y_train)
print("Random Forest Training Accuracy:", random_forest.score(x_train, y_train))
print("Random Forest Testing Accuracy:", random_forest.score(x_test, y_test))
# # Model Evaluation
y_pred = random_forest.predict(x_test)
y_pred
y_pred = random_forest.predict(x_test)
df4 = pd.DataFrame({"Y_test": y_test, "Y_pred": y_pred})
df4.head(20)
plt.figure(figsize=(20, 6))
plt.plot(df4[:500])
plt.legend(["Actual", "Predicted"])
from sklearn.metrics import r2_score
random_forest_r2 = r2_score(y_test, y_pred)
random_forest_r2
from sklearn.metrics import (
mean_absolute_error,
mean_absolute_percentage_error,
mean_squared_error,
)
mse = mean_squared_error(y_test, y_pred)
print(mse)
mae = mean_absolute_error(y_test, y_pred)
print(mae)
mape = mean_absolute_percentage_error(y_test, y_pred)
print(mape)
| false | 1 | 1,877 | 4 | 2,065 | 1,877 |
||
129299139
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
df_courses = pd.read_csv("/kaggle/input/courses2/courses.csv")
df_courses["Categories"]
tfidf = TfidfVectorizer(stop_words="english")
df_courses["Categories"] = df_courses["Categories"].fillna("")
tfidf_matrix = tfidf.fit_transform(df_courses["Categories"])
cosine_sim = linear_kernel(tfidf_matrix, tfidf_matrix)
def get_recommendations(categories, cosine_sim=cosine_sim, data=df_courses):
temp = pd.DataFrame({"Categories": [categories]})
temp_matrix = tfidf.transform(temp["Categories"])
sim_scores = linear_kernel(temp_matrix, tfidf_matrix).flatten()
indices = sim_scores.argsort()[:-11:-1]
courses = data.iloc[indices]["Courses"].values.tolist()
return courses
categories = "Data and Analytics"
recommended_courses = get_recommendations(categories)
print(recommended_courses)
categories = "Leadership"
recommended_courses = get_recommendations(categories)
print(recommended_courses)
categories = "Business"
recommended_courses = get_recommendations(categories)
print(recommended_courses)
categories = "Entrepreneurship"
recommended_courses = get_recommendations(categories)
print(recommended_courses)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/299/129299139.ipynb
| null | null |
[{"Id": 129299139, "ScriptId": 38314000, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14722599, "CreationDate": "05/12/2023 14:52:59", "VersionNumber": 1.0, "Title": "notebook15f2e6178d", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 56.0, "LinesInsertedFromPrevious": 56.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
df_courses = pd.read_csv("/kaggle/input/courses2/courses.csv")
df_courses["Categories"]
tfidf = TfidfVectorizer(stop_words="english")
df_courses["Categories"] = df_courses["Categories"].fillna("")
tfidf_matrix = tfidf.fit_transform(df_courses["Categories"])
cosine_sim = linear_kernel(tfidf_matrix, tfidf_matrix)
def get_recommendations(categories, cosine_sim=cosine_sim, data=df_courses):
temp = pd.DataFrame({"Categories": [categories]})
temp_matrix = tfidf.transform(temp["Categories"])
sim_scores = linear_kernel(temp_matrix, tfidf_matrix).flatten()
indices = sim_scores.argsort()[:-11:-1]
courses = data.iloc[indices]["Courses"].values.tolist()
return courses
categories = "Data and Analytics"
recommended_courses = get_recommendations(categories)
print(recommended_courses)
categories = "Leadership"
recommended_courses = get_recommendations(categories)
print(recommended_courses)
categories = "Business"
recommended_courses = get_recommendations(categories)
print(recommended_courses)
categories = "Entrepreneurship"
recommended_courses = get_recommendations(categories)
print(recommended_courses)
| false | 0 | 534 | 0 | 534 | 534 |
||
129299762
|
<jupyter_start><jupyter_text>UCI Bag Of Words
Kaggle dataset identifier: uci-bag-of-words
<jupyter_script># **Task 1: Working with Bag of Words Dataset**
# The necessary libraries are imported in the following cell
import os
import random
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.metrics import silhouette_score
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
# The UCI Bag of Words dataset is a collection of text documents that have been preprocessed and represented as a bag-of-words model. It is commonly used in natural language processing and machine learning tasks, such as text classification and information retrieval.
# The dataset consists of a text corpus where each document is represented as a sparse vector of word frequencies. The bag-of-words model represents a document by counting the frequency of each word that appears in it, ignoring the order and structure of the text. This representation allows for efficient and straightforward analysis of text data.
# The UCI Bag of Words dataset includes two files:
# * vocab.txt: This file contains the vocabulary of the dataset, listing all the unique words found in the corpus. Each word is assigned a unique identifier or index.
# * docword.txt: This file represents the bag-of-words representation of the documents. It contains three columns: docID, wordID, and count. Each row corresponds to an occurrence of a word in a document. The docID identifies the document, the wordID represents the index of the word in the vocabulary, and the count indicates the frequency of that word in the document.
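# To make the docword.txt layout concrete, here is a small self-contained sketch (using made-up
# toy triples, not values from the real files) that turns (docID, wordID, count) rows into a
# sparse document-term matrix:
import pandas as pd
from scipy.sparse import coo_matrix

toy = pd.DataFrame({"docID": [1, 1, 2, 3], "wordID": [1, 3, 2, 3], "count": [2, 1, 4, 5]})
toy_matrix = coo_matrix((toy["count"], (toy["docID"] - 1, toy["wordID"] - 1)))  # IDs in the files are 1-based
print(toy_matrix.toarray())  # rows = documents, columns = words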
# We are going to combine the three datasets into a common corpus. Each corpus's docword file and its vocabulary are therefore read into pandas DataFrames one by one below.
# Accessing three datasets and their corresponding vocabularies:
filepath = os.path.join("..", f"input/uci-bag-of-words/docword.enron.txt")
enron = pd.read_csv(
filepath, header=None, names=["docID", "wordID", "count"], skiprows=3, sep=" "
)
filepath = os.path.join("..", f"input/uci-bag-of-words/vocab.enron.txt")
enron_vocab = pd.read_csv(filepath, header=None, names=["word"]).fillna(
"null"
)  # the word "null" appears in the vocabulary and is read as NaN by pandas; we convert it back
enron_vocab["wordID"] = (
enron_vocab.index + 1
) # Indexing starts from 0, wordID starts from 1
filepath = os.path.join("..", f"input/uci-bag-of-words/docword.kos.txt")
kos = pd.read_csv(
filepath, header=None, names=["docID", "wordID", "count"], skiprows=3, sep=" "
)
filepath = os.path.join("..", f"input/uci-bag-of-words/vocab.kos.txt")
kos_vocab = pd.read_csv(filepath, header=None, names=["word"]).fillna(
"null"
)  # the word "null" appears in the vocabulary and is read as NaN by pandas; we convert it back
kos_vocab["wordID"] = (
kos_vocab.index + 1
) # Indexing starts from 0, wordID starts from 1
filepath = os.path.join("..", f"input/uci-bag-of-words/docword.nips.txt")
nips = pd.read_csv(
filepath, header=None, names=["docID", "wordID", "count"], skiprows=3, sep=" "
)
filepath = os.path.join("..", f"input/uci-bag-of-words/vocab.nips.txt")
nips_vocab = pd.read_csv(filepath, header=None, names=["word"]).fillna(
"null"
)  # the word "null" appears in the vocabulary and is read as NaN by pandas; we convert it back
nips_vocab["wordID"] = (
nips_vocab.index + 1
) # Indexing starts from 0, wordID starts from 1
enron.head()
enron_vocab["word"][118]
# The Enron dataset was causing memory issues since it has far more documents than the other two, so we trim it down to a smaller sample of 6000 documents
random.seed(42)
trimmed_doc_ids = random.sample(list(set(enron["docID"])), k=6000)
enron = enron[enron["docID"].isin(trimmed_doc_ids)].reset_index(drop=True)
enron["docID"].nunique()
for name, dataset in (("enron", enron), ("kos", kos), ("nips", nips)):
print(f"Documents size for {name}: {dataset['docID'].nunique()}")
# # a. Combining the three chosen datasets into one common corpus
# Here we face several challenges. Firstly, the document IDs overlap across the datasets. To combine them, each dataset's IDs need to be shifted by an amount equal to the last document ID of the previously concatenated dataset, which is 0 at the start. This variable is named offset below; for example, if the first corpus ends at docID 6000, the second corpus's docID 1 would become 6001.
dfs = []
offset = 0
for df, vocab in ((enron, enron_vocab), (kos, kos_vocab), (nips, nips_vocab)):
ids = df["docID"] + offset
df["new_id"] = ids
offset = ids.max()
df = df.merge(vocab)[["new_id", "word", "count"]]
dfs.append(df)
# Secondly, the same word may appear in different datasets with different IDs. The solution is to merge all the words and assign each one a new unique ID.
merged = pd.concat(dfs, ignore_index=True).rename(columns={"new_id": "docID"})
modified_words = merged["word"].unique()
modified_words.sort()
merged_vocab = (
pd.DataFrame({"word": modified_words})
.reset_index()
.rename(columns={"index": "wordID"})
)
merged = merged.merge(merged_vocab, how="left")
merged = (
merged[["docID", "wordID", "count"]]
.sort_values(["docID", "wordID"])
.reset_index(drop=True)
)
merged.head()
# Now we have 32721 unique words in our merged datasets.
len(merged_vocab)
# # b. Creating word-document matrix from merged datasets
# Here, we convert the merged corpus into a word-document matrix, with words as rows and
# documents as columns; each cell holds the count of a given word in the corresponding document.
wdm = (
merged.pivot(index="wordID", columns="docID", values="count")
.fillna(0.0)
.astype(pd.SparseDtype("float", 0.0))
)
wdm
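# An alternative sketch (not used in the rest of this notebook): the same word-document matrix
# can be built directly as a scipy sparse matrix from the (wordID, docID, count) triples,
# which avoids the pivot step entirely and still works as input to TruncatedSVD below.
from scipy.sparse import csr_matrix

rows = merged["wordID"].to_numpy()  # wordIDs are 0-based after the re-indexing above
cols = merged["docID"].to_numpy() - 1  # docIDs start at 1
vals = merged["count"].to_numpy()
wdm_sparse = csr_matrix((vals, (rows, cols)))
print(wdm_sparse.shape)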
# # c. Using SKLearn, find the truncated singular value decomposition of this matrix, retaining the first 100 dimensions
svd = TruncatedSVD(n_components=100, n_iter=10, random_state=42)
y_svd = svd.fit_transform(wdm)
y_svd.shape
# 1. Are these dimensions interpretable?
# * We have reduced the number of dimensions from 10934 to 100. These dimensions can be interpreted as topics
# 2. What does dimension 1 represent
# * Topic is machine learning. See below code
# 3. What do the top 10 dimensions represent? (see below code)
# * Dimension 0: machine learning
# * Dimension 1: energy
# * Dimension 2: neural network
# * Dimension 3: energy + machine learning
# * Dimension 4: enron topics
# * Dimension 5: electric + artificial intelligence
# * Dimension 6: robot + machine learning
# * Dimension 7: image recognition
# * Dimension 8: finance
# * Dimension 9: Business company
wordsSVD = pd.DataFrame(y_svd, index=merged_vocab["word"])
wordsSVD.head()
pd.options.display.float_format = "{:.7f}".format # Set the desired format
# Describe the values of the column without scientific notation
description = wordsSVD.loc[:, 0].describe()
print(description)
thrsh = wordsSVD.loc[:, 0].quantile(0.75)
words = list(wordsSVD[wordsSVD[0] > thrsh].sort_values([0], ascending=False).index)[:10]
print(words)
# Looking at the 10 words with the highest weights on this dimension, we see that the topic is machine learning
for index in range(10):
thrsh = wordsSVD.loc[:, index].quantile(0.9)
print(f"Dimension: {index}")
words = list(
wordsSVD[wordsSVD[index] > thrsh].sort_values([index], ascending=False).index
)[:25]
print(words)
print()
# # d. Determine the average cosine similarity between documents within each corpus. Next, determine the average cosine similarity between documents across corpora.
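# As a reminder of what is being averaged here: the cosine similarity of two bag-of-words
# vectors a and b is (a . b) / (||a|| * ||b||). A tiny sketch on made-up count vectors:
import numpy as np

a = np.array([2, 0, 1, 0])
b = np.array([1, 1, 0, 0])
cos = a @ b / (np.linalg.norm(a) * np.linalg.norm(b))
print(cos)  # ~0.63: the two toy documents share one of their words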
filepath = os.path.join("..", f"input/uci-bag-of-words/docword.enron.txt")
enron = pd.read_csv(
filepath, header=None, names=["docID", "wordID", "count"], skiprows=3, sep=" "
)
enron = enron[enron["docID"].isin(trimmed_doc_ids)].reset_index(
drop=True
) # Reduce dataset
filepath = os.path.join("..", f"input/uci-bag-of-words/docword.kos.txt")
kos = pd.read_csv(
filepath, header=None, names=["docID", "wordID", "count"], skiprows=3, sep=" "
)
filepath = os.path.join("..", f"input/uci-bag-of-words/docword.nips.txt")
nips = pd.read_csv(
filepath, header=None, names=["docID", "wordID", "count"], skiprows=3, sep=" "
)
avrg_cos_sims = []
enron_wmd = (
enron.pivot(index="docID", columns="wordID", values="count")
.fillna(0)
.astype(pd.SparseDtype("int16", 0))
)
enron_sims = cosine_similarity(enron_wmd, dense_output=False)
avrg_cos_sims.append(("enron", enron_sims.mean()))
kos_wdm = (
kos.pivot(index="docID", columns="wordID", values="count")
.fillna(0)
.astype(pd.SparseDtype("int16", 0))
)
kos_sims = cosine_similarity(kos_wdm, dense_output=False)
avrg_cos_sims.append(("kos", kos_sims.mean()))
nips_wdm = (
nips.pivot(index="docID", columns="wordID", values="count")
.fillna(0)
.astype(pd.SparseDtype("int16", 0))
)
nips_sims = cosine_similarity(nips_wdm, dense_output=False)
avrg_cos_sims.append(("nips", nips_sims.mean()))
doc_term_mat = (
merged.pivot(index="docID", columns="wordID", values="count")
.fillna(0)
.astype(pd.SparseDtype("int16", 0))
)
sims = cosine_similarity(doc_term_mat, dense_output=False)
avrg_cos_sims.append(("all", sims.mean()))
cosine_sims = pd.DataFrame(
avrg_cos_sims, columns=["corpus", "average_cosine_similarity"]
)
cosine_sims
# # e. Does LSA work well as a tool for clustering corpora?
# Yes and In reality, the Scikit Learn documentation contains an illustration that employs LSA as a technique for reducing dimensionality before implementing KMeans clustering. To carry out clustering, we will utilize the document-term matrix, which can be derived by transposing the term-document matrix.
dtm = wdm.T
lsa = make_pipeline(TruncatedSVD(n_components=100), Normalizer(copy=False))
x_lsa = lsa.fit_transform(dtm)
explained_variance = lsa[0].explained_variance_ratio_.sum()
print(f"Explained variance: {explained_variance * 100:.2f}%")
init = 1
kmeans = KMeans(
n_clusters=10,
max_iter=100,
n_init=init,
)
kmeans.fit(x_lsa)
def WSS(points, kmax):
sse = []
for k in range(1, kmax + 1):
kmeans = KMeans(n_clusters=k).fit(points)
centroids = kmeans.cluster_centers_
pred_clusters = kmeans.predict(points)
curr_sse = 0
for i in range(len(points)):
curr_center = centroids[pred_clusters[i]]
curr_sse += (points[i, 0] - curr_center[0]) ** 2 + (
points[i, 1] - curr_center[1]
) ** 2
sse.append(curr_sse)
return sse
results = WSS(x_lsa, 20)
pd.Series(results, index=range(1, len(results) + 1)).plot()
n_clusters = 10
kmeans = KMeans(n_clusters=n_clusters).fit(x_lsa)
labels = kmeans.labels_
dtm["label"] = labels
dtm["label"].value_counts()
for cluster in range(n_clusters):
print(f"Cluster: {cluster}")
cluster = dtm[dtm["label"] == cluster]
top_ten_words = set(cluster.sum().sort_values(ascending=False).head(10).index)
print(merged_vocab[merged_vocab["wordID"].isin(top_ten_words)]["word"].tolist())
print()
# Here, we observe that some of the topics from truncated SVD are replicated. However, there are new topics like politics, football and etc.
# # f. Try to use PCA instead of LSA. What are your results? Are they replaceable? Can you achieve same results using PCA? Why?
# Due to the fact that PCA does not handle sparse inputs, we now construct the term-document matrix in dense form. The top phrases for the first 10 dimensions (themes) are then displayed using PCA with 100 components, following the steps outlined in the Truncated SVD section:
tdm = merged.pivot(index="wordID", columns="docID", values="count").fillna(0.0)
pca = PCA(n_components=100)
pred = pca.fit_transform(tdm)
word_pca = pd.DataFrame(pred, index=merged_vocab["word"])
word_pca.head()
pd.options.display.float_format = "{:.7f}".format # Set the desired format
description = word_pca.loc[:, 0].describe()
print(description)
for index in range(10):
thrsh = word_pca.loc[:, index].quantile(0.9)
print(f"Dimension: {index}")
words = list(
word_pca[wordsSVD[index] > thrsh].sort_values([index], ascending=False).index
)[:25]
print(words)
print()
explained_variance = pca.explained_variance_ratio_.sum()
print(f"Explained variance: {explained_variance * 100:.2f}%")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/299/129299762.ipynb
|
uci-bag-of-words
|
aslanovmustafa
|
[{"Id": 129299762, "ScriptId": 38321703, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14337270, "CreationDate": "05/12/2023 14:58:10", "VersionNumber": 1.0, "Title": "Assignment 2 - Task 1", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 274.0, "LinesInsertedFromPrevious": 274.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185216846, "KernelVersionId": 129299762, "SourceDatasetVersionId": 3639953}]
|
[{"Id": 3639953, "DatasetId": 2180053, "DatasourceVersionId": 3693638, "CreatorUserId": 8278966, "LicenseName": "Unknown", "CreationDate": "05/15/2022 12:47:05", "VersionNumber": 1.0, "Title": "UCI Bag Of Words", "Slug": "uci-bag-of-words", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 2180053, "CreatorUserId": 8278966, "OwnerUserId": 8278966.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 3639953.0, "CurrentDatasourceVersionId": 3693638.0, "ForumId": 2206001, "Type": 2, "CreationDate": "05/15/2022 12:47:05", "LastActivityDate": "05/15/2022", "TotalViews": 587, "TotalDownloads": 68, "TotalVotes": 1, "TotalKernels": 15}]
|
[{"Id": 8278966, "UserName": "aslanovmustafa", "DisplayName": "Mustafa Aslanov", "RegisterDate": "09/04/2021", "PerformanceTier": 0}]
|
# **Task 1: Working with Bag of Words Dataset**
# Importing important and necessary libraries is done inside the following cell
import os
import random
import numpy as np
import pandas as pd
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA, TruncatedSVD
from sklearn.metrics import silhouette_score
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
# The UCI Bag of Words dataset is a collection of text documents that have been preprocessed and represented as a bag-of-words model. It is commonly used in natural language processing and machine learning tasks, such as text classification and information retrieval.
# The dataset consists of a text corpus where each document is represented as a sparse vector of word frequencies. The bag-of-words model represents a document by counting the frequency of each word that appears in it, ignoring the order and structure of the text. This representation allows for efficient and straightforward analysis of text data.
# The UCI Bag of Words dataset includes two files:
# * vocab.txt: This file contains the vocabulary of the dataset, listing all the unique words found in the corpus. Each word is assigned a unique identifier or index.
# * docword.txt: This file represents the bag-of-words representation of the documents. It contains three columns: docID, wordID, and count. Each row corresponds to an occurrence of a word in a document. The docID identifies the document, the wordID represents the index of the word in the vocabulary, and the count indicates the frequency of that word in the document.
# We are going to combine three datasets into a common corpus. Therefore, two functions called **get_bow_file** and **get_vocab_file** are created to write these datasets into pandas dataframe one-by-one.
# Accessing three datasets and their corresponding vocabularies:
filepath = os.path.join("..", f"input/uci-bag-of-words/docword.enron.txt")
enron = pd.read_csv(
filepath, header=None, names=["docID", "wordID", "count"], skiprows=3, sep=" "
)
filepath = os.path.join("..", f"input/uci-bag-of-words/vocab.enron.txt")
enron_vocab = pd.read_csv(filepath, header=None, names=["word"]).fillna(
"null"
) # there are "null" words which is replaced by NA in pandas. We are converting them back
enron_vocab["wordID"] = (
enron_vocab.index + 1
) # Indexing starts from 0, wordID starts from 1
filepath = os.path.join("..", f"input/uci-bag-of-words/docword.kos.txt")
kos = pd.read_csv(
filepath, header=None, names=["docID", "wordID", "count"], skiprows=3, sep=" "
)
filepath = os.path.join("..", f"input/uci-bag-of-words/vocab.kos.txt")
kos_vocab = pd.read_csv(filepath, header=None, names=["word"]).fillna(
"null"
) # there are "null" words which is replaced by NA in pandas. We are converting them back
kos_vocab["wordID"] = (
kos_vocab.index + 1
) # Indexing starts from 0, wordID starts from 1
filepath = os.path.join("..", f"input/uci-bag-of-words/docword.nips.txt")
nips = pd.read_csv(
filepath, header=None, names=["docID", "wordID", "count"], skiprows=3, sep=" "
)
filepath = os.path.join("..", f"input/uci-bag-of-words/vocab.nips.txt")
nips_vocab = pd.read_csv(filepath, header=None, names=["word"]).fillna(
"null"
) # there are "null" words which is replaced by NA in pandas. We are converting them back
nips_vocab["wordID"] = (
nips_vocab.index + 1
) # Indexing starts from 0, wordID starts from 1
enron.head()
enron_vocab["word"][118]
# Enron dataset was giving memory issue since it has more documents compared with other two. Therefore, we trimmed it down to smaller number of samples (6000)
random.seed(42)
trimmed_doc_ids = random.sample(list(set(enron["docID"])), k=6000)
enron = enron[enron["docID"].isin(trimmed_doc_ids)].reset_index(drop=True)
enron["docID"].nunique()
for name, dataset in (("enron", enron), ("kos", kos), ("nips", nips)):
print(f"Documents size for {name}: {dataset['docID'].nunique()}")
# # a. Combining chosen three dataset into one common corpus
# Here, we face with several challanges. Firstly, we have overlapping documend IDs in each dataset. In order to combine them together, they need to be shifted by the amount equal to the last document ID of previously conctenated dataset, which is 0 at start. This variable is named offset below.
dfs = []
offset = 0
for df, vocab in ((enron, enron_vocab), (kos, kos_vocab), (nips, nips_vocab)):
ids = df["docID"] + offset
df["new_id"] = ids
offset = ids.max()
df = df.merge(vocab)[["new_id", "word", "count"]]
dfs.append(df)
# Secondly, we may have same words in different datasets with different IDs. The solution would be to merge all the words and assign them a new unique ID to compensate for this situation.
merged = pd.concat(dfs, ignore_index=True).rename(columns={"new_id": "docID"})
modified_words = merged["word"].unique()
modified_words.sort()
merged_vocab = (
pd.DataFrame({"word": modified_words})
.reset_index()
.rename(columns={"index": "wordID"})
)
merged = merged.merge(merged_vocab, how="left")
merged = (
merged[["docID", "wordID", "count"]]
.sort_values(["docID", "wordID"])
.reset_index(drop=True)
)
merged.head()
# Now we have 32721 unique words in our merged datasets.
len(merged_vocab)
# # b. Creating word-document matrix from merged datasets
# Here, we have to convert our acquired new corpus into word-document matrix, which has words for rows,
# documents for columns and each cell shows the number of a specific word inside the
# corresponding document.
wdm = (
merged.pivot(index="wordID", columns="docID", values="count")
.fillna(0.0)
.astype(pd.SparseDtype("float", 0.0))
)
wdm
# # c. Using SKLearn, find the truncated singular value decomposition of this matrix, retaining the first 100 dimensions
svd = TruncatedSVD(n_components=100, n_iter=10, random_state=42)
y_svd = svd.fit_transform(wdm)
y_svd.shape
# 1. Are these dimensions interpretable?
# * We have reduced number of dimensions from 10934 to 100. These dimensions can be interpreted as topics
# 2. What does dimension 1 represent
# * Topic is machine learning. See below code
# 3. What do the top 10 dimensions represent? (see below code)
# * Dimension 0: machine learning
# * Dimension 1: energy
# * Dimension 2: neural network
# * Dimension 3: energy + machine learning
# * Dimension 4: enron topics
# * Dimension 5: electric + artifical intelligence
# * Dimension 6: robot + machine learning
# * Dimension 7: image recognition
# * Dimension 8: finance
# * Dimension 9: Business company
wordsSVD = pd.DataFrame(y_svd, index=merged_vocab["word"])
wordsSVD.head()
pd.options.display.float_format = "{:.7f}".format # Set the desired format
# Describe the values of the column without scientific notation
description = wordsSVD.loc[:, 0].describe()
print(description)
thrsh = wordsSVD.loc[:, 0].quantile(0.75)
words = list(wordsSVD[wordsSVD[0] > thrsh].sort_values([0], ascending=False).index)[:10]
print(words)
# By looking at the first 10 words with higher frequency, we see that the topic is about machine learning
for index in range(10):
thrsh = wordsSVD.loc[:, index].quantile(0.9)
print(f"Dimension: {index}")
words = list(
wordsSVD[wordsSVD[index] > thrsh].sort_values([index], ascending=False).index
)[:25]
print(words)
print()
# # d. Determine the average cosine similarity between documents within in each corpus. Next, determine the average cosine similarity between documents across corpora.
filepath = os.path.join("..", f"input/uci-bag-of-words/docword.enron.txt")
enron = pd.read_csv(
filepath, header=None, names=["docID", "wordID", "count"], skiprows=3, sep=" "
)
enron = enron[enron["docID"].isin(trimmed_doc_ids)].reset_index(
drop=True
) # Reduce dataset
filepath = os.path.join("..", f"input/uci-bag-of-words/docword.kos.txt")
kos = pd.read_csv(
filepath, header=None, names=["docID", "wordID", "count"], skiprows=3, sep=" "
)
filepath = os.path.join("..", f"input/uci-bag-of-words/docword.nips.txt")
nips = pd.read_csv(
filepath, header=None, names=["docID", "wordID", "count"], skiprows=3, sep=" "
)
avrg_cos_sims = []
enron_wmd = (
enron.pivot(index="docID", columns="wordID", values="count")
.fillna(0)
.astype(pd.SparseDtype("int16", 0))
)
enron_sims = cosine_similarity(enron_wmd, dense_output=False)
avrg_cos_sims.append(("enron", enron_sims.mean()))
kos_wdm = (
kos.pivot(index="docID", columns="wordID", values="count")
.fillna(0)
.astype(pd.SparseDtype("int16", 0))
)
kos_sims = cosine_similarity(kos_wdm, dense_output=False)
avrg_cos_sims.append(("kos", kos_sims.mean()))
nips_wdm = (
nips.pivot(index="docID", columns="wordID", values="count")
.fillna(0)
.astype(pd.SparseDtype("int16", 0))
)
nips_sims = cosine_similarity(nips_wdm, dense_output=False)
avrg_cos_sims.append(("nips", nips_sims.mean()))
doc_term_mat = (
merged.pivot(index="docID", columns="wordID", values="count")
.fillna(0)
.astype(pd.SparseDtype("int16", 0))
)
sims = cosine_similarity(doc_term_mat, dense_output=False)
avrg_cos_sims.append(("all", sims.mean()))
cosine_sims = pd.DataFrame(
avrg_cos_sims, columns=["corpus", "average_cosine_similarity"]
)
cosine_sims
# # e. Does LSA work well as a tool for clustering corpora?
# Yes and In reality, the Scikit Learn documentation contains an illustration that employs LSA as a technique for reducing dimensionality before implementing KMeans clustering. To carry out clustering, we will utilize the document-term matrix, which can be derived by transposing the term-document matrix.
dtm = wdm.T
lsa = make_pipeline(TruncatedSVD(n_components=100), Normalizer(copy=False))
x_lsa = lsa.fit_transform(dtm)
explained_variance = lsa[0].explained_variance_ratio_.sum()
print(f"Explained variance: {explained_variance * 100:.2f}%")
init = 1
kmeans = KMeans(
n_clusters=10,
max_iter=100,
n_init=init,
)
kmeans.fit(x_lsa)
def WSS(points, kmax):
sse = []
for k in range(1, kmax + 1):
kmeans = KMeans(n_clusters=k).fit(points)
centroids = kmeans.cluster_centers_
pred_clusters = kmeans.predict(points)
curr_sse = 0
for i in range(len(points)):
curr_center = centroids[pred_clusters[i]]
curr_sse += (points[i, 0] - curr_center[0]) ** 2 + (
points[i, 1] - curr_center[1]
) ** 2
sse.append(curr_sse)
return sse
results = WSS(x_lsa, 20)
pd.Series(results, index=range(1, len(results) + 1)).plot()
n_clusters = 10
kmeans = KMeans(n_clusters=n_clusters).fit(x_lsa)
labels = kmeans.labels_
dtm["label"] = labels
dtm["label"].value_counts()
for cluster in range(n_clusters):
print(f"Cluster: {cluster}")
cluster = dtm[dtm["label"] == cluster]
top_ten_words = set(cluster.sum().sort_values(ascending=False).head(10).index)
print(merged_vocab[merged_vocab["wordID"].isin(top_ten_words)]["word"].tolist())
print()
# Here, we observe that some of the topics from truncated SVD are replicated. However, there are new topics like politics, football and etc.
# # f. Try to use PCA instead of LSA. What are your results? Are they replaceable? Can you achieve same results using PCA? Why?
# Due to the fact that PCA does not handle sparse inputs, we now construct the term-document matrix in dense form. The top phrases for the first 10 dimensions (themes) are then displayed using PCA with 100 components, following the steps outlined in the Truncated SVD section:
tdm = merged.pivot(index="wordID", columns="docID", values="count").fillna(0.0)
pca = PCA(n_components=100)
pred = pca.fit_transform(tdm)
word_pca = pd.DataFrame(pred, index=merged_vocab["word"])
word_pca.head()
pd.options.display.float_format = "{:.7f}".format # Set the desired format
description = word_pca.loc[:, 0].describe()
print(description)
for index in range(10):
thrsh = word_pca.loc[:, index].quantile(0.9)
print(f"Dimension: {index}")
words = list(
word_pca[wordsSVD[index] > thrsh].sort_values([index], ascending=False).index
)[:25]
print(words)
print()
explained_variance = pca.explained_variance_ratio_.sum()
print(f"Explained variance: {explained_variance * 100:.2f}%")
| false | 0 | 3,852 | 0 | 3,879 | 3,852 |
||
129647995
|
<jupyter_start><jupyter_text>Market Basket Analysis
# Market Basket Analysis
Market basket analysis with Apriori algorithm
The retailer wants to target customers with suggestions on the itemsets they are most likely to purchase. I was given a dataset containing the transactions of a retailer over a period of time. The retailer will use the results to grow the business and to make itemset suggestions to customers, which should increase customer engagement, improve the customer experience and help identify customer behavior. I will solve this problem with Association Rules, an unsupervised learning technique that checks for the dependency of one data item on another.
### Introduction
Association Rules are most used when you want to build associations between different objects in a set. They work when you are looking for frequent patterns in a transaction database: they can tell you which items customers frequently buy together and allow the retailer to identify relationships between the items.
### An Example of Association Rules
Assume there are 100 customers; 10 of them bought a Computer Mouse, 9 bought a Mat for Mouse and 8 bought both of them.
- Rule: bought Computer Mouse => bought Mat for Mouse
- support = P(Mouse & Mat) = 8/100 = 0.08
- confidence = support / P(Computer Mouse) = 0.08/0.10 = 0.80
- lift = confidence / P(Mat for Mouse) = 0.80/0.09 ≈ 8.9
This is just a simple example. In practice, a rule needs the support of several hundred transactions before it can be considered statistically significant, and datasets often contain thousands or millions of transactions.
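For readers who want to verify the arithmetic, the toy numbers above can be reproduced with a few lines of Python; the counts below are the assumed values from the example, not real data.

```python
# Toy example: support, confidence and lift for "Computer Mouse => Mat for Mouse".
n_customers = 100
n_mouse = 10   # customers who bought a computer mouse
n_mat = 9      # customers who bought a mat for the mouse
n_both = 8     # customers who bought both

support = n_both / n_customers                    # 0.08
confidence = support / (n_mouse / n_customers)    # 0.08 / 0.10 = 0.80
lift = confidence / (n_mat / n_customers)         # 0.80 / 0.09 ≈ 8.9
print(f"support={support:.2f}, confidence={confidence:.2f}, lift={lift:.2f}")
```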
### Strategy
- Data Import
- Data Understanding and Exploration
- Transformation of the data so that it is ready to be consumed by the association rules algorithm
- Running association rules
- Exploring the rules generated
- Filtering the generated rules
- Visualization of Rule
### Dataset Description
- File name: Assignment-1_Data
- List name: retaildata
- File format: . xlsx
- Number of Row: 522065
- Number of Attributes: 7
- BillNo: 6-digit number assigned to each transaction. Nominal.
- Itemname: Product name. Nominal.
- Quantity: The quantities of each product per transaction. Numeric.
- Date: The day and time when each transaction was generated. Numeric.
- Price: Product price. Numeric.
- CustomerID: 5-digit number assigned to each customer. Nominal.
- Country: Name of the country where each customer resides. Nominal.
<img alt="image" src="https://user-images.githubusercontent.com/91852182/145270162-fc53e5a3-4ad1-4d06-b0e0-228aabcf6b70.png">
### Libraries in R
First, we need to load required libraries. Shortly I describe all libraries.
- arules - Provides the infrastructure for representing,
manipulating and analyzing transaction data and patterns (frequent itemsets and association rules).
- arulesViz - Extends package 'arules' with various visualization.
techniques for association rules and item-sets. The package also includes several interactive visualizations for rule exploration.
- tidyverse - The tidyverse is an opinionated collection of R packages designed for data science.
- readxl - Read Excel Files in R.
- plyr - Tools for Splitting, Applying and Combining Data.
- ggplot2 - A system for 'declaratively' creating graphics, based on "The Grammar of Graphics". You provide the data, tell 'ggplot2' how to map variables to aesthetics, what graphical primitives to use, and it takes care of the details.
- knitr - Dynamic Report generation in R.
- magrittr- Provides a mechanism for chaining commands with a new forward-pipe operator, %>%. This operator will forward a value, or the result of an expression, into the next function call/expression. There is flexible support for the type of right-hand side expressions.
- dplyr - A fast, consistent tool for working with data frame like objects, both in memory and out of memory.
- tidyverse - This package is designed to make it easy to install and load multiple 'tidyverse' packages in a single step.
<img alt="image" src="https://user-images.githubusercontent.com/91852182/145270210-49c8e1aa-9753-431b-a8d5-99601bc76cb5.png">
### Data Pre-processing
Next, we need to load Assignment-1_Data.xlsx into R and read the dataset. Now we can see our data in R.
<img alt="image" src="https://user-images.githubusercontent.com/91852182/145270229-514f0983-3bbb-4cd3-be64-980e92656a02.png">
<img alt="image" src="https://user-images.githubusercontent.com/91852182/145270251-6f6f6472-8817-435c-a995-9bc4bfef10d1.png">
Next we will clean our data frame and remove missing values.
<img alt="image" src="https://user-images.githubusercontent.com/91852182/145270286-05854e1a-2b6c-490e-ab30-9e99e731eacb.png">
To apply Association Rule mining, we need to convert the dataframe into transaction data, so that all items bought together in one invoice end up in one row. The lines of code below combine all products from the same BillNo and Date into one row, with each item separated by a comma (,).
<img alt="image" src="https://user-images.githubusercontent.com/91852182/145270333-b7848fce-53d9-4486-b2ae-c331f13b4275.png">
We don’t need BillNo and Date any more, so we set them to NULL.
Next, we store this transaction data in a .csv file.
<img alt="image" src="https://user-images.githubusercontent.com/91852182/145270368-787a1721-8d93-4f69-8369-da3a70082e95.png">
This is how the transaction data should look before we go to the next step.
<img alt="image" src="https://user-images.githubusercontent.com/91852182/145270884-ca3d6d47-708f-4ab6-bc1d-0e17b912e1f3.png">
<img alt="image" src="https://user-images.githubusercontent.com/91852182/145270942-14bfed99-473e-444c-9c14-a215c2957bee.png">
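For anyone reproducing this step outside R, a rough pandas equivalent of the basket-building shown in the screenshots might look as follows. It is only a sketch: the file name and the column names (BillNo, Date, Itemname) are taken from the dataset description above, not from the original R script.

```python
import pandas as pd

# Sketch: build one comma-separated basket per BillNo/Date, as described above.
raw = pd.read_excel("Assignment-1_Data.xlsx").dropna(subset=["Itemname"])

baskets = (
    raw.groupby(["BillNo", "Date"])["Itemname"]
    .apply(lambda items: ",".join(items.astype(str)))
    .reset_index(name="items")
)

# BillNo and Date are no longer needed; write one basket per line.
with open("market_basket_transactions.csv", "w") as f:
    f.write("\n".join(baskets["items"]))
```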
At this step we already have our transaction dataset, which shows the matrix of items bought together. We cannot yet see any rules or how often items were purchased together. Now let’s check how many transactions we have and what they contain. We have to load this transaction data into an object of the transaction class, which is done with the R function read.transactions of the arules package. The format of our data frame is basket.
<img alt="image" src="https://user-images.githubusercontent.com/91852182/145270981-84b76556-380b-4a32-a2ee-465d192141e8.png">
Let’s have a look at our transaction object with summary(transaction).
<img alt="image" src="https://user-images.githubusercontent.com/91852182/145271000-33fe3da8-6517-4d7a-a844-017022047ff6.png">
We can see 18193 transactions (rows) and 7698 items (columns); the 7698 columns are the product descriptions and the 18193 transactions are collections of these items.
<img alt="image" src="https://user-images.githubusercontent.com/91852182/145271025-9acd295d-6cef-44eb-8e5b-8ff12ddd798f.png">
The summary gives us some useful information:
- Density gives the percentage of non-zero cells in the sparse matrix, i.e. the total number of items purchased divided by the number of possible items in that matrix. You can estimate how many item purchases there were by using the density: 18193 x 7698 x 0.002291294 = 337445
- The summary also shows the most frequent items.
- Element (itemset/transaction) length distribution: this tells us how many transactions contain 1 item, 2 items and so on. The first row gives the number of items and the second row the number of transactions.
For example, there are only 1546 transactions with a single item, 860 transactions with 2 items, and the longest transaction contains 419 items.
Let’s check the item frequency plot: we will generate an itemFrequencyPlot to create an item frequency bar plot and view the distribution of objects based on the itemMatrix (e.g., transactions, or items in itemsets and rules), which is our case.
<img alt="image" src="https://user-images.githubusercontent.com/91852182/145271096-4279c764-0d0c-41dc-87fc-0b13b217f566.png">
<img alt="image" src="https://user-images.githubusercontent.com/91852182/145271132-f3bf1374-06b8-4f71-b810-dbb098f6963c.png">
<img alt="image" src="https://user-images.githubusercontent.com/91852182/145271187-3f6023db-3820-4083-a6e9-cd74020acc70.png">
In itemFrequencyPlot(transaction, topN=20, type="absolute") the first argument is our transaction object to be plotted. topN allows us to plot the top N highest-frequency items. type can be either "absolute" or "relative". If we choose absolute, it plots the numeric frequency of each item independently; if relative, it plots how often these items appear compared to the others. I also added color for better visualization.
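A hedged Python analogue of this plot, assuming the same raw data file and column names as in the earlier pandas sketch, could be:

```python
import pandas as pd
import matplotlib.pyplot as plt

# Rough analogue of itemFrequencyPlot(transaction, topN=20, type="absolute"):
# count the number of distinct bills in which each item appears.
raw = pd.read_excel("Assignment-1_Data.xlsx").dropna(subset=["Itemname"])
top20 = raw.drop_duplicates(["BillNo", "Itemname"])["Itemname"].value_counts().head(20)

top20.plot(kind="bar", figsize=(12, 4), color="steelblue",
           title="Top 20 items (absolute frequency)")
plt.ylabel("number of transactions containing the item")
plt.tight_layout()
plt.show()
```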
### Generating Rules
Next, we generate rules using the Apriori algorithm. The function apriori() comes from the arules package; it employs a level-wise search for frequent itemsets and generates both frequent itemsets and association rules. We pass supp=0.001 and conf=0.8 to return all rules that have a support of at least 0.1% and a confidence of at least 80%. We then sort the rules by decreasing confidence and check the summary of the rules.
<img alt="image" src="https://user-images.githubusercontent.com/91852182/145271236-3a82733a-216f-4db2-9f60-9b43c2016ea4.png">
apriori() takes (transaction) as the transaction object on which mining is to be applied. The parameter argument lets you set min_sup and min_confidence. The defaults are a minimum support of 0.1, a minimum confidence of 0.8 and a maximum of 10 items (maxlen).
<img alt="image" src="https://user-images.githubusercontent.com/91852182/145271269-4565e292-212a-47cb-8bc1-5bfe7f1819a8.png">
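The Python notebook below reaches the same kind of output with mlxtend; as a sketch, the arules call described here (supp=0.001, conf=0.8, sorted by confidence) would translate roughly to the following, assuming a one-hot bill x item dataframe built like `dfprep` further down.

```python
from mlxtend.frequent_patterns import apriori, association_rules

# Sketch of the arules call with supp=0.001 and conf=0.8.
# `basket_bool` is an assumed one-hot (True/False) bill x item dataframe,
# built the same way as dfprep in the notebook below.
# Note: such a low support is expensive, so the itemset length is capped here,
# whereas the arules default maxlen is 10.
frequent = apriori(basket_bool, min_support=0.001, use_colnames=True, max_len=3)
rules_r_style = association_rules(frequent, metric="confidence", min_threshold=0.8)
print(rules_r_style.sort_values("confidence", ascending=False).head(10))
```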
The summary of the rules gives us clear information:
- Number of rules: 97267
- The distribution of rules by length: rules of length 6 are the most numerous (33296) and rules of length 2 the least (111)
- The summary of quality measures: ranges of support, confidence, and lift.
- The information on the data mining: total data mined, and the minimum parameters we set earlier
Now, 97267 is a lot of rules, so we will identify only the top 10.
<img alt="image" src="https://user-images.githubusercontent.com/91852182/145271351-6977af6b-2d7a-4a81-a3c1-f160722b2897.png">
<img alt="image" src="https://user-images.githubusercontent.com/91852182/145271400-b74ec578-5bf5-47fd-a564-407eacfb6b43.png">
Using the above output, you can make analysis such as:
- 100% of the customers who bought 'ART LIGHTS ' also bought 'FUNK MONKEY'.
- 100% of the customers who bought 'BILLBOARD FONTS DESIGN ' also bought 'WRAP'.
We can limit the size and number of rules generated by setting parameter in apriori(). If we want stronger rules, we must increase the value of conf; for longer rules, give a higher value to maxlen.
### Visualizing Association Rules
We have generated thousands of rules from the data, so we need a couple of ways to present our findings. We will use the arulesViz package to visualize the association rules.
#### Scatter-Plot:
<img alt="image" src="https://user-images.githubusercontent.com/91852182/145271541-b9e4eb67-68c4-499b-b5c8-37f27bc27347.png">
A straightforward visualization of association rules is a scatter plot drawn with plot() from the arulesViz package. It uses Support and Confidence on the axes; in addition, a third measure, Lift, is used by default to color the points (grey levels).
<img alt="image" src="https://user-images.githubusercontent.com/91852182/145271713-e995b95e-381a-4e00-83b9-d197b5e4a7a5.png">
<img alt="image" src="https://user-images.githubusercontent.com/91852182/145271728-e172716c-a3c7-4485-8c00-b5da2435f8b2.png">
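Without arulesViz, a similar support/confidence scatter shaded by lift can be drawn directly from an mlxtend rules dataframe; a minimal sketch, using the hypothetical `rules_r_style` frame from the earlier sketch, is:

```python
import matplotlib.pyplot as plt

# Minimal sketch of arulesViz's default scatter plot: support vs confidence,
# with lift mapped to the color of the points.
sc = plt.scatter(rules_r_style["support"], rules_r_style["confidence"],
                 c=rules_r_style["lift"], cmap="Greys", s=10)
plt.colorbar(sc, label="lift")
plt.xlabel("support")
plt.ylabel("confidence")
plt.title("Association rules: support vs confidence (shaded by lift)")
plt.show()
```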
#### Interactive Scatter-Plot:
We can look at each rule interactively and view all its quality measures (support, confidence and lift).
<img alt="image" src="https://user-images.githubusercontent.com/91852182/145271752-d5d0406e-a42d-4cb9-9c34-db14e51556e9.png">
<img alt="image" src="https://user-images.githubusercontent.com/91852182/145271774-70479173-6b0e-45bd-b403-b99af9747e31.png">
<img alt="image" src="https://user-images.githubusercontent.com/91852182/145271779-fa20c370-7e18-4a1f-b70e-47c03aa42033.png">
#### Graph - Based Visualization and Group Method:
Graph plots are a great way to visualize rules but tend to become congested as the number of rules increases, so it is better to visualize a smaller number of rules with graph-based visualizations. We also show the grouped-matrix method for the top 10 items.
<img alt="image" src="https://user-images.githubusercontent.com/91852182/145271807-c6328bf7-2b46-45cf-acec-2f4b858af85b.png">
<img alt="image" src="https://user-images.githubusercontent.com/91852182/145271812-d9ce223b-337e-44c6-bb8c-c75c77d4ee64.png">
### Conclusion
The results of these calculations can be used as recommendations for retail owners when arranging product catalogs and taking strategic steps to improve product marketing. By utilizing the association rules discovered in the analysis, the retailer can apply effective marketing and sales promotion strategies, increase customer engagement, improve the customer experience and identify customer behavior.
### Attached files
- Assigment Part1 Retailer.R
- Assignment-1_Data.xlsx
Kaggle dataset identifier: market-basket-analysis
<jupyter_script># # Market Basket Analysis - by Abhi Sharma
import pandas as pd
import numpy as np
import seaborn as sns
from mlxtend.frequent_patterns import apriori
from mlxtend.frequent_patterns import association_rules
import warnings
warnings.filterwarnings("ignore")
# ##### Part 1 : Data Preparation and Cleaning
df = pd.read_csv(
"/kaggle/input/market-basket-analysis/Assignment-1_Data.csv", delimiter=";"
)
df.head()
df.shape
df.info()
# We see there are some null values in Itemname and CustomerID. Let's observe them
df[df.Itemname.isna()]
# let's remove rows with null Itemname values
df.drop(df[df["Itemname"].isna()].index, inplace=True)
df.shape
df.info()
# Here we see we have 388023 rows with a null CustomerID. Let's observe them before moving forward
df[df["CustomerID"].isna()]
# The data above looks fine. There may be many reasons for CustomerID being null, but let's consider these records valid for our analysis and move ahead
# check if there are any patterns for country and what is the share of each country
df.groupby(["Country"])["BillNo"].nunique().sort_values(ascending=False)
# ### Now, let's prepare the data for association rules
dfprep = (
df.groupby(["BillNo", "Itemname"])
.agg({"Quantity": "sum"})
.reset_index()
.pivot(index="BillNo", columns="Itemname")
.fillna(0)
)
dfprep.columns = dfprep.columns.droplevel(0)
dfprep.head()
dfprep.reset_index(inplace=True)
# drop the BillNo column so that only the item columns remain
dfprep.drop(dfprep.columns[0], inplace=True, axis=1)
dfprep = dfprep.applymap(lambda x: True if x > 0 else False)
dfprep
frequent_itemsets = apriori(dfprep, min_support=0.01, use_colnames=True, max_len=2)
frequent_itemsets
rules = association_rules(frequent_itemsets, metric="lift", min_threshold=1.5)
rules.shape
rules
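# A possible next step (not part of the original notebook): sort the generated
# rules by lift and confidence to surface the strongest item associations.
top_rules = rules.sort_values(["lift", "confidence"], ascending=False).head(10)
top_rules[["antecedents", "consequents", "support", "confidence", "lift"]]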
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/647/129647995.ipynb
|
market-basket-analysis
|
aslanahmedov
|
[{"Id": 129647995, "ScriptId": 38021269, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3559766, "CreationDate": "05/15/2023 13:18:25", "VersionNumber": 1.0, "Title": "Association Rules - Market Basket Analysis", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 75.0, "LinesInsertedFromPrevious": 75.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
|
[{"Id": 185933427, "KernelVersionId": 129647995, "SourceDatasetVersionId": 2904404}]
|
[{"Id": 2904404, "DatasetId": 1779990, "DatasourceVersionId": 2951619, "CreatorUserId": 8993668, "LicenseName": "Data files \u00a9 Original Authors", "CreationDate": "12/09/2021 00:27:56", "VersionNumber": 2.0, "Title": "Market Basket Analysis", "Slug": "market-basket-analysis", "Subtitle": "Analyzing Consumer Behaviour Using MBA Association Rule Mining", "Description": "# Market Basket Analysis\nMarket basket analysis with Apriori algorithm\n\nThe retailer wants to target customers with suggestions on itemset that a customer is most likely to purchase .I was given dataset contains data of a retailer; the transaction data provides data around all the transactions that have happened over a period of time. Retailer will use result to grove in his industry and provide for customer suggestions on itemset, we be able increase customer engagement and improve customer experience and identify customer behavior. I will solve this problem with use Association Rules type of unsupervised learning technique that checks for the dependency of one data item on another data item.\n\n \n### Introduction\n\nAssociation Rule is most used when you are planning to build association in different objects in a set. It works when you are planning to find frequent patterns in a transaction database. It can tell you what items do customers frequently buy together and it allows retailer to identify relationships between the items. \n\n### An Example of Association Rules\n\nAssume there are 100 customers, 10 of them bought Computer Mouth, 9 bought Mat for Mouse and 8 bought both of them.\n- bought Computer Mouth => bought Mat for Mouse\n- support = P(Mouth & Mat) = 8/100 = 0.08\n- confidence = support/P(Mat for Mouse) = 0.08/0.09 = 0.89\n- lift = confidence/P(Computer Mouth) = 0.89/0.10 = 8.9\nThis just simple example. In practice, a rule needs the support of several hundred transactions, before it can be considered statistically significant, and datasets often contain thousands or millions of transactions.\n\n### Strategy\n\n- Data Import\n- Data Understanding and Exploration\n- Transformation of the data \u2013 so that is ready to be consumed by the association rules algorithm\n- Running association rules\n- Exploring the rules generated\n- Filtering the generated rules\n- Visualization of Rule \n\n### Dataset Description\n\n- File name: Assignment-1_Data\n- List name: retaildata\n- File format: . xlsx\n- Number of Row: 522065\n- Number of Attributes: 7\n\n\t- BillNo: 6-digit number assigned to each transaction. Nominal.\n\t- Itemname: Product name. Nominal.\n\t- Quantity: The quantities of each product per transaction. Numeric.\n\t- Date: The day and time when each transaction was generated. Numeric.\n\t- Price: Product price. Numeric.\n\t- CustomerID: 5-digit number assigned to each customer. Nominal.\n\t- Country: Name of the country where each customer resides. Nominal.\n\n<img alt=\"image\" src=\"https://user-images.githubusercontent.com/91852182/145270162-fc53e5a3-4ad1-4d06-b0e0-228aabcf6b70.png\">\n\n### Libraries in R\n\nFirst, we need to load required libraries. Shortly I describe all libraries.\n\n- arules - Provides the infrastructure for representing, \nmanipulating and analyzing transaction data and patterns (frequent itemsets and association rules).\n- arulesViz - Extends package 'arules' with various visualization.\ntechniques for association rules and item-sets. 
The package also includes several interactive visualizations for rule exploration.\n- tidyverse - The tidyverse is an opinionated collection of R packages designed for data science.\n- readxl - Read Excel Files in R.\n- plyr - Tools for Splitting, Applying and Combining Data.\n- ggplot2 - A system for 'declaratively' creating graphics, based on \"The Grammar of Graphics\". You provide the data, tell 'ggplot2' how to map variables to aesthetics, what graphical primitives to use, and it takes care of the details.\n- knitr - Dynamic Report generation in R.\n- magrittr- Provides a mechanism for chaining commands with a new forward-pipe operator, %>%. This operator will forward a value, or the result of an expression, into the next function call/expression. There is flexible support for the type of right-hand side expressions.\n- dplyr - A fast, consistent tool for working with data frame like objects, both in memory and out of memory.\n- tidyverse - This package is designed to make it easy to install and load multiple 'tidyverse' packages in a single step.\n\n<img alt=\"image\" src=\"https://user-images.githubusercontent.com/91852182/145270210-49c8e1aa-9753-431b-a8d5-99601bc76cb5.png\">\n\n### Data Pre-processing\n\nNext, we need to upload Assignment-1_Data. xlsx to R to read the dataset.Now we can see our data in R. \n\n<img alt=\"image\" src=\"https://user-images.githubusercontent.com/91852182/145270229-514f0983-3bbb-4cd3-be64-980e92656a02.png\">\n<img alt=\"image\" src=\"https://user-images.githubusercontent.com/91852182/145270251-6f6f6472-8817-435c-a995-9bc4bfef10d1.png\">\n \nAfter we will clear our data frame, will remove missing values.\n\n<img alt=\"image\" src=\"https://user-images.githubusercontent.com/91852182/145270286-05854e1a-2b6c-490e-ab30-9e99e731eacb.png\">\n\nTo apply Association Rule mining, we need to convert dataframe into transaction data to make all items that are bought together in one invoice will be in one row. Below lines of code will combine all products from one BillNo and Date and combine all products from that BillNo and Date as one row, with each item, separated by (,)\n\n<img alt=\"image\" src=\"https://user-images.githubusercontent.com/91852182/145270333-b7848fce-53d9-4486-b2ae-c331f13b4275.png\">\n\nWe don\u2019t need BillNo and Date, we will make it as Null.\nNext, you have to store this transaction data into .csv \n\n<img alt=\"image\" src=\"https://user-images.githubusercontent.com/91852182/145270368-787a1721-8d93-4f69-8369-da3a70082e95.png\">\n\n\nThis how should look transaction data before we will go to next step.\n\n<img alt=\"image\" src=\"https://user-images.githubusercontent.com/91852182/145270884-ca3d6d47-708f-4ab6-bc1d-0e17b912e1f3.png\">\n\n<img alt=\"image\" src=\"https://user-images.githubusercontent.com/91852182/145270942-14bfed99-473e-444c-9c14-a215c2957bee.png\">\n\n\nAt this step we already have our transaction dataset, and it shows the matrix of items which bought together. We can\u2019t see here any rules and how often it was purchase together. Now let\u2019s check how many transactions we have and what they are. We will have to have to load this transaction data into an object of the transaction class. This is done by using the R function read.transactions of the arules package. Our format of Data frame is basket. 
\n\n<img alt=\"image\" src=\"https://user-images.githubusercontent.com/91852182/145270981-84b76556-380b-4a32-a2ee-465d192141e8.png\">\n \nLet\u2019s have a view our transaction object by summary(transaction)\n\n<img alt=\"image\" src=\"https://user-images.githubusercontent.com/91852182/145271000-33fe3da8-6517-4d7a-a844-017022047ff6.png\">\n\nWe can see 18193 transactions (rows) and 7698 items (columns). 7698 is the product descriptions and 18193 transactions are collections of these items.\n\n<img alt=\"image\" src=\"https://user-images.githubusercontent.com/91852182/145271025-9acd295d-6cef-44eb-8e5b-8ff12ddd798f.png\">\n\nThe summary gives us some useful information:\n- Density tells the percentage of non-zero cells in a sparse matrix. In other words, total number of items that are purchased divided by a possible number of items in that matrix. You can calculate how many items were purchased by using density: 18193x7698x0.002291294=337445\n- Summary will show us most frequent items.\n- Element (itemset/transaction) length distribution: It will gave us how many transactions are there for 1-itemset, 2-itemset and so on. The first row is telling you a number of items and the second row is telling you the number of transactions.\nFor example, there is only 1546 transaction for one item, 860 transactions for 2 items, and there are 419 items in one transaction which is the longest.\n\nLet\u2019s check item frequency plot, we will generate an itemFrequencyPlot to create an item Frequency Bar Plot to view the distribution of objects based on itemMatrix (e.g., >transactions or items in >itemsets and >rules) which is our case.\n\n<img alt=\"image\" src=\"https://user-images.githubusercontent.com/91852182/145271096-4279c764-0d0c-41dc-87fc-0b13b217f566.png\">\n\n<img alt=\"image\" src=\"https://user-images.githubusercontent.com/91852182/145271132-f3bf1374-06b8-4f71-b810-dbb098f6963c.png\">\n\n<img alt=\"image\" src=\"https://user-images.githubusercontent.com/91852182/145271187-3f6023db-3820-4083-a6e9-cd74020acc70.png\">\n\nIn itemFrequencyPlot(transaction,topN=20,type=\"absolute\") first argument - our transaction object to be plotted that is tr. topN is allows us to plot top N highest frequency items. type can be as type=\"absolute\" or type=\"relative\". If we will chouse absolute it will plot numeric frequencies of each item independently. If relative it will plot how many times these items have appeared as compared to others. As well I made it in colure for better visualization.\n\n### Generating Rules\n \nNext, we will generate rules using the Apriori algorithm. The function apriori() is from package arules. The algorithm employs level-wise search for frequent itemsets. Algorithm will generate frequent itemsets and association rules. We pass supp=0.001 and conf=0.8 to return all the rules that have a support of at least 0.1% and confidence of at least 80%. We sort the rules by decreasing confidence and will check summary of the rules.\n\n<img alt=\"image\" src=\"https://user-images.githubusercontent.com/91852182/145271236-3a82733a-216f-4db2-9f60-9b43c2016ea4.png\">\n \nThe apriori will take (transaction) as the transaction object on which mining is to be applied. parameter will allow you to set min_sup and min_confidence. 
The default values for parameter are minimum support of 0.1, the minimum confidence of 0.8, maximum of 10 items (maxlen).\n\n<img alt=\"image\" src=\"https://user-images.githubusercontent.com/91852182/145271269-4565e292-212a-47cb-8bc1-5bfe7f1819a8.png\">\n\nSummary of rules give us clear information as:\n- Number of rules: 97267\n- The distribution of rules by length: a length of 6 items has the most 33296 and length of 2 items has lowest number of rules 111\n- The summary of quality measures: ranges of support, confidence, and lift.\n- The information on data mining: total data mined, and the minimum parameters we set earlier\n\nNow, 97267 it a lot of rules. We will identify only top 10.\n\n<img alt=\"image\" src=\"https://user-images.githubusercontent.com/91852182/145271351-6977af6b-2d7a-4a81-a3c1-f160722b2897.png\">\n\n<img alt=\"image\" src=\"https://user-images.githubusercontent.com/91852182/145271400-b74ec578-5bf5-47fd-a564-407eacfb6b43.png\">\n\n\nUsing the above output, you can make analysis such as:\n- 100% of the customers who bought 'ART LIGHTS ' also bought 'FUNK MONKEY'.\n- 100% of the customers who bought 'BILLBOARD FONTS DESIGN ' also bought 'WRAP'.\nWe can limit the size and number of rules generated. we can set parameter in Apriori. If we want stronger rules, we must to increase the value of conf. and for more extended rules give higher value to maxlen. \n\n### Visualizing Association Rules\n\nWe have thousands of rules generated based on data, we will need a couple of ways to present our findings. We will use ItemFrequencyPlot to visualize association rules.\n\n#### Scatter-Plot:\n\n<img alt=\"image\" src=\"https://user-images.githubusercontent.com/91852182/145271541-b9e4eb67-68c4-499b-b5c8-37f27bc27347.png\">\n\nA straight-forward visualization of association rules is to use a scatter plot using plot() of the arulesViz package. It uses Support and Confidence on the axes. In addition, third measure Liftis used by default to color (grey levels) of the points.\n\n<img alt=\"image\" src=\"https://user-images.githubusercontent.com/91852182/145271713-e995b95e-381a-4e00-83b9-d197b5e4a7a5.png\">\n<img alt=\"image\" src=\"https://user-images.githubusercontent.com/91852182/145271728-e172716c-a3c7-4485-8c00-b5da2435f8b2.png\">\n\n\n\n#### Interactive Scatter-Plot:\n\nWe can have a look for each rule (interactively) and view all quality measures (support, confidence and lift). \n\n<img alt=\"image\" src=\"https://user-images.githubusercontent.com/91852182/145271752-d5d0406e-a42d-4cb9-9c34-db14e51556e9.png\">\n\n<img alt=\"image\" src=\"https://user-images.githubusercontent.com/91852182/145271774-70479173-6b0e-45bd-b403-b99af9747e31.png\">\n\n<img alt=\"image\" src=\"https://user-images.githubusercontent.com/91852182/145271779-fa20c370-7e18-4a1f-b70e-47c03aa42033.png\">\n\n#### Graph - Based Visualization and Group Method:\n\nGraph plots are a great way to visualize rules but tend to become congested as the number of rules increases. 
So, it is better to visualize a smaller number of rules with graph-based visualizations.\tWe can see as well group method for top 10 items.\n\n<img alt=\"image\" src=\"https://user-images.githubusercontent.com/91852182/145271807-c6328bf7-2b46-45cf-acec-2f4b858af85b.png\">\n<img alt=\"image\" src=\"https://user-images.githubusercontent.com/91852182/145271812-d9ce223b-337e-44c6-bb8c-c75c77d4ee64.png\">\n\n### Conclusion\nBased on the results of these calculations can be used as a recommendation for retail owners to arrange the arrangement of product catalogs and take strategic steps to improve product marketing.. By utilizing the association rules which are discovered as a result of the analyses, the retailer can apply effective marketing and sales promotion strategies, he will be able increase customer engagement and improve customer experience and identify customer behavior.\n\n### Attached files \n\n- Assigment Part1 Retailer.R\n- Assignment-1_Data.xlsx", "VersionNotes": "Data Update 2021/12/09", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1779990, "CreatorUserId": 8993668, "OwnerUserId": 8993668.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2904404.0, "CurrentDatasourceVersionId": 2951619.0, "ForumId": 1802343, "Type": 2, "CreationDate": "12/08/2021 22:47:18", "LastActivityDate": "12/08/2021", "TotalViews": 53280, "TotalDownloads": 7601, "TotalVotes": 92, "TotalKernels": 17}]
|
[{"Id": 8993668, "UserName": "aslanahmedov", "DisplayName": "Aslan Ahmedov", "RegisterDate": "11/25/2021", "PerformanceTier": 2}]
|
# # Market Basket Analysis - by Abhi Sharma
import pandas as pd
import numpy as np
import seaborn as sns
from mlxtend.frequent_patterns import apriori
from mlxtend.frequent_patterns import association_rules
import warnings
warnings.filterwarnings("ignore")
# ##### Part 1 : Data Preparation and Cleaning
df = pd.read_csv(
"/kaggle/input/market-basket-analysis/Assignment-1_Data.csv", delimiter=";"
)
df.head()
df.shape
df.info()
# We see there are some null values in Itemname and CustomerID. Let's observe them
df[df.Itemname.isna()]
# let's remove rows with null Itemname values
df.drop(df[df["Itemname"].isna()].index, inplace=True)
df.shape
df.info()
# Here we see we have 388023 rows with a null CustomerID. Let's observe them before moving forward
df[df["CustomerID"].isna()]
# The data above looks fine. There may be many reasons for CustomerID being null, but let's consider these records valid for our analysis and move ahead
# check if there are any patterns for country and what is the share of each country
df.groupby(["Country"])["BillNo"].nunique().sort_values(ascending=False)
# ### Now, let's prepare the data for association rules
dfprep = (
df.groupby(["BillNo", "Itemname"])
.agg({"Quantity": "sum"})
.reset_index()
.pivot(index="BillNo", columns="Itemname")
.fillna(0)
)
dfprep.columns = dfprep.columns.droplevel(0)
dfprep.head()
dfprep.reset_index(inplace=True)
# drop the BillNo column so that only the item columns remain
dfprep.drop(dfprep.columns[0], inplace=True, axis=1)
dfprep = dfprep.applymap(lambda x: True if x > 0 else False)
dfprep
frequent_itemsets = apriori(dfprep, min_support=0.01, use_colnames=True, max_len=2)
frequent_itemsets
rules = association_rules(frequent_itemsets, metric="lift", min_threshold=1.5)
rules.shape
rules
| false | 1 | 541 | 2 | 4,988 | 541 |
||
129647825
|
""" Repeat all preprocessing of the original notebook with suitable modifications """
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# import seaborn as sns
# from tqdm.notebook import tqdm
# import matplotlib
# from matplotlib import rc
# DATA LOADING
df = pd.read_csv(
"../input/eurecom-aml-2023-challenge-1/public/train.csv", low_memory=True
) # TRAINING DATA
df_test = pd.read_csv(
"../input/eurecom-aml-2023-challenge-1/public/test_feat.csv", low_memory=True
) # TESTING DATA
""" Feature selection and linearization """
feat_selection = [
"wrf_t2_interpolated",
"wrf_t2_next",
"gfs_temperature_97500",
"gfs_temperature_70000",
"gfs_temperature_40000",
"gfs_temperature_10000",
# 'gfs_temperature_95000', 'gfs_temperature_92500', 'gfs_temperature_90000', 'gfs_temperature_85000', 'gfs_temperature_80000',
# 'gfs_temperature_75000', 'gfs_temperature_65000', 'gfs_temperature_60000', 'gfs_temperature_55000',
# 'gfs_temperature_50000', 'gfs_temperature_45000', 'gfs_temperature_35000', 'gfs_temperature_30000',
# 'gfs_temperature_25000', 'gfs_temperature_15000', 'gfs_temperature_7000',
"climate_temperature",
"cmc_0_0_6_2",
"cmc_0_1_0_0",
"gfs_2m_dewpoint_next",
"gfs_2m_dewpoint",
"cmc_0_3_5_500",
"gfs_precipitable_water",
"cmc_0_3_5_700",
"fact_time",
"fact_latitude",
"cmc_0_0_7_2",
"cmc_0_3_1_0",
"sun_elevation",
"cmc_0_3_5_1000",
"cmc_0_3_5_850",
"gfs_humidity",
"cmc_0_2_2_500",
"cmc_0_2_2_700",
"fact_temperature",
]
print("Final number of features:", len(feat_selection) - 1)
slice_df = df[feat_selection].drop_duplicates()
# LINEARIZE LATITUDE
abs_latitude = abs(slice_df["fact_latitude"])
slice_df.loc[:, "fact_latitude"] = abs_latitude
# CHECK LINEARITY OF FEATURES WITH TEMPERATURE
fig, ax = plt.subplots(4, 6, figsize=(24, 12), dpi=80)
for idx_f, feature in enumerate(feat_selection[:-1]):
idx_ax = np.unravel_index(idx_f, shape=(4, 6), order="C")
ax[idx_ax[0], idx_ax[1]].scatter(
slice_df[[feature]], slice_df[["fact_temperature"]], s=5
)
ax[idx_ax[0], idx_ax[1]].set_title(feature, fontsize=18)
ax[idx_ax[0], idx_ax[1]].grid()
fig.subplots_adjust(
left=0.125, # the left side of the subplots of the figure
right=0.9, # the right side of the subplots of the figure
bottom=0.05, # the bottom of the subplots of the figure
top=0.85, # the top of the subplots of the figure
wspace=0.27, # the amount of width reserved for blank space between subplots
hspace=0.37,
) # the amount of height reserved for white space between subplots
""" Prepare data, optimizae models """
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.model_selection import GridSearchCV
import time
# PREPARE DATA
X = slice_df.dropna().iloc[:, :-1].values
y = slice_df.dropna().iloc[:, -1].values
Xmean, Xstd, ymean, ystd = X.mean(0), X.std(0), y.mean(), y.std()
X = (X - Xmean) / Xstd
y = (y - ymean) / ystd
# SPLIT INTO TRAIN AND TEST DATASETS
from sklearn.model_selection import train_test_split
Xtr, Xval, ytr, yval = train_test_split(X, y, random_state=1, test_size=10000)
# DEFINE RMSE FUNCTION
def compute_rmse(y, ypred, ystd=1.0):
return np.mean((y - ypred) ** 2) ** 0.5 * ystd
""" Find best parameters for each model considered with cross validation """
# (done separately in the following cells)
# LASSO
# param_grid_lasso = {"alpha": [0.001, 0.01, 0.1, 1, 10]}
# param_grid_lasso = {"alpha": [0.001, 0.01, 0.1]}
param_grid_lasso = {"alpha": [0.0001, 0.001]}
# param_grid_lasso = {"alpha": [0.00001, 0.0001, 0.001, 0.01, 0.1]} # convergence error
start_time = time.time()
lasso_model = Lasso()
search_Lasso = GridSearchCV(
lasso_model,
param_grid_lasso,
n_jobs=-1,
verbose=1,
scoring="neg_root_mean_squared_error",
).fit(Xtr, ytr)
end_time = time.time()
print("Done! It took", end_time - start_time, "seconds") # 322.698, 335.120
print("Best parameters set found on cv set:", search.best_params_)
ypred_val = search.predict(Xval)
print(f"Validation RMSE: {compute_rmse(yval, ypred_val, ystd):.3f}")
# RIDGE
param_grid_ridge = {"alpha": [0.01, 0.1, 1, 10, 100]}
start_time = time.time()
ridge_model = Ridge()
search_ridge = GridSearchCV(
ridge_model,
param_grid_ridge,
n_jobs=-1,
verbose=1,
scoring="neg_root_mean_squared_error",
).fit(Xtr, ytr)
end_time = time.time()
print("Done! It took", end_time - start_time, "seconds") # 10.54, 7.91
print("Best parameters set found on cv set:", search_ridge.best_params_)
ypred_val_ridge = search_ridge.predict(Xval)
print(f"Validation RMSE: {compute_rmse(yval, ypred_val_ridge, ystd):.3f}")
# LINEAR LEAST SQUARES
linear_model = LinearRegression()
linear_model.fit(Xtr, ytr)
ypred_val_linear = linear_model.predict(Xval)
print(f"Validation RMSE: {compute_rmse(yval, ypred_val_linear, ystd):.3f}")
""" Train again with optimal parameters and rpint both training and validation errors """
model_lasso = Lasso(alpha=0.001)
start_time = time.time()
model_lasso.fit(Xtr, ytr)
end_time = time.time()
ypred_tr_lasso = model_lasso.predict(Xtr)
ypred_val_lasso = model_lasso.predict(Xval)
print(
f"LASSO MODEL \t Train RMSE: {compute_rmse(ytr, ypred_tr_lasso, ystd):.3f}",
f"\t Valid RMSE: {compute_rmse(yval, ypred_val_lasso, ystd):.3f}",
f"\t training time: {(end_time - start_time):.3f}",
)
model_ridge = Ridge(alpha=1)
start_time = time.time()
model_ridge.fit(Xtr, ytr)
end_time = time.time()
ypred_tr_ridge = model_ridge.predict(Xtr)
ypred_val_ridge = model_ridge.predict(Xval)
print(
f"RIDGE MODEL \t Train RMSE: {compute_rmse(ytr, ypred_tr_ridge, ystd):.3f}",
f"\t Valid RMSE: {compute_rmse(yval, ypred_val_ridge, ystd):.3f}",
f"\t training time: {(end_time - start_time):.3f}",
)
model_linear = LinearRegression()
start_time = time.time()
model_linear.fit(Xtr, ytr)
end_time = time.time()
ypred_tr_linear = model_linear.predict(Xtr)
ypred_val_linear = model_linear.predict(Xval)
print(
f"LINEAR MODEL \t Train RMSE: {compute_rmse(ytr, ypred_tr_linear, ystd):.3f}",
f"\t Valid RMSE: {compute_rmse(yval, ypred_val_linear, ystd):.3f}",
f"\t training time: {(end_time - start_time):.3f}",
)
""" Generate predictions with the chosen model """
test_feat_selection = feat_selection[
:-1
] # all the same except for fact_temperature, which doesn't exist in test set
Xte = df_test[test_feat_selection].values
Xte = (Xte - Xmean) / Xstd
ypred_te = (
model_ridge.predict(Xte) * ystd + ymean
) # Remember to un-standardize the predictions
# PUT IN DATA FRAME
submission_df = pd.DataFrame(
data={"index": df_test["index"].values, "fact_temperature": ypred_te.squeeze()}
)
submission_df.to_csv("/kaggle/working/submission.csv", index=False)
print(submission_df)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/647/129647825.ipynb
| null | null |
[{"Id": 129647825, "ScriptId": 38542441, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2620011, "CreationDate": "05/15/2023 13:17:14", "VersionNumber": 2.0, "Title": "Compare_models", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 165.0, "LinesInsertedFromPrevious": 159.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 6.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
""" Repeat all preprocessing of the original notebook with suitable modifications """
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# import seaborn as sns
# from tqdm.notebook import tqdm
# import matplotlib
# from matplotlib import rc
# DATA LOADING
df = pd.read_csv(
"../input/eurecom-aml-2023-challenge-1/public/train.csv", low_memory=True
) # TRAINING DATA
df_test = pd.read_csv(
"../input/eurecom-aml-2023-challenge-1/public/test_feat.csv", low_memory=True
) # TESTING DATA
""" Feature selection and linearization """
feat_selection = [
"wrf_t2_interpolated",
"wrf_t2_next",
"gfs_temperature_97500",
"gfs_temperature_70000",
"gfs_temperature_40000",
"gfs_temperature_10000",
# 'gfs_temperature_95000', 'gfs_temperature_92500', 'gfs_temperature_90000', 'gfs_temperature_85000', 'gfs_temperature_80000',
# 'gfs_temperature_75000', 'gfs_temperature_65000', 'gfs_temperature_60000', 'gfs_temperature_55000',
# 'gfs_temperature_50000', 'gfs_temperature_45000', 'gfs_temperature_35000', 'gfs_temperature_30000',
# 'gfs_temperature_25000', 'gfs_temperature_15000', 'gfs_temperature_7000',
"climate_temperature",
"cmc_0_0_6_2",
"cmc_0_1_0_0",
"gfs_2m_dewpoint_next",
"gfs_2m_dewpoint",
"cmc_0_3_5_500",
"gfs_precipitable_water",
"cmc_0_3_5_700",
"fact_time",
"fact_latitude",
"cmc_0_0_7_2",
"cmc_0_3_1_0",
"sun_elevation",
"cmc_0_3_5_1000",
"cmc_0_3_5_850",
"gfs_humidity",
"cmc_0_2_2_500",
"cmc_0_2_2_700",
"fact_temperature",
]
print("Final number of features:", len(feat_selection) - 1)
slice_df = df[feat_selection].drop_duplicates()
# LINEARIZE LATITUDE
abs_latitude = abs(slice_df["fact_latitude"])
slice_df.loc[:, "fact_latitude"] = abs_latitude
# CHECK LINEARITY OF FEATURES WITH TEMPERATURE
fig, ax = plt.subplots(4, 6, figsize=(24, 12), dpi=80)
for idx_f, feature in enumerate(feat_selection[:-1]):
idx_ax = np.unravel_index(idx_f, shape=(4, 6), order="C")
ax[idx_ax[0], idx_ax[1]].scatter(
slice_df[[feature]], slice_df[["fact_temperature"]], s=5
)
ax[idx_ax[0], idx_ax[1]].set_title(feature, fontsize=18)
ax[idx_ax[0], idx_ax[1]].grid()
fig.subplots_adjust(
left=0.125, # the left side of the subplots of the figure
right=0.9, # the right side of the subplots of the figure
bottom=0.05, # the bottom of the subplots of the figure
top=0.85, # the top of the subplots of the figure
wspace=0.27, # the amount of width reserved for blank space between subplots
hspace=0.37,
) # the amount of height reserved for white space between subplots
""" Prepare data, optimizae models """
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.model_selection import GridSearchCV
import time
# PREPARE DATA
X = slice_df.dropna().iloc[:, :-1].values
y = slice_df.dropna().iloc[:, -1].values
Xmean, Xstd, ymean, ystd = X.mean(0), X.std(0), y.mean(), y.std()
X = (X - Xmean) / Xstd
y = (y - ymean) / ystd
# SPLIT INTO TRAIN AND TEST DATASETS
from sklearn.model_selection import train_test_split
Xtr, Xval, ytr, yval = train_test_split(X, y, random_state=1, test_size=10000)
# DEFINE RMSE FUNCTION
def compute_rmse(y, ypred, ystd=1.0):
return np.mean((y - ypred) ** 2) ** 0.5 * ystd
""" Find best parameters for each model considered with cross validation """
# (done separately in the following cells)
# LASSO
# param_grid_lasso = {"alpha": [0.001, 0.01, 0.1, 1, 10]}
# param_grid_lasso = {"alpha": [0.001, 0.01, 0.1]}
param_grid_lasso = {"alpha": [0.0001, 0.001]}
# param_grid_lasso = {"alpha": [0.00001, 0.0001, 0.001, 0.01, 0.1]} # convergence error
start_time = time.time()
lasso_model = Lasso()
search_Lasso = GridSearchCV(
lasso_model,
param_grid_lasso,
n_jobs=-1,
verbose=1,
scoring="neg_root_mean_squared_error",
).fit(Xtr, ytr)
end_time = time.time()
print("Done! It took", end_time - start_time, "seconds") # 322.698, 335.120
print("Best parameters set found on cv set:", search.best_params_)
ypred_val = search.predict(Xval)
print(f"Validation RMSE: {compute_rmse(yval, ypred_val, ystd):.3f}")
# RIDGE
param_grid_ridge = {"alpha": [0.01, 0.1, 1, 10, 100]}
start_time = time.time()
ridge_model = Ridge()
search_ridge = GridSearchCV(
ridge_model,
param_grid_ridge,
n_jobs=-1,
verbose=1,
scoring="neg_root_mean_squared_error",
).fit(Xtr, ytr)
end_time = time.time()
print("Done! It took", end_time - start_time, "seconds") # 10.54, 7.91
print("Best parameters set found on cv set:", search_ridge.best_params_)
ypred_val_ridge = search_ridge.predict(Xval)
print(f"Validation RMSE: {compute_rmse(yval, ypred_val_ridge, ystd):.3f}")
# LINEAR LEAST SQUARES
linear_model = LinearRegression()
linear_model.fit(Xtr, ytr)
ypred_val_linear = linear_model.predict(Xval)
print(f"Validation RMSE: {compute_rmse(yval, ypred_val_linear, ystd):.3f}")
""" Train again with optimal parameters and rpint both training and validation errors """
model_lasso = Lasso(alpha=0.001)
start_time = time.time()
model_lasso.fit(Xtr, ytr)
end_time = time.time()
ypred_tr_lasso = model_lasso.predict(Xtr)
ypred_val_lasso = model_lasso.predict(Xval)
print(
f"LASSO MODEL \t Train RMSE: {compute_rmse(ytr, ypred_tr_lasso, ystd):.3f}",
f"\t Valid RMSE: {compute_rmse(yval, ypred_val_lasso, ystd):.3f}",
f"\t training time: {(end_time - start_time):.3f}",
)
model_ridge = Ridge(alpha=1)
start_time = time.time()
model_ridge.fit(Xtr, ytr)
end_time = time.time()
ypred_tr_ridge = model_ridge.predict(Xtr)
ypred_val_ridge = model_ridge.predict(Xval)
print(
f"RIDGE MODEL \t Train RMSE: {compute_rmse(ytr, ypred_tr_ridge, ystd):.3f}",
f"\t Valid RMSE: {compute_rmse(yval, ypred_val_ridge, ystd):.3f}",
f"\t training time: {(end_time - start_time):.3f}",
)
model_linear = LinearRegression()
start_time = time.time()
model_linear.fit(Xtr, ytr)
end_time = time.time()
ypred_tr_linear = model_linear.predict(Xtr)
ypred_val_linear = model_linear.predict(Xval)
print(
f"LINEAR MODEL \t Train RMSE: {compute_rmse(ytr, ypred_tr_linear, ystd):.3f}",
f"\t Valid RMSE: {compute_rmse(yval, ypred_val_linear, ystd):.3f}",
f"\t training time: {(end_time - start_time):.3f}",
)
""" Generate predictions with the chosen model """
test_feat_selection = feat_selection[
:-1
] # all the same except for fact_temperature, which doesn't exist in test set
Xte = df_test[test_feat_selection].values
Xte = (Xte - Xmean) / Xstd
ypred_te = (
model_ridge.predict(Xte) * ystd + ymean
) # Remember to un-standardize the predictions
# PUT IN DATA FRAME
submission_df = pd.DataFrame(
data={"index": df_test["index"].values, "fact_temperature": ypred_te.squeeze()}
)
submission_df.to_csv("/kaggle/working/submission.csv", index=False)
print(submission_df)
| false | 0 | 2,647 | 0 | 2,647 | 2,647 |
||
129647361
|
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import (
accuracy_score,
classification_report,
roc_auc_score,
roc_curve,
)
from lightgbm import LGBMClassifier, plot_importance
from sklearn.preprocessing import StandardScaler
# ## Data Cleansing
df_billing = pd.read_excel("/kaggle/input/churn-data/Customer_Billing.xlsx")
df_billing.to_csv("Customer_Billing.csv")
df_billing.tail()
df_usage = pd.read_excel("/kaggle/input/churn-data/Customer_Usage.xlsx")
df_usage.to_csv("Customer_Usage.csv")
df_usage.tail()
df_demographics = pd.read_excel("/kaggle/input/churn-data/Customer_Demographics.xlsx")
df_demographics.to_csv("Customer_Demographics.csv")
df_demographics.tail()
df_merge_col1 = pd.merge(df_billing, df_usage, on="customerID")
df_merge = pd.merge(df_merge_col1, df_demographics, on="customerID")
df_merge.columns.values
df = df_merge.drop(["Unnamed: 0_x"], axis=1)
df = df.drop(["Unnamed: 0_y"], axis=1)
df = df.drop(["Unnamed: 0"], axis=1)
df.tail()
df.info()
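# Encode every remaining column (customerID, demographics, services, charges and Churn) as
# integer category codes so that the whole frame is numeric for the histograms, the
# correlation heatmap and the logistic regression below.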
df.gender = pd.Categorical(df.gender).codes
df.SeniorCitizen = pd.Categorical(df.SeniorCitizen).codes
df.Partner = pd.Categorical(df.Partner).codes
df.Dependents = pd.Categorical(df.Dependents).codes
df.tenure = pd.Categorical(df.tenure).codes
df.PhoneService = pd.Categorical(df.PhoneService).codes
df.MultipleLines = pd.Categorical(df.MultipleLines).codes
df.InternetService = pd.Categorical(df.InternetService).codes
df.OnlineSecurity = pd.Categorical(df.OnlineSecurity).codes
df.OnlineBackup = pd.Categorical(df.OnlineBackup).codes
df.DeviceProtection = pd.Categorical(df.DeviceProtection).codes
df.TechSupport = pd.Categorical(df.TechSupport).codes
df.StreamingTV = pd.Categorical(df.StreamingTV).codes
df.StreamingMovies = pd.Categorical(df.StreamingMovies).codes
df.Contract = pd.Categorical(df.Contract).codes
df.PaperlessBilling = pd.Categorical(df.PaperlessBilling).codes
df.PaymentMethod = pd.Categorical(df.PaymentMethod).codes
df.MonthlyCharges = pd.Categorical(df.MonthlyCharges).codes
df.TotalCharges = pd.Categorical(df.TotalCharges).codes
df.Churn = pd.Categorical(df.Churn).codes
df.customerID = pd.Categorical(df.customerID).codes
sns.set_style("dark")
df.hist(bins=50, figsize=(20, 20), color="navy")
df.info()
# create fontdicts for formatting figure text
axtitle_dict = {"family": "serif", "color": "darkred", "weight": "bold", "size": 16}
axlab_dict = {"family": "serif", "color": "black", "size": 14}
# plot correlation matrix heatmap
fig, ax = plt.subplots(figsize=[13, 5])
sns.heatmap(df.corr(), ax=ax, annot=True, linewidths=0.05, fmt=".2f", cmap="RdBu")
ax.tick_params(axis="both", which="major", labelsize=14)
ax.set_title("Dataset Correlation Matrix", fontdict=axtitle_dict)
fig.show()
from sklearn.linear_model import LogisticRegression
# Drop the Churn column from the data frame.
X = df.drop("Churn", axis=1)
y = df["Churn"]
# Split the data into training and testing sets.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
# Create the logistic regression model.
model = LogisticRegression()
# Train the model.
model.fit(X_train, y_train)
# Evaluate the model.
print("Accuracy:", model.score(X_test, y_test))
# Deploy the model.
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/647/129647361.ipynb
| null | null |
[{"Id": 129647361, "ScriptId": 38549310, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15109094, "CreationDate": "05/15/2023 13:13:48", "VersionNumber": 2.0, "Title": "Working_Churn", "EvaluationDate": "05/15/2023", "IsChange": false, "TotalLines": 109.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 109.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import (
accuracy_score,
classification_report,
roc_auc_score,
roc_curve,
)
from lightgbm import LGBMClassifier, plot_importance
from sklearn.preprocessing import StandardScaler
# ## Data Cleansing
df_billing = pd.read_excel("/kaggle/input/churn-data/Customer_Billing.xlsx")
df_billing.to_csv("Customer_Billing.csv")
df_billing.tail()
df_usage = pd.read_excel("/kaggle/input/churn-data/Customer_Usage.xlsx")
df_usage.to_csv("Customer_Usage.csv")
df_usage.tail()
df_demographics = pd.read_excel("/kaggle/input/churn-data/Customer_Demographics.xlsx")
df_demographics.to_csv("Customer_Demographics.csv")
df_demographics.tail()
df_merge_col1 = pd.merge(df_billing, df_usage, on="customerID")
df_merge = pd.merge(df_merge_col1, df_demographics, on="customerID")
df_merge.columns.values
df = df_merge.drop(["Unnamed: 0_x"], axis=1)
df = df.drop(["Unnamed: 0_y"], axis=1)
df = df.drop(["Unnamed: 0"], axis=1)
df.tail()
df.info()
df.gender = pd.Categorical(df.gender).codes
df.SeniorCitizen = pd.Categorical(df.SeniorCitizen).codes
df.Partner = pd.Categorical(df.Partner).codes
df.Dependents = pd.Categorical(df.Dependents).codes
df.tenure = pd.Categorical(df.tenure).codes
df.PhoneService = pd.Categorical(df.PhoneService).codes
df.MultipleLines = pd.Categorical(df.MultipleLines).codes
df.InternetService = pd.Categorical(df.InternetService).codes
df.OnlineSecurity = pd.Categorical(df.OnlineSecurity).codes
df.OnlineBackup = pd.Categorical(df.OnlineBackup).codes
df.DeviceProtection = pd.Categorical(df.DeviceProtection).codes
df.TechSupport = pd.Categorical(df.TechSupport).codes
df.StreamingTV = pd.Categorical(df.StreamingTV).codes
df.StreamingMovies = pd.Categorical(df.StreamingMovies).codes
df.Contract = pd.Categorical(df.Contract).codes
df.PaperlessBilling = pd.Categorical(df.PaperlessBilling).codes
df.PaymentMethod = pd.Categorical(df.PaymentMethod).codes
df.MonthlyCharges = pd.Categorical(df.MonthlyCharges).codes
df.TotalCharges = pd.Categorical(df.TotalCharges).codes
df.Churn = pd.Categorical(df.Churn).codes
df.customerID = pd.Categorical(df.customerID).codes
sns.set_style("dark")
df.hist(bins=50, figsize=(20, 20), color="navy")
df.info()
# create fontdicts for formatting figure text
axtitle_dict = {"family": "serif", "color": "darkred", "weight": "bold", "size": 16}
axlab_dict = {"family": "serif", "color": "black", "size": 14}
# plot correlation matrix heatmap
fig, ax = plt.subplots(figsize=[13, 5])
sns.heatmap(df.corr(), ax=ax, annot=True, linewidths=0.05, fmt=".2f", cmap="RdBu")
ax.tick_params(axis="both", which="major", labelsize=14)
ax.set_title("Dataset Correlation Matrix", fontdict=axtitle_dict)
fig.show()
from sklearn.linear_model import LogisticRegression
# Drop the Churn column from the data frame.
X = df.drop("Churn", axis=1)
y = df["Churn"]
# Split the data into training and testing sets.
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25)
# Create the logistic regression model.
model = LogisticRegression()
# Train the model.
model.fit(X_train, y_train)
# Evaluate the model.
print("Accuracy:", model.score(X_test, y_test))
# Deploy the model.
| false | 0 | 1,099 | 0 | 1,099 | 1,099 |
||
129630793
|
# # Baseline Model
# If you are new to Machine Learning, follow the 5 steps explained below.
# 1. Reading CSV as DataFrame
# 2. Splitting data into train and test data
# 3. Fit a selected model to the data and predict on test data
# 4. Check the prediction visually
# 5. Create submission CSV file
# If you think you are already at an intermediate level, you can try:
# - data cleaning: including NA imputation, removing duplicates, fixing skewed attributes, scaling, and removing outliers
# - feature engineering: encoding categorical variables, interactions between variables, choosing features
# - validation designing: try reducing the variance of predicted values, using cross validation
# - algorithm selection: choose a suitable algorithm for the complexity of data (linear regression < support vector machine < decision tree < neural network), use ensemble techniques such as BAGGING or Boosting
# - hyperparameter optimization: try using GridSearchCV, RandomizedSearchCV, and Optuna
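# A minimal sketch of the hyperparameter-optimization tip above: tuning a RandomForestRegressor
# with GridSearchCV on the same training file used later in this notebook. The parameter grid
# here is only an illustrative guess, not a recommended setting.
import pandas as pd
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import GridSearchCV
df_tune = pd.read_csv(
    "/kaggle/input/monash-ml-techpod-competition1-new/competition_train.csv"
)
X_tune, y_tune = df_tune.drop(["id", "Strength"], axis=1), df_tune["Strength"]
search = GridSearchCV(
    RandomForestRegressor(random_state=0),
    param_grid={"n_estimators": [100, 300], "max_depth": [None, 10]},
    scoring="neg_root_mean_squared_error",
    cv=3,
    n_jobs=-1,
)
search.fit(X_tune, y_tune)
print("Best params:", search.best_params_, "| CV RMSE:", -search.best_score_)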
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
# ## 1. Reading CSV
df = pd.read_csv(
"/kaggle/input/monash-ml-techpod-competition1-new/competition_train.csv"
)
# ## 2. Splitting into X_train, X_test, y_train, y_test
X, y = df.drop(["id", "Strength"], axis=1), df["Strength"]
X_train, X_test, y_train, y_test = train_test_split(X, y)
# ## 3. Fitting and Prediction of Linear Regression Model
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# ## 4. Checking the Prediction Visually
plt.scatter(y_test, y_pred)
plt.plot([x for x in range(10, 80)], [y for y in range(10, 80)], color="red")
plt.show()
# ## 5. Predicting for Submission and Generating Submission CSV File
test = pd.read_csv(
"/kaggle/input/monash-ml-techpod-competition1-new/competition_test.csv"
)
test_id = test["id"]
test_pred = model.predict(test.drop(["id"], axis=1))
df_submission = pd.DataFrame({"id": test_id, "Strength": test_pred})
df_submission.to_csv("submission1.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/630/129630793.ipynb
| null | null |
[{"Id": 129630793, "ScriptId": 38539651, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9164888, "CreationDate": "05/15/2023 11:00:50", "VersionNumber": 1.0, "Title": "Baseline Model ML Techpod", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 52.0, "LinesInsertedFromPrevious": 52.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
| null | null | null | null |
# # Baseline Model
# If you are new to Machine Learning, follow the 5 steps explained below.
# 1. Reading CSV as DataFrame
# 2. Splitting data into train and test data
# 3. Fit a selected model to the data and predict on test data
# 4. Check the prediction visually
# 5. Create submission CSV file
# If you think you are already at an intermediate level, you can try:
# - data cleaning: including NA imputation, removing duplicates, fixing skewed attributes, scaling, and removing outliers
# - feature engineering: encoding categorical variables, interactions between variables, choosing features
# - validation designing: try reducing the variance of predicted values, using cross validation
# - algorithm selection: choose a suitable algorithm for the complexity of data (linear regression < support vector machine < decision tree < neural network), use ensemble techniques such as BAGGING or Boosting
# - hyperparameter optimization: try using GridSearchCV, RandomizedSearchCV, and Optuna
import pandas as pd
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
# ## 1. Reading CSV
df = pd.read_csv(
"/kaggle/input/monash-ml-techpod-competition1-new/competition_train.csv"
)
# ## 2. Splitting into X_train, X_test, y_train, y_test
X, y = df.drop(["id", "Strength"], axis=1), df["Strength"]
X_train, X_test, y_train, y_test = train_test_split(X, y)
# ## 3. Fitting and Prediction of Linear Regression Model
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
# ## 4. Checking the Prediction Visually
plt.scatter(y_test, y_pred)
plt.plot([x for x in range(10, 80)], [y for y in range(10, 80)], color="red")
plt.show()
# ## 5. Predicting for Submission and Generating Submission CSV File
test = pd.read_csv(
"/kaggle/input/monash-ml-techpod-competition1-new/competition_test.csv"
)
test_id = test["id"]
test_pred = model.predict(test.drop(["id"], axis=1))
df_submission = pd.DataFrame({"id": test_id, "Strength": test_pred})
df_submission.to_csv("submission1.csv", index=False)
| false | 0 | 618 | 3 | 618 | 618 |
||
129630062
|
<jupyter_start><jupyter_text>Possum Regression
### Context
Can you use your regression skills to predict the age of a possum, its head length, whether it is male or female? This classic practice regression dataset comes originally from the [DAAG R package](https://cran.r-project.org/web/packages/DAAG/index.html) (datasets used in examples and exercises in the book Maindonald, J.H. and Braun, W.J. (2003, 2007, 2010) "Data Analysis and Graphics Using R"). This dataset is also used in the [OpenIntro Statistics](https://www.openintro.org/book/os/) book chapter 8 *Introduction to linear regression*.
### Content
From the DAAG R package: "*The possum data frame consists of nine morphometric measurements on each of 104 mountain brushtail possums, trapped at seven sites from Southern Victoria to central Queensland*."
Kaggle dataset identifier: openintro-possum
<jupyter_code>import pandas as pd
df = pd.read_csv('openintro-possum/possum.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 104 entries, 0 to 103
Data columns (total 14 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 case 104 non-null int64
1 site 104 non-null int64
2 Pop 104 non-null object
3 sex 104 non-null object
4 age 102 non-null float64
5 hdlngth 104 non-null float64
6 skullw 104 non-null float64
7 totlngth 104 non-null float64
8 taill 104 non-null float64
9 footlgth 103 non-null float64
10 earconch 104 non-null float64
11 eye 104 non-null float64
12 chest 104 non-null float64
13 belly 104 non-null float64
dtypes: float64(10), int64(2), object(2)
memory usage: 11.5+ KB
<jupyter_text>Examples:
{
"case": 1,
"site": 1,
"Pop": "Vic",
"sex": "m",
"age": 8,
"hdlngth": 94.1,
"skullw": 60.4,
"totlngth": 89.0,
"taill": 36.0,
"footlgth": 74.5,
"earconch": 54.5,
"eye": 15.2,
"chest": 28.0,
"belly": 36
}
{
"case": 2,
"site": 1,
"Pop": "Vic",
"sex": "f",
"age": 6,
"hdlngth": 92.5,
"skullw": 57.6,
"totlngth": 91.5,
"taill": 36.5,
"footlgth": 72.5,
"earconch": 51.2,
"eye": 16.0,
"chest": 28.5,
"belly": 33
}
{
"case": 3,
"site": 1,
"Pop": "Vic",
"sex": "f",
"age": 6,
"hdlngth": 94.0,
"skullw": 60.0,
"totlngth": 95.5,
"taill": 39.0,
"footlgth": 75.4,
"earconch": 51.9,
"eye": 15.5,
"chest": 30.0,
"belly": 34
}
{
"case": 4,
"site": 1,
"Pop": "Vic",
"sex": "f",
"age": 6,
"hdlngth": 93.2,
"skullw": 57.1,
"totlngth": 92.0,
"taill": 38.0,
"footlgth": 76.1,
"earconch": 52.2,
"eye": 15.2,
"chest": 28.0,
"belly": 34
}
<jupyter_script>import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
df = pd.read_csv("/kaggle/input/openintro-possum/possum.csv")
df
df = df.dropna()
df.isnull().sum()
sex = df["sex"]
age = df["age"]
hLen = df["hdlngth"]
tLen = df["totlngth"]
age_train = age[:80]
sex_train = sex[:80]
hlen_train = hLen[:80]
tlen_train = tLen[:80]
age_test = age[80:]
sex_test = sex[80:]
hlen_test = hLen[80:]
tlen_test = tLen[80:]
input_tensor = tf.constant(hlen_train)  # not used by the model below; also avoids shadowing the built-in input()
# Making Model
model = tf.keras.Sequential(
[
tf.keras.layers.Dense(100, input_shape=[1]),
tf.keras.layers.Dense(100),
tf.keras.layers.Dense(1),
]
)
model.compile(
loss="mae",
optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),
metrics="mae",
)
model.fit(hlen_train, tlen_train, epochs=200, verbose=0)
predictions = model.predict(hlen_test)
plt.figure(figsize=(10, 7))
plt.scatter(hlen_train, tlen_train, c="blue", label="Training Data")
plt.scatter(hlen_test, tlen_test, c="red", label="Test Data")
plt.scatter(hlen_test, predictions, c="green", label="Predicted Test")
plt.legend()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/630/129630062.ipynb
|
openintro-possum
|
abrambeyer
|
[{"Id": 129630062, "ScriptId": 38547961, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11512615, "CreationDate": "05/15/2023 10:54:35", "VersionNumber": 4.0, "Title": "PossumDLRegression", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 54.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 53.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
|
[{"Id": 185896547, "KernelVersionId": 129630062, "SourceDatasetVersionId": 2532158}]
|
[{"Id": 2532158, "DatasetId": 1534513, "DatasourceVersionId": 2575036, "CreatorUserId": 432563, "LicenseName": "CC0: Public Domain", "CreationDate": "08/17/2021 01:15:54", "VersionNumber": 1.0, "Title": "Possum Regression", "Slug": "openintro-possum", "Subtitle": "Get Your Feet Wet With This Beginner Regression Dataset!", "Description": "### Context\n\nCan you use your regression skills to predict the age of a possum, its head length, whether it is male or female? This classic practice regression dataset comes originally from the [DAAG R package](https://cran.r-project.org/web/packages/DAAG/index.html) (datasets used in examples and exercises in the book Maindonald, J.H. and Braun, W.J. (2003, 2007, 2010) \"Data Analysis and Graphics Using R\"). This dataset is also used in the [OpenIntro Statistics](https://www.openintro.org/book/os/) book chapter 8 *Introduction to linear regression*.\n\n### Content\n\nFrom the DAAG R package: \"*The possum data frame consists of nine morphometric measurements on each of 104 mountain brushtail possums, trapped at seven sites from Southern Victoria to central Queensland*.\"\n\n\n### Acknowledgements\n\nData originally found in the [DAAG R package](https://cran.r-project.org/web/packages/DAAG/index.html) and used in the book Maindonald, J.H. and Braun, W.J. (2003, 2007, 2010) \"Data Analysis and Graphics Using R\"). \n\nA subset of the data was also put together for the [OpenIntro Statistics](https://www.openintro.org/book/os/) book chapter 8 *Introduction to linear regression*.\n\n***Original Source of dataset:***\n*Lindenmayer, D. B., Viggers, K. L., Cunningham, R. B., and Donnelly, C. F. 1995. Morphological\nvariation among columns of the mountain brushtail possum, Trichosurus caninus Ogilby (Phalangeridae: Marsupiala). Australian Journal of Zoology 43: 449-458.*\n\n### Inspiration\n\nGet your feet wet with regression techniques here on Kaggle by using this dataset. Perfect for beginners since the OpenIntro Statistics book does a good explanation in Chapter 8.\n\n* Can we use total length to predict a possum's head length?\n* Which possum body dimensions are most correlated with age and sex?\n* Can we classify a possum's sex by its body dimensions and location?\n* Can we predict a possum's trapping location from its body dimensions?", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1534513, "CreatorUserId": 432563, "OwnerUserId": 432563.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2532158.0, "CurrentDatasourceVersionId": 2575036.0, "ForumId": 1554365, "Type": 2, "CreationDate": "08/17/2021 01:15:54", "LastActivityDate": "08/17/2021", "TotalViews": 42492, "TotalDownloads": 6049, "TotalVotes": 74, "TotalKernels": 54}]
|
[{"Id": 432563, "UserName": "abrambeyer", "DisplayName": "ABeyer", "RegisterDate": "10/01/2015", "PerformanceTier": 1}]
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
df = pd.read_csv("/kaggle/input/openintro-possum/possum.csv")
df
df = df.dropna()
df.isnull().sum()
sex = df["sex"]
age = df["age"]
hLen = df["hdlngth"]
tLen = df["totlngth"]
age_train = age[:80]
sex_train = sex[:80]
hlen_train = hLen[:80]
tlen_train = tLen[:80]
age_test = age[80:]
sex_test = sex[80:]
hlen_test = hLen[80:]
tlen_test = tLen[80:]
input_tensor = tf.constant(hlen_train)  # not used by the model below; also avoids shadowing the built-in input()
# Making Model
model = tf.keras.Sequential(
[
tf.keras.layers.Dense(100, input_shape=[1]),
tf.keras.layers.Dense(100),
tf.keras.layers.Dense(1),
]
)
model.compile(
loss="mae",
optimizer=tf.keras.optimizers.Adam(learning_rate=0.0001),
metrics="mae",
)
model.fit(hlen_train, tlen_train, epochs=200, verbose=0)
predictions = model.predict(hlen_test)
plt.figure(figsize=(10, 7))
plt.scatter(hlen_train, tlen_train, c="blue", label="Training Data")
plt.scatter(hlen_test, tlen_test, c="red", label="Test Data")
plt.scatter(hlen_test, predictions, c="green", label="Predicted Test")
plt.legend()
|
[{"openintro-possum/possum.csv": {"column_names": "[\"case\", \"site\", \"Pop\", \"sex\", \"age\", \"hdlngth\", \"skullw\", \"totlngth\", \"taill\", \"footlgth\", \"earconch\", \"eye\", \"chest\", \"belly\"]", "column_data_types": "{\"case\": \"int64\", \"site\": \"int64\", \"Pop\": \"object\", \"sex\": \"object\", \"age\": \"float64\", \"hdlngth\": \"float64\", \"skullw\": \"float64\", \"totlngth\": \"float64\", \"taill\": \"float64\", \"footlgth\": \"float64\", \"earconch\": \"float64\", \"eye\": \"float64\", \"chest\": \"float64\", \"belly\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 104 entries, 0 to 103\nData columns (total 14 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 case 104 non-null int64 \n 1 site 104 non-null int64 \n 2 Pop 104 non-null object \n 3 sex 104 non-null object \n 4 age 102 non-null float64\n 5 hdlngth 104 non-null float64\n 6 skullw 104 non-null float64\n 7 totlngth 104 non-null float64\n 8 taill 104 non-null float64\n 9 footlgth 103 non-null float64\n 10 earconch 104 non-null float64\n 11 eye 104 non-null float64\n 12 chest 104 non-null float64\n 13 belly 104 non-null float64\ndtypes: float64(10), int64(2), object(2)\nmemory usage: 11.5+ KB\n", "summary": "{\"case\": {\"count\": 104.0, \"mean\": 52.5, \"std\": 30.166206257996713, \"min\": 1.0, \"25%\": 26.75, \"50%\": 52.5, \"75%\": 78.25, \"max\": 104.0}, \"site\": {\"count\": 104.0, \"mean\": 3.625, \"std\": 2.349085754819339, \"min\": 1.0, \"25%\": 1.0, \"50%\": 3.0, \"75%\": 6.0, \"max\": 7.0}, \"age\": {\"count\": 102.0, \"mean\": 3.8333333333333335, \"std\": 1.9092444897006104, \"min\": 1.0, \"25%\": 2.25, \"50%\": 3.0, \"75%\": 5.0, \"max\": 9.0}, \"hdlngth\": {\"count\": 104.0, \"mean\": 92.60288461538462, \"std\": 3.573349486079402, \"min\": 82.5, \"25%\": 90.675, \"50%\": 92.8, \"75%\": 94.725, \"max\": 103.1}, \"skullw\": {\"count\": 104.0, \"mean\": 56.88365384615384, \"std\": 3.1134256903770203, \"min\": 50.0, \"25%\": 54.975, \"50%\": 56.349999999999994, \"75%\": 58.1, \"max\": 68.6}, \"totlngth\": {\"count\": 104.0, \"mean\": 87.08846153846154, \"std\": 4.310549436569344, \"min\": 75.0, \"25%\": 84.0, \"50%\": 88.0, \"75%\": 90.0, \"max\": 96.5}, \"taill\": {\"count\": 104.0, \"mean\": 37.00961538461539, \"std\": 1.959518428592603, \"min\": 32.0, \"25%\": 35.875, \"50%\": 37.0, \"75%\": 38.0, \"max\": 43.0}, \"footlgth\": {\"count\": 103.0, \"mean\": 68.45922330097088, \"std\": 4.395305804641412, \"min\": 60.3, \"25%\": 64.6, \"50%\": 68.0, \"75%\": 72.5, \"max\": 77.9}, \"earconch\": {\"count\": 104.0, \"mean\": 48.13076923076923, \"std\": 4.109380151285827, \"min\": 40.3, \"25%\": 44.8, \"50%\": 46.8, \"75%\": 52.0, \"max\": 56.2}, \"eye\": {\"count\": 104.0, \"mean\": 15.046153846153846, \"std\": 1.0503742353818448, \"min\": 12.8, \"25%\": 14.4, \"50%\": 14.9, \"75%\": 15.725, \"max\": 17.8}, \"chest\": {\"count\": 104.0, \"mean\": 27.0, \"std\": 2.0455967391979963, \"min\": 22.0, \"25%\": 25.5, \"50%\": 27.0, \"75%\": 28.0, \"max\": 32.0}, \"belly\": {\"count\": 104.0, \"mean\": 32.58653846153846, \"std\": 2.7619487172923667, \"min\": 25.0, \"25%\": 31.0, \"50%\": 32.5, \"75%\": 34.125, \"max\": 40.0}}", "examples": 
"{\"case\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"site\":{\"0\":1,\"1\":1,\"2\":1,\"3\":1},\"Pop\":{\"0\":\"Vic\",\"1\":\"Vic\",\"2\":\"Vic\",\"3\":\"Vic\"},\"sex\":{\"0\":\"m\",\"1\":\"f\",\"2\":\"f\",\"3\":\"f\"},\"age\":{\"0\":8.0,\"1\":6.0,\"2\":6.0,\"3\":6.0},\"hdlngth\":{\"0\":94.1,\"1\":92.5,\"2\":94.0,\"3\":93.2},\"skullw\":{\"0\":60.4,\"1\":57.6,\"2\":60.0,\"3\":57.1},\"totlngth\":{\"0\":89.0,\"1\":91.5,\"2\":95.5,\"3\":92.0},\"taill\":{\"0\":36.0,\"1\":36.5,\"2\":39.0,\"3\":38.0},\"footlgth\":{\"0\":74.5,\"1\":72.5,\"2\":75.4,\"3\":76.1},\"earconch\":{\"0\":54.5,\"1\":51.2,\"2\":51.9,\"3\":52.2},\"eye\":{\"0\":15.2,\"1\":16.0,\"2\":15.5,\"3\":15.2},\"chest\":{\"0\":28.0,\"1\":28.5,\"2\":30.0,\"3\":28.0},\"belly\":{\"0\":36.0,\"1\":33.0,\"2\":34.0,\"3\":34.0}}"}}]
| true | 1 |
<start_data_description><data_path>openintro-possum/possum.csv:
<column_names>
['case', 'site', 'Pop', 'sex', 'age', 'hdlngth', 'skullw', 'totlngth', 'taill', 'footlgth', 'earconch', 'eye', 'chest', 'belly']
<column_types>
{'case': 'int64', 'site': 'int64', 'Pop': 'object', 'sex': 'object', 'age': 'float64', 'hdlngth': 'float64', 'skullw': 'float64', 'totlngth': 'float64', 'taill': 'float64', 'footlgth': 'float64', 'earconch': 'float64', 'eye': 'float64', 'chest': 'float64', 'belly': 'float64'}
<dataframe_Summary>
{'case': {'count': 104.0, 'mean': 52.5, 'std': 30.166206257996713, 'min': 1.0, '25%': 26.75, '50%': 52.5, '75%': 78.25, 'max': 104.0}, 'site': {'count': 104.0, 'mean': 3.625, 'std': 2.349085754819339, 'min': 1.0, '25%': 1.0, '50%': 3.0, '75%': 6.0, 'max': 7.0}, 'age': {'count': 102.0, 'mean': 3.8333333333333335, 'std': 1.9092444897006104, 'min': 1.0, '25%': 2.25, '50%': 3.0, '75%': 5.0, 'max': 9.0}, 'hdlngth': {'count': 104.0, 'mean': 92.60288461538462, 'std': 3.573349486079402, 'min': 82.5, '25%': 90.675, '50%': 92.8, '75%': 94.725, 'max': 103.1}, 'skullw': {'count': 104.0, 'mean': 56.88365384615384, 'std': 3.1134256903770203, 'min': 50.0, '25%': 54.975, '50%': 56.349999999999994, '75%': 58.1, 'max': 68.6}, 'totlngth': {'count': 104.0, 'mean': 87.08846153846154, 'std': 4.310549436569344, 'min': 75.0, '25%': 84.0, '50%': 88.0, '75%': 90.0, 'max': 96.5}, 'taill': {'count': 104.0, 'mean': 37.00961538461539, 'std': 1.959518428592603, 'min': 32.0, '25%': 35.875, '50%': 37.0, '75%': 38.0, 'max': 43.0}, 'footlgth': {'count': 103.0, 'mean': 68.45922330097088, 'std': 4.395305804641412, 'min': 60.3, '25%': 64.6, '50%': 68.0, '75%': 72.5, 'max': 77.9}, 'earconch': {'count': 104.0, 'mean': 48.13076923076923, 'std': 4.109380151285827, 'min': 40.3, '25%': 44.8, '50%': 46.8, '75%': 52.0, 'max': 56.2}, 'eye': {'count': 104.0, 'mean': 15.046153846153846, 'std': 1.0503742353818448, 'min': 12.8, '25%': 14.4, '50%': 14.9, '75%': 15.725, 'max': 17.8}, 'chest': {'count': 104.0, 'mean': 27.0, 'std': 2.0455967391979963, 'min': 22.0, '25%': 25.5, '50%': 27.0, '75%': 28.0, 'max': 32.0}, 'belly': {'count': 104.0, 'mean': 32.58653846153846, 'std': 2.7619487172923667, 'min': 25.0, '25%': 31.0, '50%': 32.5, '75%': 34.125, 'max': 40.0}}
<dataframe_info>
RangeIndex: 104 entries, 0 to 103
Data columns (total 14 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 case 104 non-null int64
1 site 104 non-null int64
2 Pop 104 non-null object
3 sex 104 non-null object
4 age 102 non-null float64
5 hdlngth 104 non-null float64
6 skullw 104 non-null float64
7 totlngth 104 non-null float64
8 taill 104 non-null float64
9 footlgth 103 non-null float64
10 earconch 104 non-null float64
11 eye 104 non-null float64
12 chest 104 non-null float64
13 belly 104 non-null float64
dtypes: float64(10), int64(2), object(2)
memory usage: 11.5+ KB
<some_examples>
{'case': {'0': 1, '1': 2, '2': 3, '3': 4}, 'site': {'0': 1, '1': 1, '2': 1, '3': 1}, 'Pop': {'0': 'Vic', '1': 'Vic', '2': 'Vic', '3': 'Vic'}, 'sex': {'0': 'm', '1': 'f', '2': 'f', '3': 'f'}, 'age': {'0': 8.0, '1': 6.0, '2': 6.0, '3': 6.0}, 'hdlngth': {'0': 94.1, '1': 92.5, '2': 94.0, '3': 93.2}, 'skullw': {'0': 60.4, '1': 57.6, '2': 60.0, '3': 57.1}, 'totlngth': {'0': 89.0, '1': 91.5, '2': 95.5, '3': 92.0}, 'taill': {'0': 36.0, '1': 36.5, '2': 39.0, '3': 38.0}, 'footlgth': {'0': 74.5, '1': 72.5, '2': 75.4, '3': 76.1}, 'earconch': {'0': 54.5, '1': 51.2, '2': 51.9, '3': 52.2}, 'eye': {'0': 15.2, '1': 16.0, '2': 15.5, '3': 15.2}, 'chest': {'0': 28.0, '1': 28.5, '2': 30.0, '3': 28.0}, 'belly': {'0': 36.0, '1': 33.0, '2': 34.0, '3': 34.0}}
<end_description>
| 428 | 2 | 1,582 | 428 |
129630956
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
from sklearn import metrics
from sklearn import model_selection
import tensorflow.keras.backend as K
import warnings, gc
warnings.filterwarnings("ignore")
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df_clinic = []
tmp = pd.read_csv(
"/kaggle/input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv"
)
tmp["CSF"] = 1
df_clinic.append(tmp)
tmp = pd.read_csv(
"/kaggle/input/amp-parkinsons-disease-progression-prediction/supplemental_clinical_data.csv"
)
tmp["CSF"] = 0
df_clinic.append(tmp)
df_clinic = pd.concat(df_clinic, axis=0).reset_index(drop=True)
df_clinic = df_clinic.rename(
columns={"upd23b_clinical_state_on_medication": "medication"}
)
df_clinic
df_clinic.info()
df_clinic.isna().sum()
df_clinic.nunique()
df_clinic.groupby("patient_id").size().describe()
import seaborn as sns
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(16, 30))
features = ["updrs_1", "updrs_2", "updrs_3", "updrs_4"]
for i in range(len(features)):
fig.add_subplot(9, 5, i + 1)
sns.distplot(df_clinic[features[i]])
plt.tight_layout()
plt.show()
from scipy import stats
fig, (ax) = plt.subplots(figsize=(4, 4))
res = stats.probplot(df_clinic["updrs_1"], dist="norm", plot=ax)
from scipy import stats
fig, (ax) = plt.subplots(figsize=(4, 4))
res = stats.probplot(df_clinic["updrs_2"], dist="norm", plot=ax)
from scipy import stats
fig, (ax) = plt.subplots(figsize=(4, 4))
res = stats.probplot(df_clinic["updrs_3"], dist="norm", plot=ax)
from scipy import stats
fig, (ax) = plt.subplots(figsize=(4, 4))
res = stats.probplot(df_clinic["updrs_4"], dist="norm", plot=ax)
# **The q-q plots suggest that updrs_1, updrs_2 and updrs_4 have huge deviation from normal distribution wheras updrs_3 is not much deviated and correction is much easier.**
skewness_1 = df_clinic["updrs_1"].skew()
print(f"Skewness for updrs_1 = {skewness_1}")
skewness_2 = df_clinic["updrs_2"].skew()
print(f"Skewness for updrs_2 = {skewness_2}")
skewness_3 = df_clinic["updrs_3"].skew()
print(f"Skewness for updrs_3 = {skewness_3}")
skewness_4 = df_clinic["updrs_4"].skew()
print(f"Skewness for updrs_4 = {skewness_4}")
# All the above visualizations suggest the following.
# Inferences:
# * None of the targets follow a normal distribution
# * updrs_4 is zero in roughly 40% of records (imputing its missing values with zero seems reasonable)
# * All the distributions are right/positively skewed (see the sketch below)
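# A minimal sketch of one way to address the right skew noted above (illustrative only, using
# df_clinic and numpy from the cells above; the transform is not reused in the rest of the notebook):
for col in ["updrs_1", "updrs_2", "updrs_3", "updrs_4"]:
    raw_skew = df_clinic[col].skew()
    log_skew = np.log1p(df_clinic[col]).skew()
    print(f"{col}: skew {raw_skew:.2f} -> log1p skew {log_skew:.2f}")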
import pandas as pd
import numpy as np
dataframe = pd.DataFrame(
df_clinic, columns=["updrs_1", "updrs_2", "updrs_3", "updrs_4"]
)
print(dataframe.head())
dataframe.boxplot(grid=False, color="blue", fontsize=10, rot=30)
sns.heatmap(
df_clinic.filter(regex="updrs_*").dropna().corr(),
cmap="crest",
annot=True,
annot_kws={"fontweight": "bold"},
)
plt.yticks(rotation=0)
plt.show()
# **updrs_2 has high correlation with updrs_1 & updrs_3 (about 0.6)**
df_eda = df_clinic.groupby("patient_id")["visit_month"].max()
ax = sns.histplot(
df_eda, kde=True, color="blue", line_kws={"linewidth": 2, "linestyle": "--"}
)
ax.lines[0].set_color("orange")
plt.title(f"Max month", fontweight="bold", pad=15)
plt.show()
# **Short-term(0-5) and mid-term(30-36) bins show high medical examination count**
df_eda = df_clinic.groupby("visit_month", as_index=False).mean().dropna()
df_eda
for i in range(4):
sns.regplot(
x=df_eda["visit_month"].values,
y=df_eda[f"updrs_{i+1}"].values,
color="green",
ci=None,
line_kws={"color": "orange", "linestyle": "--"},
)
plt.title(f"Mean of updrs_{i+1} by timeseries", fontweight="bold", pad=15)
plt.text(
0,
df_eda[f"updrs_{i+1}"].values.max() * 0.99,
f'CORR: {round(df_eda.corr().loc["visit_month", f"updrs_{i+1}"], 3)}',
)
plt.show()
# **This clearly shows that as time progresses severity of parkinson increases which is also reflected by updrs values**
df_eda = df_clinic.dropna()
from scipy.stats import f_oneway
for i in range(4):
sns.boxplot(
x=df_eda["medication"].values, y=df_eda[f"updrs_{i+1}"].values, showfliers=False
)
plt.title(f"Mean of updrs_{i+1} by timeseries", fontweight="bold", pad=15)
_, pvalue = f_oneway(
df_eda.loc[df_eda["medication"] == "On", f"updrs_{i+1}"].values,
df_eda.loc[df_eda["medication"] == "Off", f"updrs_{i+1}"].values,
)
if pvalue < 0.05:
print("Two groups are difference ! -> pvalue:", round(pvalue, 5))
plt.show()
# **Observation -**
# * Median updrs values of patients on medication are significantly lower than those of patients not on medication.
# **Inference -**
# * Medication slows the progression of Parkinson's disease.
# * Medication must also be considered as a factor when modeling updrs scores.
# **SUMMARY OF CLINICAL DATA** -
# * None of the targets follow a normal distribution
# * updrs_4 is zero in roughly 40% of records (imputing its missing values with zero seems reasonable — see the sketch after this list)
# * Short-term (0-5) and mid-term (30-36) month bins show the highest medical-examination counts
# * Medication is effective in reducing the updrs level (also sketched after this list)
# * The original and supplemental data show no statistically significant difference in mean for any updrs value
# * Therefore, the supplemental data can be used in the same way as the original data without worrying that it introduces bias
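# A small sketch of two of the points above (zero-imputation of updrs_4 and a numeric medication
# feature), done on a copy of df_clinic so the analysis below is unchanged; the 1/0/-1 encoding
# is just one illustrative choice.
df_sketch = df_clinic.copy()
df_sketch["updrs_4"] = df_sketch["updrs_4"].fillna(0)
df_sketch["medication_num"] = df_sketch["medication"].map({"On": 1, "Off": 0}).fillna(-1)
print(df_sketch[["updrs_4", "medication_num"]].describe())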
train_proteins = pd.read_csv(
"/kaggle/input/amp-parkinsons-disease-progression-prediction/train_proteins.csv"
)
train_proteins
df_eda = train_proteins.groupby("patient_id")["UniProt"].size()
df_eda.describe()
ax = sns.histplot(
df_eda, kde=True, color="blue", line_kws={"linewidth": 2, "linestyle": "--"}
)
ax.lines[0].set_color("orange")
plt.title(f"The number of protein per patients", fontweight="bold", pad=15)
plt.show()
df_eda = train_proteins.groupby("visit_id").mean()
df_eda = df_eda.groupby("visit_month")["NPX"].mean()
df_eda
print("Max timestamp :", df_eda.index[df_eda.argmax()])
sns.lineplot(x=df_eda.index.astype("int32"), y=np.log1p(df_eda.values))
plt.axvline(df_eda.index[df_eda.argmax()], color="red", alpha=0.5, linestyle="--")
plt.show()
train_peptides = pd.read_csv(
"/kaggle/input/amp-parkinsons-disease-progression-prediction/train_peptides.csv"
)
train_peptides
df_eda = train_peptides.groupby("patient_id")["UniProt"].size()
df_eda.describe()
ax = sns.histplot(
df_eda, kde=True, color="blue", line_kws={"linewidth": 2, "linestyle": "--"}
)
ax.lines[0].set_color("orange")
plt.title(f"The number of peptide per patients", fontweight="bold", pad=15)
plt.show()
all(train_proteins[["visit_id", "UniProt"]].value_counts() == 1)
# Hence each (visit_id, UniProt) pair is unique
df_p = train_peptides.merge(
train_proteins[["visit_id", "UniProt", "NPX"]],
on=["visit_id", "UniProt"],
how="left",
)
df_p.head()
# It makes sense to transfer proteins' NPX value to peptide data on merge, since each protein could consist of multiple peptides.
train_scd = pd.read_csv(
"/kaggle/input/amp-parkinsons-disease-progression-prediction/supplemental_clinical_data.csv"
)
train_cd = pd.read_csv(
"/kaggle/input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv"
)
train_scd["visit_id"] = (
train_scd["patient_id"].astype(str) + "_" + train_scd["visit_month"].astype(str)
)
# Here we combine both main and supplemental clinical data into a single dataframe
df_cd = pd.concat([train_cd, train_scd], ignore_index=True)
display(df_cd.info())
print(f'Unique Clinical Data patient #: {train_cd["patient_id"].nunique()}')
print("----------------------------------------------------------")
print(f"Null Values Found in Clinical Data:")
for col in train_cd.columns:
print(f"Null values found in {col}: {train_cd[col].isna().sum()}")
print("")
# Since there are very few null values in updrs_1, updrs_2 and updrs_3, we can impute them using linear interpolation
train_cd["updrs_1"] = train_cd["updrs_1"].interpolate(method="linear")
train_cd["updrs_2"] = train_cd["updrs_2"].interpolate(method="linear")
train_cd["updrs_3"] = train_cd["updrs_3"].interpolate(method="linear")
train_cd["updrs_4"] = train_cd["updrs_4"].fillna(0)
print(f'Unique Clinical Data patient #: {train_cd["patient_id"].nunique()}')
print("----------------------------------------------------------")
print(f"Null Values Found in Clinical Data:")
for col in train_cd.columns:
print(f"Null values found in {col}: {train_cd[col].isna().sum()}")
print("")
df_cd.melt(
id_vars=[
"visit_id",
"patient_id",
"visit_month",
"upd23b_clinical_state_on_medication",
],
var_name="updrs",
value_name="rating",
)
df_all = df_p.merge(
df_cd[
[
"visit_id",
"updrs_1",
"updrs_2",
"updrs_3",
"updrs_4",
"upd23b_clinical_state_on_medication",
]
],
on=["visit_id"],
how="left",
)
df_all.info()
df_all
print("\n.... Total patients: ", train_peptides.patient_id.nunique())
print("\n.... Total number of visits:", train_proteins["visit_id"].nunique())
print("\n.... Total proteins: ", train_proteins.UniProt.nunique())
print("\n.... Total peptides: ", train_peptides.Peptide.nunique())
print("\n.... Months counts:")
print(train_proteins.visit_month.value_counts())
# **Inferences** -
# * There are 248 patients in the dataset.
# * There are 1,113 visit_id, which means each patient visited around 4.48 times on average. This means, on average, each patient has 4~5 records.
# * There are often several peptides per protein, so the peptides dataset should be merged into the proteins data (a quick count follows after this list).
# * There are 227 unique UniProt ID, while there are 968 types of peptide.
# * Data for some patients goes as far as 108 months or 9 years
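# A quick check of the "several peptides per protein" point above, counting distinct peptides
# per UniProt ID in the peptide table loaded earlier (a sketch for illustration only):
peptides_per_protein = train_peptides.groupby("UniProt")["Peptide"].nunique()
print(peptides_per_protein.describe())
print(peptides_per_protein.sort_values(ascending=False).head())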
corr = df_all.corr("spearman")
mask = np.triu(np.ones_like(corr, dtype=bool))
f, ax = plt.subplots(figsize=(11, 9))
sns.heatmap(
corr,
mask=mask,
cmap="Spectral",
vmax=1,
center=0.1,
annot=True,
square=True,
linewidths=0.5,
cbar_kws={"shrink": 0.5},
)
from pandas_profiling import ProfileReport
profile = ProfileReport(df_all, title="Profiling Report")
profile.to_notebook_iframe()
print(f'Unique Clinical Data patient #: {df_all["patient_id"].nunique()}')
print("----------------------------------------------------------")
print(f"Null Values Found in Clinical Data:")
for col in df_all.columns:
print(f"Null values found in {col}: {df_all[col].isna().sum()}")
print("")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/630/129630956.ipynb
| null | null |
[{"Id": 129630956, "ScriptId": 37731689, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12040299, "CreationDate": "05/15/2023 11:02:14", "VersionNumber": 2.0, "Title": "mid eval - final", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 261.0, "LinesInsertedFromPrevious": 27.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 234.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
from sklearn import metrics
from sklearn import model_selection
import tensorflow.keras.backend as K
import warnings, gc
warnings.filterwarnings("ignore")
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df_clinic = []
tmp = pd.read_csv(
"/kaggle/input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv"
)
tmp["CSF"] = 1
df_clinic.append(tmp)
tmp = pd.read_csv(
"/kaggle/input/amp-parkinsons-disease-progression-prediction/supplemental_clinical_data.csv"
)
tmp["CSF"] = 0
df_clinic.append(tmp)
df_clinic = pd.concat(df_clinic, axis=0).reset_index(drop=True)
df_clinic = df_clinic.rename(
columns={"upd23b_clinical_state_on_medication": "medication"}
)
df_clinic
df_clinic.info()
df_clinic.isna().sum()
df_clinic.nunique()
df_clinic.groupby("patient_id").size().describe()
import seaborn as sns
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(16, 30))
features = ["updrs_1", "updrs_2", "updrs_3", "updrs_4"]
for i in range(len(features)):
fig.add_subplot(9, 5, i + 1)
sns.distplot(df_clinic[features[i]])
plt.tight_layout()
plt.show()
from scipy import stats
fig, (ax) = plt.subplots(figsize=(4, 4))
res = stats.probplot(df_clinic["updrs_1"], dist="norm", plot=ax)
from scipy import stats
fig, (ax) = plt.subplots(figsize=(4, 4))
res = stats.probplot(df_clinic["updrs_2"], dist="norm", plot=ax)
from scipy import stats
fig, (ax) = plt.subplots(figsize=(4, 4))
res = stats.probplot(df_clinic["updrs_3"], dist="norm", plot=ax)
from scipy import stats
fig, (ax) = plt.subplots(figsize=(4, 4))
res = stats.probplot(df_clinic["updrs_4"], dist="norm", plot=ax)
# **The q-q plots suggest that updrs_1, updrs_2 and updrs_4 have huge deviation from normal distribution wheras updrs_3 is not much deviated and correction is much easier.**
skewness_1 = df_clinic["updrs_1"].skew()
print(f"Skewness for updrs_1 = {skewness_1}")
skewness_2 = df_clinic["updrs_2"].skew()
print(f"Skewness for updrs_2 = {skewness_2}")
skewness_3 = df_clinic["updrs_3"].skew()
print(f"Skewness for updrs_3 = {skewness_3}")
skewness_4 = df_clinic["updrs_4"].skew()
print(f"Skewness for updrs_4 = {skewness_4}")
# All the above visualizations suggest the following.
# Inferences:
# * None of the targets follow a normal distribution
# * updrs_4 is zero in roughly 40% of records (imputing its missing values with zero seems reasonable)
# * All the distributions are right/positively skewed
import pandas as pd
import numpy as np
dataframe = pd.DataFrame(
df_clinic, columns=["updrs_1", "updrs_2", "updrs_3", "updrs_4"]
)
print(dataframe.head())
dataframe.boxplot(grid=False, color="blue", fontsize=10, rot=30)
sns.heatmap(
df_clinic.filter(regex="updrs_*").dropna().corr(),
cmap="crest",
annot=True,
annot_kws={"fontweight": "bold"},
)
plt.yticks(rotation=0)
plt.show()
# **updrs_2 has high correlation with updrs_1 & updrs_3 (about 0.6)**
df_eda = df_clinic.groupby("patient_id")["visit_month"].max()
ax = sns.histplot(
df_eda, kde=True, color="blue", line_kws={"linewidth": 2, "linestyle": "--"}
)
ax.lines[0].set_color("orange")
plt.title(f"Max month", fontweight="bold", pad=15)
plt.show()
# **Short-term(0-5) and mid-term(30-36) bins show high medical examination count**
df_eda = df_clinic.groupby("visit_month", as_index=False).mean().dropna()
df_eda
for i in range(4):
sns.regplot(
x=df_eda["visit_month"].values,
y=df_eda[f"updrs_{i+1}"].values,
color="green",
ci=None,
line_kws={"color": "orange", "linestyle": "--"},
)
plt.title(f"Mean of updrs_{i+1} by timeseries", fontweight="bold", pad=15)
plt.text(
0,
df_eda[f"updrs_{i+1}"].values.max() * 0.99,
f'CORR: {round(df_eda.corr().loc["visit_month", f"updrs_{i+1}"], 3)}',
)
plt.show()
# **This clearly shows that as time progresses severity of parkinson increases which is also reflected by updrs values**
df_eda = df_clinic.dropna()
from scipy.stats import f_oneway
for i in range(4):
sns.boxplot(
x=df_eda["medication"].values, y=df_eda[f"updrs_{i+1}"].values, showfliers=False
)
plt.title(f"Mean of updrs_{i+1} by timeseries", fontweight="bold", pad=15)
_, pvalue = f_oneway(
df_eda.loc[df_eda["medication"] == "On", f"updrs_{i+1}"].values,
df_eda.loc[df_eda["medication"] == "Off", f"updrs_{i+1}"].values,
)
if pvalue < 0.05:
print("Two groups are difference ! -> pvalue:", round(pvalue, 5))
plt.show()
# **Observation -**
# * Median updrs values of patients on medication are significantly lower than those of patients not on medication.
# **Inference -**
# * Medication slows the progression of Parkinson's disease.
# * Medication must also be considered as a factor when modeling updrs scores.
# **SUMMARY OF CLINICAL DATA** -
# * None of the targets follow a normal distribution
# * updrs_4 is zero in roughly 40% of records (imputing its missing values with zero seems reasonable)
# * Short-term (0-5) and mid-term (30-36) month bins show the highest medical-examination counts
# * Medication is effective in reducing the updrs level
# * The original and supplemental data show no statistically significant difference in mean for any updrs value
# * Therefore, the supplemental data can be used in the same way as the original data without worrying that it introduces bias
train_proteins = pd.read_csv(
"/kaggle/input/amp-parkinsons-disease-progression-prediction/train_proteins.csv"
)
train_proteins
df_eda = train_proteins.groupby("patient_id")["UniProt"].size()
df_eda.describe()
ax = sns.histplot(
df_eda, kde=True, color="blue", line_kws={"linewidth": 2, "linestyle": "--"}
)
ax.lines[0].set_color("orange")
plt.title(f"The number of protein per patients", fontweight="bold", pad=15)
plt.show()
df_eda = train_proteins.groupby("visit_id").mean()
df_eda = df_eda.groupby("visit_month")["NPX"].mean()
df_eda
print("Max timestamp :", df_eda.index[df_eda.argmax()])
sns.lineplot(x=df_eda.index.astype("int32"), y=np.log1p(df_eda.values))
plt.axvline(df_eda.index[df_eda.argmax()], color="red", alpha=0.5, linestyle="--")
plt.show()
train_peptides = pd.read_csv(
"/kaggle/input/amp-parkinsons-disease-progression-prediction/train_peptides.csv"
)
train_peptides
df_eda = train_peptides.groupby("patient_id")["UniProt"].size()
df_eda.describe()
ax = sns.histplot(
df_eda, kde=True, color="blue", line_kws={"linewidth": 2, "linestyle": "--"}
)
ax.lines[0].set_color("orange")
plt.title(f"The number of peptide per patients", fontweight="bold", pad=15)
plt.show()
all(train_proteins[["visit_id", "UniProt"]].value_counts() == 1)
# Hence each (visit_id, UniProt) pair is unique
df_p = train_peptides.merge(
train_proteins[["visit_id", "UniProt", "NPX"]],
on=["visit_id", "UniProt"],
how="left",
)
df_p.head()
# It makes sense to transfer proteins' NPX value to peptide data on merge, since each protein could consist of multiple peptides.
train_scd = pd.read_csv(
"/kaggle/input/amp-parkinsons-disease-progression-prediction/supplemental_clinical_data.csv"
)
train_cd = pd.read_csv(
"/kaggle/input/amp-parkinsons-disease-progression-prediction/train_clinical_data.csv"
)
train_scd["visit_id"] = (
train_scd["patient_id"].astype(str) + "_" + train_scd["visit_month"].astype(str)
)
# Here we combine both main and supplemental clinical data into a single dataframe
df_cd = pd.concat([train_cd, train_scd], ignore_index=True)
display(df_cd.info())
print(f'Unique Clinical Data patient #: {train_cd["patient_id"].nunique()}')
print("----------------------------------------------------------")
print(f"Null Values Found in Clinical Data:")
for col in train_cd.columns:
print(f"Null values found in {col}: {train_cd[col].isna().sum()}")
print("")
# Since there are very few null values in updrs_1, updrs_2 and updrs_3, we can impute them using linear interpolation
train_cd["updrs_1"] = train_cd["updrs_1"].interpolate(method="linear")
train_cd["updrs_2"] = train_cd["updrs_2"].interpolate(method="linear")
train_cd["updrs_3"] = train_cd["updrs_3"].interpolate(method="linear")
train_cd["updrs_4"] = train_cd["updrs_4"].fillna(0)
print(f'Unique Clinical Data patient #: {train_cd["patient_id"].nunique()}')
print("----------------------------------------------------------")
print(f"Null Values Found in Clinical Data:")
for col in train_cd.columns:
print(f"Null values found in {col}: {train_cd[col].isna().sum()}")
print("")
df_cd.melt(
id_vars=[
"visit_id",
"patient_id",
"visit_month",
"upd23b_clinical_state_on_medication",
],
var_name="updrs",
value_name="rating",
)
df_all = df_p.merge(
df_cd[
[
"visit_id",
"updrs_1",
"updrs_2",
"updrs_3",
"updrs_4",
"upd23b_clinical_state_on_medication",
]
],
on=["visit_id"],
how="left",
)
df_all.info()
df_all
print("\n.... Total patients: ", train_peptides.patient_id.nunique())
print("\n.... Total number of visits:", train_proteins["visit_id"].nunique())
print("\n.... Total proteins: ", train_proteins.UniProt.nunique())
print("\n.... Total peptides: ", train_peptides.Peptide.nunique())
print("\n.... Months counts:")
print(train_proteins.visit_month.value_counts())
# **Inferences** -
# * There are 248 patients in the dataset.
# * There are 1,113 visit_id, which means each patient visited around 4.48 times on average. This means, on average, each patient has 4~5 records.
# * There are often several peptides per protein. You should merge peptides dataset to proteins.
# * There are 227 unique UniProt ID, while there are 968 types of peptide.
# * Data for some patients goes as far as 108 months or 9 years
corr = df_all.corr("spearman")
mask = np.triu(np.ones_like(corr, dtype=bool))
f, ax = plt.subplots(figsize=(11, 9))
sns.heatmap(
corr,
mask=mask,
cmap="Spectral",
vmax=1,
center=0.1,
annot=True,
square=True,
linewidths=0.5,
cbar_kws={"shrink": 0.5},
)
from pandas_profiling import ProfileReport
profile = ProfileReport(df_all, title="Profiling Report")
profile.to_notebook_iframe()
print(f'Unique Clinical Data patient #: {df_all["patient_id"].nunique()}')
print("----------------------------------------------------------")
print(f"Null Values Found in Clinical Data:")
for col in df_all.columns:
print(f"Null values found in {col}: {df_all[col].isna().sum()}")
print("")
| false | 0 | 3,776 | 0 | 3,776 | 3,776 |
||
129737017
|
# # 1. Introdiction
# ## 1.1 About project
# The Titanic Survivor Prediction is a classic machine learning problem that involves predicting whether a passenger on the Titanic survived or not based on various features such as their age, gender, ticket class, and cabin location. This problem is based on the sinking of the RMS Titanic on April 15, 1912, which is one of the most infamous maritime disasters in history.
# ## 1.2 Variable Explanations
# * PassengerId: The unique ID assigned to each passenger.
# * Survived: Indicates whether a passenger survived or not (0 = No, 1 = Yes)
# * Pclass: Ticket class, a proxy for socio-economic status (1 = 1st, 2 = 2nd, 3 = 3rd)
# * Name: The name of the passenger
# * Sex: Gender of the passenger (Male or Female)
# * Age: Age of the passenger
# * SibSp: Number of siblings/spouses aboard the Titanic
# * Parch: Number of parents/children aboard the Titanic
# * Ticket: Ticket number of the passenger
# * Fare: The passenger fare
# * Cabin: Cabin number of the passenger
# * Embarked: Port of embarkation (C = Cherbourg, Q = Queenstown, S = Southampton)
# # 2. Import necessary libraries
#
import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
# # 3. Load the Titanic dataset
#
train_data = pd.read_csv("../input/titanic/train.csv")
test_data = pd.read_csv("../input/titanic/test.csv")
# # 4. Exploratory Data Analaysis
# ## 4.1 EDA for train data
train_data.sample(5)
train_data.info()
train_data.describe()
# View the distribution of the target variable
sns.countplot(x="Survived", data=train_data)
plt.show()
categorical_features = ["Pclass", "Sex", "SibSp", "Parch", "Embarked"]
def visualize_categorical_columns(df, col_list, hue="Survived"):
for col in col_list:
# hue='Survived'
sns.countplot(x=col, data=df, hue=hue)
plt.show()
return
visualize_categorical_columns(train_data, categorical_features)
# ## 4.2 EDA for test data
test_data.sample(5)
test_data.sample(5)
test_data.info()
test_data.describe()
# ## 4.3 Find number of missing values in each column
# Find the number of null values for each column
num_null = train_data.isnull().sum()
# Find the percentage of null values for each column
percent_null = round((train_data.isnull().sum() / len(train_data)) * 100, 2)
# Combine the results into a single DataFrame
null_values = pd.concat(
[num_null, percent_null],
axis=1,
keys=["Number of Null Values", "Percentage of Null Values"],
)
# Print the results
print(null_values)
# Find the number of null values for each column
num_null = test_data.isnull().sum()
# Find the percentage of null values for each column
percent_null = round((test_data.isnull().sum() / len(test_data)) * 100, 2)
# Combine the results into a single DataFrame
null_values = pd.concat(
[num_null, percent_null],
axis=1,
keys=["Number of Null Values", "Percentage of Null Values"],
)
# Print the results
print(null_values)
# # 5. Data preprocessing
# ## 5.1 Fill missing values
# Fill missing values for Age with the median
train_data["Age"].fillna(train_data["Age"].median(), inplace=True)
test_data["Age"].fillna(test_data["Age"].median(), inplace=True)
# Fill missing values for Embarked with the mode
train_data["Embarked"].fillna(train_data["Embarked"].mode()[0], inplace=True)
# Fill missing values for Age with the median
test_data["Fare"].fillna(test_data["Fare"].median(), inplace=True)
# ## 5.2 Encode categorical variables
train_data = pd.get_dummies(train_data, columns=["Sex", "Embarked"], drop_first=True)
test_data = pd.get_dummies(test_data, columns=["Sex", "Embarked"], drop_first=True)
# ## 5.3 Feature scaling
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
train_data[["Age", "Fare"]] = scaler.fit_transform(train_data[["Age", "Fare"]])
test_data[["Age", "Fare"]] = scaler.transform(test_data[["Age", "Fare"]])
# ## 5.4 Split the data into training and validation sets
#
from sklearn.model_selection import train_test_split
X = train_data.drop(["Survived", "PassengerId", "Name", "Ticket", "Cabin"], axis=1)
y = train_data["Survived"]
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
# # 6. Modelling
# ## 6.1 Model Training
# Train a logistic regression model
from sklearn.linear_model import LogisticRegression
lr_model = LogisticRegression()
lr_model.fit(X_train, y_train)
# Train a random forest classifier
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=42)
rf_model.fit(X_train, y_train)
# Train a support vector machine
from sklearn.svm import SVC
svm_model = SVC(kernel="rbf", C=1, gamma="scale", probability=True, random_state=42)
svm_model.fit(X_train, y_train)
# ## 5.2 Evaluate the models on the validation set
#
from sklearn.metrics import accuracy_score, roc_auc_score
models = [
("Logistic Regression", lr_model),
("Random Forest", rf_model),
("SVM", svm_model),
]
for name, model in models:
y_pred = model.predict(X_val)
y_prob = model.predict_proba(X_val)[
:, 1
] # probability estimates of the positive class
accuracy = accuracy_score(y_val, y_pred)
auc = roc_auc_score(y_val, y_prob)
print(f"{name}: Validation accuracy = {accuracy:.4f}, AUC = {auc:.4f}")
# # 7. Making a Submission
# ## 7.1 Make predictions on the test data
#
X_test = test_data.drop(["PassengerId", "Name", "Ticket", "Cabin"], axis=1)
lr_pred = lr_model.predict(X_test)
rf_pred = rf_model.predict(X_test)
svm_pred = svm_model.predict(X_test)
# ## 7.2 Save the predictions to a CSV file
# Ensemble the predictions using majority voting
ensemble_pred = np.round((lr_pred + rf_pred + svm_pred) / 3).astype(int)
output = pd.DataFrame(
{"PassengerId": test_data["PassengerId"], "Survived": ensemble_pred}
)
output.to_csv("submission.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/737/129737017.ipynb
| null | null |
[{"Id": 129737017, "ScriptId": 38165652, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10602437, "CreationDate": "05/16/2023 06:05:42", "VersionNumber": 1.0, "Title": "Simple Titanic Survival prediction", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 209.0, "LinesInsertedFromPrevious": 209.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 10}]
| null | null | null | null |
# # 1. Introdiction
# ## 1.1 About project
# The Titanic Survivor Prediction is a classic machine learning problem that involves predicting whether a passenger on the Titanic survived or not based on various features such as their age, gender, ticket class, and cabin location. This problem is based on the sinking of the RMS Titanic on April 15, 1912, which is one of the most infamous maritime disasters in history.
# ## 1.2 Variable Explanations
# * PassengerId: The unique ID assigned to each passenger.
# * Survived: Indicates whether a passenger survived or not (0 = No, 1 = Yes)
# * Pclass: Ticket class, a proxy for socio-economic status (1 = 1st, 2 = 2nd, 3 = 3rd)
# * Name: The name of the passenger
# * Sex: Gender of the passenger (Male or Female)
# * Age: Age of the passenger
# * SibSp: Number of siblings/spouses aboard the Titanic
# * Parch: Number of parents/children aboard the Titanic
# * Ticket: Ticket number of the passenger
# * Fare: The passenger fare
# * Cabin: Cabin number of the passenger
# * Embarked: Port of embarkation (C = Cherbourg, Q = Queenstown, S = Southampton)
# # 2. Import necessary libraries
#
import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
# # 3. Load the Titanic dataset
#
train_data = pd.read_csv("../input/titanic/train.csv")
test_data = pd.read_csv("../input/titanic/test.csv")
# # 4. Exploratory Data Analaysis
# ## 4.1 EDA for train data
train_data.sample(5)
train_data.info()
train_data.describe()
# View the distribution of the target variable
sns.countplot(x="Survived", data=train_data)
plt.show()
categorical_features = ["Pclass", "Sex", "SibSp", "Parch", "Embarked"]
def visualize_categorical_columns(df, col_list, hue="Survived"):
for col in col_list:
# hue='Survived'
sns.countplot(x=col, data=df, hue=hue)
plt.show()
return
visualize_categorical_columns(train_data, categorical_features)
# ## 4.2 EDA for test data
test_data.sample(5)
test_data.sample(5)
test_data.info()
test_data.describe()
# ## 4.3 Find number of missing values in each column
# Find the number of null values for each column
num_null = train_data.isnull().sum()
# Find the percentage of null values for each column
percent_null = round((train_data.isnull().sum() / len(train_data)) * 100, 2)
# Combine the results into a single DataFrame
null_values = pd.concat(
[num_null, percent_null],
axis=1,
keys=["Number of Null Values", "Percentage of Null Values"],
)
# Print the results
print(null_values)
# Find the number of null values for each column
num_null = test_data.isnull().sum()
# Find the percentage of null values for each column
percent_null = round((test_data.isnull().sum() / len(test_data)) * 100, 2)
# Combine the results into a single DataFrame
null_values = pd.concat(
[num_null, percent_null],
axis=1,
keys=["Number of Null Values", "Percentage of Null Values"],
)
# Print the results
print(null_values)
# # 5. Data preprocessing
# ## 5.1 Fill missing values
# Fill missing values for Age with the median
train_data["Age"].fillna(train_data["Age"].median(), inplace=True)
test_data["Age"].fillna(test_data["Age"].median(), inplace=True)
# Fill missing values for Embarked with the mode
train_data["Embarked"].fillna(train_data["Embarked"].mode()[0], inplace=True)
# Fill missing values for Age with the median
test_data["Fare"].fillna(test_data["Fare"].median(), inplace=True)
# ## 5.2 Encode categorical variables
train_data = pd.get_dummies(train_data, columns=["Sex", "Embarked"], drop_first=True)
test_data = pd.get_dummies(test_data, columns=["Sex", "Embarked"], drop_first=True)
# ## 5.3 Feature scaling
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
train_data[["Age", "Fare"]] = scaler.fit_transform(train_data[["Age", "Fare"]])
test_data[["Age", "Fare"]] = scaler.transform(test_data[["Age", "Fare"]])
# ## 5.4 Split the data into training and validation sets
#
from sklearn.model_selection import train_test_split
X = train_data.drop(["Survived", "PassengerId", "Name", "Ticket", "Cabin"], axis=1)
y = train_data["Survived"]
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
# # 6. Modelling
# ## 6.1 Model Training
# Train a logistic regression model
from sklearn.linear_model import LogisticRegression
lr_model = LogisticRegression()
lr_model.fit(X_train, y_train)
# Train a random forest classifier
from sklearn.ensemble import RandomForestClassifier
rf_model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=42)
rf_model.fit(X_train, y_train)
# Train a support vector machine
from sklearn.svm import SVC
svm_model = SVC(kernel="rbf", C=1, gamma="scale", probability=True, random_state=42)
svm_model.fit(X_train, y_train)
# ## 5.2 Evaluate the models on the validation set
#
from sklearn.metrics import accuracy_score, roc_auc_score
models = [
("Logistic Regression", lr_model),
("Random Forest", rf_model),
("SVM", svm_model),
]
for name, model in models:
y_pred = model.predict(X_val)
y_prob = model.predict_proba(X_val)[
:, 1
] # probability estimates of the positive class
accuracy = accuracy_score(y_val, y_pred)
auc = roc_auc_score(y_val, y_prob)
print(f"{name}: Validation accuracy = {accuracy:.4f}, AUC = {auc:.4f}")
# # 7. Making a Submission
# ## 7.1 Make predictions on the test data
#
X_test = test_data.drop(["PassengerId", "Name", "Ticket", "Cabin"], axis=1)
lr_pred = lr_model.predict(X_test)
rf_pred = rf_model.predict(X_test)
svm_pred = svm_model.predict(X_test)
# ## 7.2 Save the predictions to a CSV file
# Ensemble the predictions using majority voting
ensemble_pred = np.round((lr_pred + rf_pred + svm_pred) / 3).astype(int)
output = pd.DataFrame(
{"PassengerId": test_data["PassengerId"], "Survived": ensemble_pred}
)
output.to_csv("submission.csv", index=False)
| false | 0 | 1,980 | 10 | 1,980 | 1,980 |
||
129737037
|
<jupyter_start><jupyter_text>Oxford-IIIT-Pet-from-XijiaTao
Kaggle dataset identifier: oxfordiiitpetfromxijiatao
<jupyter_script>import torch
import torchvision
import os
from matplotlib import pyplot
from PIL import Image
img2t = torchvision.transforms.ToTensor()
t2img = torchvision.transforms.ToPILImage()
# Oxford IIIT Pets Segmentation dataset loaded via torchvision.
oxford_pets_path = "/kaggle/working"
pets_train_orig = torchvision.datasets.OxfordIIITPet(
root=oxford_pets_path, split="trainval", target_types="segmentation", download=False
)
pets_test_orig = torchvision.datasets.OxfordIIITPet(
root=oxford_pets_path, split="test", target_types="segmentation", download=False
)
pets_train_orig, pets_test_orig
def display_images_and_masks(dataset, indexes):
# Display a maximum of 2 sets of (image, mask) pairs per row.
nrows = (len(indexes) + 1) // 2
# 3 units height per row.
fig = pyplot.figure(figsize=(10, 3 * nrows))
for i in range(len(indexes)):
image, mask = dataset[i][0], dataset[i][1]
fig.add_subplot(nrows, 4, i * 2 + 1)
pyplot.imshow(image)
pyplot.axis("off")
fig.add_subplot(nrows, 4, i * 2 + 2)
pyplot.imshow(mask)
pyplot.axis("off")
# end for
# end def
# Display 4 training and test images.
display_images_and_masks(pets_train_orig, indexes=(0, 1, 2, 3))
pyplot.show()
display_images_and_masks(pets_test_orig, indexes=(0, 1, 2, 3))
pyplot.show()
resizer = torchvision.transforms.Resize((128, 128))
image = pets_train_orig[0][0]
image128 = resizer(image)
print(image.size, image128.size)
fig = pyplot.figure(figsize=(10, 4))
pyplot.subplot(1, 2, 1)
pyplot.imshow(image)
pyplot.axis("off")
pyplot.subplot(1, 2, 2)
pyplot.imshow(image128)
pyplot.axis("off")
def display_images_with_augmentations(image, transforms, labels):
# Display a maximum of 4 images per row.
nrows = (len(transforms) + 1 + 3) // 4
# 3 units height per row.
fig = pyplot.figure(figsize=(10, 3 * nrows))
# The first time an image is displayed, don't transform it.
transforms = [lambda x: x] + transforms
labels = ["Original"] + labels
for i in range(len(transforms)):
timage = transforms[i](image)
fig.add_subplot(nrows, 4, i + 1)
pyplot.title(labels[i])
pyplot.imshow(timage)
pyplot.axis("off")
# end for
# end def
class ChannelShuffle:
def __init__(self, permute):
super().__init__()
self.permute = list(permute)
def __call__(self, x):
if isinstance(x, Image.Image):
t = img2t(x)
back = t2img
else:
t = x
back = lambda x: x
tnew = t[self.permute]
return back(tnew)
image = torchvision.transforms.Resize((128, 128))(pets_train_orig[2][0])
transforms = [
torchvision.transforms.ColorJitter(brightness=0.5, contrast=0.5),
torchvision.transforms.Grayscale(num_output_channels=1),
torchvision.transforms.GaussianBlur(kernel_size=5),
torchvision.transforms.RandomPosterize(bits=3, p=1.0),
torchvision.transforms.RandomSolarize(threshold=0.5, p=1.0),
torchvision.transforms.RandomInvert(p=1.0),
ChannelShuffle((1, 2, 0)),
torchvision.transforms.RandomHorizontalFlip(p=1.0),
torchvision.transforms.RandomEqualize(p=1.0),
torchvision.transforms.RandomAutocontrast(p=1.0),
torchvision.transforms.RandomAdjustSharpness(sharpness_factor=2.0, p=1.0),
]
labels = [
"Color Jitter",
"Grayscale",
"Gaussian Blur",
"Posterize",
"Solarize",
"Invert",
"Channel Suffle",
"Horizontal Flip",
"Equalize",
"Autocontrast",
"Sharpness",
]
display_images_with_augmentations(image, transforms, labels)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/737/129737037.ipynb
|
oxfordiiitpetfromxijiatao
|
cielceline
|
[{"Id": 129737037, "ScriptId": 38579855, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4930, "CreationDate": "05/16/2023 06:05:54", "VersionNumber": 1.0, "Title": "Starter for Oxford IIIT Pet using torchvision", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 115.0, "LinesInsertedFromPrevious": 115.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 186084284, "KernelVersionId": 129737037, "SourceDatasetVersionId": 1491706}]
|
[{"Id": 1491706, "DatasetId": 875741, "DatasourceVersionId": 1525665, "CreatorUserId": 3593372, "LicenseName": "Unknown", "CreationDate": "09/16/2020 02:21:31", "VersionNumber": 1.0, "Title": "Oxford-IIIT-Pet-from-XijiaTao", "Slug": "oxfordiiitpetfromxijiatao", "Subtitle": "For Training of Image Classification and Object Localization", "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 875741, "CreatorUserId": 3593372, "OwnerUserId": 3593372.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1491706.0, "CurrentDatasourceVersionId": 1525665.0, "ForumId": 891166, "Type": 2, "CreationDate": "09/16/2020 02:21:31", "LastActivityDate": "09/16/2020", "TotalViews": 1308, "TotalDownloads": 16, "TotalVotes": 3, "TotalKernels": 2}]
|
[{"Id": 3593372, "UserName": "cielceline", "DisplayName": "ciel", "RegisterDate": "08/18/2019", "PerformanceTier": 1}]
|
import torch
import torchvision
import os
from matplotlib import pyplot
from PIL import Image
img2t = torchvision.transforms.ToTensor()
t2img = torchvision.transforms.ToPILImage()
# Oxford IIIT Pets Segmentation dataset loaded via torchvision.
oxford_pets_path = "/kaggle/working"
pets_train_orig = torchvision.datasets.OxfordIIITPet(
root=oxford_pets_path, split="trainval", target_types="segmentation", download=False
)
pets_test_orig = torchvision.datasets.OxfordIIITPet(
root=oxford_pets_path, split="test", target_types="segmentation", download=False
)
pets_train_orig, pets_test_orig
def display_images_and_masks(dataset, indexes):
# Display a maximum of 2 sets of (image, mask) pairs per row.
nrows = (len(indexes) + 1) // 2
# 3 units height per row.
fig = pyplot.figure(figsize=(10, 3 * nrows))
for i in range(len(indexes)):
image, mask = dataset[i][0], dataset[i][1]
fig.add_subplot(nrows, 4, i * 2 + 1)
pyplot.imshow(image)
pyplot.axis("off")
fig.add_subplot(nrows, 4, i * 2 + 2)
pyplot.imshow(mask)
pyplot.axis("off")
# end for
# end def
# Display 4 training and test images.
display_images_and_masks(pets_train_orig, indexes=(0, 1, 2, 3))
pyplot.show()
display_images_and_masks(pets_test_orig, indexes=(0, 1, 2, 3))
pyplot.show()
resizer = torchvision.transforms.Resize((128, 128))
image = pets_train_orig[0][0]
image128 = resizer(image)
print(image.size, image128.size)
fig = pyplot.figure(figsize=(10, 4))
pyplot.subplot(1, 2, 1)
pyplot.imshow(image)
pyplot.axis("off")
pyplot.subplot(1, 2, 2)
pyplot.imshow(image128)
pyplot.axis("off")
def display_images_with_augmentations(image, transforms, labels):
# Display a maximum of 4 images per row.
nrows = (len(transforms) + 1 + 3) // 4
# 3 units height per row.
fig = pyplot.figure(figsize=(10, 3 * nrows))
# The first time an image is displayed, don't transform it.
transforms = [lambda x: x] + transforms
labels = ["Original"] + labels
for i in range(len(transforms)):
timage = transforms[i](image)
fig.add_subplot(nrows, 4, i + 1)
pyplot.title(labels[i])
pyplot.imshow(timage)
pyplot.axis("off")
# end for
# end def
class ChannelShuffle:
def __init__(self, permute):
super().__init__()
self.permute = list(permute)
def __call__(self, x):
if isinstance(x, Image.Image):
t = img2t(x)
back = t2img
else:
t = x
back = lambda x: x
tnew = t[self.permute]
return back(tnew)
image = torchvision.transforms.Resize((128, 128))(pets_train_orig[2][0])
transforms = [
torchvision.transforms.ColorJitter(brightness=0.5, contrast=0.5),
torchvision.transforms.Grayscale(num_output_channels=1),
torchvision.transforms.GaussianBlur(kernel_size=5),
torchvision.transforms.RandomPosterize(bits=3, p=1.0),
torchvision.transforms.RandomSolarize(threshold=0.5, p=1.0),
torchvision.transforms.RandomInvert(p=1.0),
ChannelShuffle((1, 2, 0)),
torchvision.transforms.RandomHorizontalFlip(p=1.0),
torchvision.transforms.RandomEqualize(p=1.0),
torchvision.transforms.RandomAutocontrast(p=1.0),
torchvision.transforms.RandomAdjustSharpness(sharpness_factor=2.0, p=1.0),
]
labels = [
"Color Jitter",
"Grayscale",
"Gaussian Blur",
"Posterize",
"Solarize",
"Invert",
"Channel Suffle",
"Horizontal Flip",
"Equalize",
"Autocontrast",
"Sharpness",
]
display_images_with_augmentations(image, transforms, labels)
| false | 0 | 1,165 | 0 | 1,206 | 1,165 |
||
129737410
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from matplotlib import pyplot as plt
import seaborn as sns
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
with open(
"/kaggle/input/house-prices-advanced-regression-techniques/data_description.txt",
"r",
) as file:
contents = file.read()
print(contents)
## STEP 3: Multiple Linear Regression using feature selection (using corr, no feature penalties) + all-but-one validation + outlier transformations
train_og = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/train.csv"
)
train = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/train.csv"
)
train.drop("Id", inplace=True, axis=1)
catColumns = train.select_dtypes(exclude=[int, float]).columns.values
intColumns = train.select_dtypes(include=[int]).columns.values
floatColumns = train.select_dtypes(include=[float]).columns.values
naConv = {catColName: train[catColName].mode()[0] for catColName in catColumns}
naConv.update(
{floatColName: train[floatColName].median() for floatColName in floatColumns}
)
naConv.update(
{intColName: train[intColName].median().round() for intColName in intColumns}
)
print(naConv)
train.fillna(naConv, inplace=True)
## One-Hot Encoding for categorical variables
from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder
def oneHotEncode(df, columns, colsToDrop):
"""
df: dataframe requiring encoding
columns: list of features that need to be encoded
drop: if you want old cat coluns to be dropped from df after encoding
colsToDrop: self-explanatory
"""
df = df.reset_index(drop=True)
ohe = OneHotEncoder(sparse_output=False)
precolumns = [item for item in df.columns if item not in columns]
encDf = df[precolumns]
for colname in columns:
# one-hot encode
print(colname)
oheRes = ohe.fit_transform(df[[colname]])
# Convert to df
ohResDf = pd.DataFrame(oheRes, columns=ohe.get_feature_names_out([colname]))
# concat results with encoded df, currently empty
encDf = pd.concat([encDf, ohResDf], axis=1)
print(f"The following {len(columns)} features were one-hot encoded: {columns}\n")
encDf = encDf.drop(columns=colsToDrop)
print(
f"And the following categories were dropped: {colsToDrop}\n and the following {len(columns)} features: {columns}"
)
return encDf
## To perform one-hot encoding, for each column, we must find least occuring category, and drop the associated column from the encoded df.
## This decreases multicollinearity, since one of those columns will be enterily be producable by the other k-1 columns
def lowestFreqAttributes(df, columns):
"""
Goal: Return an np.array, where each item is the lowest-occuring category on the nth column, formatted to resemble name after onehotencoding
df: Pandas Dataframe
columns: Pandas Columns
"""
ans = []
for colname in columns:
lowestFreqAtt = df[colname].value_counts().index[-1]
ans.append(lowestFreqAtt)
columnsNp = np.array(columns)
ansNp = np.array(ans)
## Name modification to match with encoder column names
concatStrings = np.vectorize(lambda x, y: x + "_" + y)
## These are [ans] after formatting: the final names of columns that need to be dropped after df has undergone one-hot encoding
dropAfterOhe = concatStrings(columnsNp, ansNp)
return dropAfterOhe
## Encoding and deletion of statistically redundant indicator variables
dropAfterOhe = lowestFreqAttributes(train, catColumns)
trainEnc = oneHotEncode(df=train, columns=catColumns, colsToDrop=dropAfterOhe)
trainEnc.shape
# CLEANING AND ENCODING DONE.
# One way to improve data preprocessing/cleaning:
# - Manually select features that may require ordinal encoding instead of basic one hot encoding
# FEATURE SELECTION AND MODEL TRAINING:
# - Check for Multicollinearity
# - Use metric to pick important features
## ALL pairs of numerical features + label s.t corr is > 0.5
## Let's look at VIF and feature correlations
# Import library for VIF
from statsmodels.stats.outliers_influence import variance_inflation_factor
X = trainEnc.drop(["SalePrice"], inplace=False, axis=1)
y = trainEnc["SalePrice"]
corrMatrix = trainEnc.corr()
VIF = pd.DataFrame()
VIF["VIF_Factor"] = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])]
VIF["Features"] = X.columns
## plt.figure(figsize=(30, 15))
## sns.heatmap(corrMatrix, annot=False, cmap='seismic')
print("VIF Values: \n", VIF.to_string())
## Very high multicollinearity
## Choose features based on corr and pvalue, perform LASSO Regression to introduce bias, deal with multicollinearity and overfitting
## Predictive Features:
import scipy
from scipy.stats import chi2
from scipy.stats import chi2_contingency
from scipy.stats import pearsonr, spearmanr
## FEATURE ENGINEERING:
## Select linear features + quad features + ln features
linearFeatures = pd.DataFrame(
[((col, col) + scipy.stats.pearsonr(X[col], y)) for col in X.columns],
columns=["Var Name", "Original", "Pearson Corr.", "p-value"],
index=X.columns,
).round(4)
quadFeatures = pd.DataFrame(
[
(("sq_" + col, col) + scipy.stats.pearsonr((X[col]) ** 2, y))
for col in X.columns
],
columns=["Var Name", "Original", "Pearson Corr.", "p-value"],
index=X.columns,
).round(4)
logFeatures = pd.DataFrame(
[
(("ln_" + col, col) + scipy.stats.pearsonr(np.log1p(X[col]), y))
for col in X.columns
],
columns=["Var Name", "Original", "Pearson Corr.", "p-value"],
index=X.columns,
).round(4)
## Filter Features w/ corr > 0.5 and pvalue < 0.5
linearSigFeatures = linearFeatures[
(linearFeatures["Pearson Corr."].abs() > 0.5) & (linearFeatures["p-value"] < 0.05)
]
quadSigFeatures = quadFeatures[
(quadFeatures["Pearson Corr."].abs() > 0.5) & (quadFeatures["p-value"] < 0.05)
]
logSigFeatures = logFeatures[
(logFeatures["Pearson Corr."].abs() > 0.5) & (logFeatures["p-value"] < 0.05)
]
sigFeatures = pd.concat(
[linearSigFeatures, quadSigFeatures, logSigFeatures], axis=0, ignore_index=True
)
sigFeatures.sort_values("Pearson Corr.", ascending=False, inplace=True)
sigFeatures.drop_duplicates(
subset=["Original"], keep="first", inplace=True, ignore_index=True
)
print(sigFeatures)
## Imports
from xgboost import XGBRegressor
from sklearn.linear_model import (
BayesianRidge,
ElasticNet,
PassiveAggressiveRegressor,
Ridge,
)
from sklearn.ensemble import (
HistGradientBoostingRegressor,
GradientBoostingRegressor,
RandomForestRegressor,
BaggingRegressor,
)
from lightgbm import LGBMRegressor
# performance metric
from sklearn.metrics import mean_squared_error
# Pipeline
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import RobustScaler, StandardScaler
test = pd.read_csv("/kaggle/input/house-prices-advanced-regression-techniques/test.csv")
## AAAAA MAKE TRANSFORM FUNCTION TO TRANSFORM TEST DATA BEFORE TRAINING AS WELL AAAAAAAAAAAA
def dataTransform(data):
## NA's
catColumns = data.select_dtypes(exclude=[int, float]).columns.values
intColumns = data.select_dtypes(include=[int]).columns.values
floatColumns = data.select_dtypes(include=[float]).columns.values
naConv = {catColName: data[catColName].mode()[0] for catColName in catColumns}
naConv.update(
{
floatColName: data[floatColName][data[floatColName] != np.nan].median()
for floatColName in floatColumns
}
)
naConv.update(
{
intColName: data[intColName][data[intColName] != np.nan].median().round()
for intColName in intColumns
}
)
data.fillna(naConv, inplace=True)
print(naConv)
## Encoding
dropAfterOhe = lowestFreqAttributes(data, catColumns)
dataEnc = oneHotEncode(df=data, columns=catColumns, colsToDrop=dropAfterOhe)
return dataEnc
data = pd.concat([train_og, test], axis=0, ignore_index=True)
fullData = dataTransform(data)
## Separating data and initiating modelling process
train = fullData.iloc[:1452, :]
test = fullData.iloc[1452:, :]
Xtrain = train.drop("SalePrice", axis=1)
ytrain = train["SalePrice"]
test = test.drop("SalePrice", axis=1)
hgbr_pipeline = Pipeline(
[("scaler", RobustScaler()), ("hgbr", HistGradientBoostingRegressor())]
)
gbr_pipeline = Pipeline(
[("scaler", RobustScaler()), ("gbr", GradientBoostingRegressor())]
)
lgbmr_pipeline = Pipeline(
[("scaler", RobustScaler()), ("lgbmr", LGBMRegressor(reg_alpha=0.1))]
)
br_pipeline = Pipeline([("scaler", RobustScaler()), ("br", BayesianRidge())])
par_pipeline = Pipeline(
[("scaler", RobustScaler()), ("par", PassiveAggressiveRegressor())]
)
rfr_pipeline = Pipeline([("scaler", RobustScaler()), ("rfr", RandomForestRegressor())])
brg_pipeline = Pipeline([("scaler", RobustScaler()), ("brg", BaggingRegressor())])
en_pipeline = Pipeline([("scaler", RobustScaler()), ("en", ElasticNet())])
xgbr_pipeline = Pipeline(
[("scaler", RobustScaler()), ("xgbr", XGBRegressor(reg_alpha=0.1))]
)
pipelines = [
"hgbr_pipeline",
"gbr_pipeline",
"lgbmr_pipeline",
"br_pipeline",
"par_pipeline",
"rfr_pipeline",
"brg_pipeline",
"en_pipeline",
"xgbr_pipeline",
]
results = []
for pipelinename in pipelines:
pipeline = eval(pipelinename)
pipeline.fit(Xtrain, ytrain)
ypredtrain = pipeline.predict(Xtrain)
rmse = np.sqrt(mean_squared_error(ytrain, ypredtrain))
results.append((pipelinename, rmse))
results.sort(key=lambda x: x[1])
for pipelinename, rmse in results:
print("Root Mean Squared Error ({0}): {1}".format(pipelinename, rmse))
bestmodelname = results[0][0]
print("Best model:", bestmodelname)
xbgModel = pipeline.named_steps["xgbr"]
## Do some diagnostics here later pls!!
ypred = pipeline.predict(test)[:1459]
predDf = pd.DataFrame(ypred) ## vector
subDf = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/sample_submission.csv"
) ## Since we dropped ID column earlier
df = pd.concat([subDf["Id"], predDf], axis=1, ignore_index=True)
df.columns = ["Id", "SalePrice"]
df.to_csv("/kaggle/working/submission.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/737/129737410.ipynb
| null | null |
[{"Id": 129737410, "ScriptId": 38561281, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14876796, "CreationDate": "05/16/2023 06:09:31", "VersionNumber": 5.0, "Title": "House Prices Fixes", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 305.0, "LinesInsertedFromPrevious": 63.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 242.0, "LinesInsertedFromFork": 141.0, "LinesDeletedFromFork": 295.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 164.0, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from matplotlib import pyplot as plt
import seaborn as sns
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
with open(
"/kaggle/input/house-prices-advanced-regression-techniques/data_description.txt",
"r",
) as file:
contents = file.read()
print(contents)
## STEP 3: Multiple Linear Regression using feature selection (using corr, no feature penalties) + all-but-one validation + outlier transformations
train_og = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/train.csv"
)
train = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/train.csv"
)
train.drop("Id", inplace=True, axis=1)
catColumns = train.select_dtypes(exclude=[int, float]).columns.values
intColumns = train.select_dtypes(include=[int]).columns.values
floatColumns = train.select_dtypes(include=[float]).columns.values
naConv = {catColName: train[catColName].mode()[0] for catColName in catColumns}
naConv.update(
{floatColName: train[floatColName].median() for floatColName in floatColumns}
)
naConv.update(
{intColName: train[intColName].median().round() for intColName in intColumns}
)
print(naConv)
train.fillna(naConv, inplace=True)
## One-Hot Encoding for categorical variables
from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder
def oneHotEncode(df, columns, colsToDrop):
"""
df: dataframe requiring encoding
columns: list of features that need to be encoded
drop: if you want old cat coluns to be dropped from df after encoding
colsToDrop: self-explanatory
"""
df = df.reset_index(drop=True)
ohe = OneHotEncoder(sparse_output=False)
precolumns = [item for item in df.columns if item not in columns]
encDf = df[precolumns]
for colname in columns:
# one-hot encode
print(colname)
oheRes = ohe.fit_transform(df[[colname]])
# Convert to df
ohResDf = pd.DataFrame(oheRes, columns=ohe.get_feature_names_out([colname]))
# concat results with encoded df, currently empty
encDf = pd.concat([encDf, ohResDf], axis=1)
print(f"The following {len(columns)} features were one-hot encoded: {columns}\n")
encDf = encDf.drop(columns=colsToDrop)
print(
f"And the following categories were dropped: {colsToDrop}\n and the following {len(columns)} features: {columns}"
)
return encDf
## To perform one-hot encoding, for each column, we must find least occuring category, and drop the associated column from the encoded df.
## This decreases multicollinearity, since one of those columns will be enterily be producable by the other k-1 columns
def lowestFreqAttributes(df, columns):
"""
Goal: Return an np.array, where each item is the lowest-occuring category on the nth column, formatted to resemble name after onehotencoding
df: Pandas Dataframe
columns: Pandas Columns
"""
ans = []
for colname in columns:
lowestFreqAtt = df[colname].value_counts().index[-1]
ans.append(lowestFreqAtt)
columnsNp = np.array(columns)
ansNp = np.array(ans)
## Name modification to match with encoder column names
concatStrings = np.vectorize(lambda x, y: x + "_" + y)
## These are [ans] after formatting: the final names of columns that need to be dropped after df has undergone one-hot encoding
dropAfterOhe = concatStrings(columnsNp, ansNp)
return dropAfterOhe
## Encoding and deletion of statistically redundant indicator variables
dropAfterOhe = lowestFreqAttributes(train, catColumns)
trainEnc = oneHotEncode(df=train, columns=catColumns, colsToDrop=dropAfterOhe)
trainEnc.shape
# CLEANING AND ENCODING DONE.
# One way to improve data preprocessing/cleaning:
# - Manually select features that may require ordinal encoding instead of basic one hot encoding
# FEATURE SELECTION AND MODEL TRAINING:
# - Check for Multicollinearity
# - Use metric to pick important features
## ALL pairs of numerical features + label s.t corr is > 0.5
## Let's look at VIF and feature correlations
# Import library for VIF
from statsmodels.stats.outliers_influence import variance_inflation_factor
X = trainEnc.drop(["SalePrice"], inplace=False, axis=1)
y = trainEnc["SalePrice"]
corrMatrix = trainEnc.corr()
VIF = pd.DataFrame()
VIF["VIF_Factor"] = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])]
VIF["Features"] = X.columns
## plt.figure(figsize=(30, 15))
## sns.heatmap(corrMatrix, annot=False, cmap='seismic')
print("VIF Values: \n", VIF.to_string())
## Very high multicollinearity
## Choose features based on corr and pvalue, perform LASSO Regression to introduce bias, deal with multicollinearity and overfitting
## Predictive Features:
import scipy
from scipy.stats import chi2
from scipy.stats import chi2_contingency
from scipy.stats import pearsonr, spearmanr
## FEATURE ENGINEERING:
## Select linear features + quad features + ln features
linearFeatures = pd.DataFrame(
[((col, col) + scipy.stats.pearsonr(X[col], y)) for col in X.columns],
columns=["Var Name", "Original", "Pearson Corr.", "p-value"],
index=X.columns,
).round(4)
quadFeatures = pd.DataFrame(
[
(("sq_" + col, col) + scipy.stats.pearsonr((X[col]) ** 2, y))
for col in X.columns
],
columns=["Var Name", "Original", "Pearson Corr.", "p-value"],
index=X.columns,
).round(4)
logFeatures = pd.DataFrame(
[
(("ln_" + col, col) + scipy.stats.pearsonr(np.log1p(X[col]), y))
for col in X.columns
],
columns=["Var Name", "Original", "Pearson Corr.", "p-value"],
index=X.columns,
).round(4)
## Filter Features w/ corr > 0.5 and pvalue < 0.5
linearSigFeatures = linearFeatures[
(linearFeatures["Pearson Corr."].abs() > 0.5) & (linearFeatures["p-value"] < 0.05)
]
quadSigFeatures = quadFeatures[
(quadFeatures["Pearson Corr."].abs() > 0.5) & (quadFeatures["p-value"] < 0.05)
]
logSigFeatures = logFeatures[
(logFeatures["Pearson Corr."].abs() > 0.5) & (logFeatures["p-value"] < 0.05)
]
sigFeatures = pd.concat(
[linearSigFeatures, quadSigFeatures, logSigFeatures], axis=0, ignore_index=True
)
sigFeatures.sort_values("Pearson Corr.", ascending=False, inplace=True)
sigFeatures.drop_duplicates(
subset=["Original"], keep="first", inplace=True, ignore_index=True
)
print(sigFeatures)
## Imports
from xgboost import XGBRegressor
from sklearn.linear_model import (
BayesianRidge,
ElasticNet,
PassiveAggressiveRegressor,
Ridge,
)
from sklearn.ensemble import (
HistGradientBoostingRegressor,
GradientBoostingRegressor,
RandomForestRegressor,
BaggingRegressor,
)
from lightgbm import LGBMRegressor
# performance metric
from sklearn.metrics import mean_squared_error
# Pipeline
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import RobustScaler, StandardScaler
test = pd.read_csv("/kaggle/input/house-prices-advanced-regression-techniques/test.csv")
## AAAAA MAKE TRANSFORM FUNCTION TO TRANSFORM TEST DATA BEFORE TRAINING AS WELL AAAAAAAAAAAA
def dataTransform(data):
## NA's
catColumns = data.select_dtypes(exclude=[int, float]).columns.values
intColumns = data.select_dtypes(include=[int]).columns.values
floatColumns = data.select_dtypes(include=[float]).columns.values
naConv = {catColName: data[catColName].mode()[0] for catColName in catColumns}
naConv.update(
{
floatColName: data[floatColName][data[floatColName] != np.nan].median()
for floatColName in floatColumns
}
)
naConv.update(
{
intColName: data[intColName][data[intColName] != np.nan].median().round()
for intColName in intColumns
}
)
data.fillna(naConv, inplace=True)
print(naConv)
## Encoding
dropAfterOhe = lowestFreqAttributes(data, catColumns)
dataEnc = oneHotEncode(df=data, columns=catColumns, colsToDrop=dropAfterOhe)
return dataEnc
data = pd.concat([train_og, test], axis=0, ignore_index=True)
fullData = dataTransform(data)
## Separating data and initiating modelling process
train = fullData.iloc[:1452, :]
test = fullData.iloc[1452:, :]
Xtrain = train.drop("SalePrice", axis=1)
ytrain = train["SalePrice"]
test = test.drop("SalePrice", axis=1)
hgbr_pipeline = Pipeline(
[("scaler", RobustScaler()), ("hgbr", HistGradientBoostingRegressor())]
)
gbr_pipeline = Pipeline(
[("scaler", RobustScaler()), ("gbr", GradientBoostingRegressor())]
)
lgbmr_pipeline = Pipeline(
[("scaler", RobustScaler()), ("lgbmr", LGBMRegressor(reg_alpha=0.1))]
)
br_pipeline = Pipeline([("scaler", RobustScaler()), ("br", BayesianRidge())])
par_pipeline = Pipeline(
[("scaler", RobustScaler()), ("par", PassiveAggressiveRegressor())]
)
rfr_pipeline = Pipeline([("scaler", RobustScaler()), ("rfr", RandomForestRegressor())])
brg_pipeline = Pipeline([("scaler", RobustScaler()), ("brg", BaggingRegressor())])
en_pipeline = Pipeline([("scaler", RobustScaler()), ("en", ElasticNet())])
xgbr_pipeline = Pipeline(
[("scaler", RobustScaler()), ("xgbr", XGBRegressor(reg_alpha=0.1))]
)
pipelines = [
"hgbr_pipeline",
"gbr_pipeline",
"lgbmr_pipeline",
"br_pipeline",
"par_pipeline",
"rfr_pipeline",
"brg_pipeline",
"en_pipeline",
"xgbr_pipeline",
]
results = []
for pipelinename in pipelines:
pipeline = eval(pipelinename)
pipeline.fit(Xtrain, ytrain)
ypredtrain = pipeline.predict(Xtrain)
rmse = np.sqrt(mean_squared_error(ytrain, ypredtrain))
results.append((pipelinename, rmse))
results.sort(key=lambda x: x[1])
for pipelinename, rmse in results:
print("Root Mean Squared Error ({0}): {1}".format(pipelinename, rmse))
bestmodelname = results[0][0]
print("Best model:", bestmodelname)
xbgModel = pipeline.named_steps["xgbr"]
## Do some diagnostics here later pls!!
ypred = pipeline.predict(test)[:1459]
predDf = pd.DataFrame(ypred) ## vector
subDf = pd.read_csv(
"/kaggle/input/house-prices-advanced-regression-techniques/sample_submission.csv"
) ## Since we dropped ID column earlier
df = pd.concat([subDf["Id"], predDf], axis=1, ignore_index=True)
df.columns = ["Id", "SalePrice"]
df.to_csv("/kaggle/working/submission.csv", index=False)
| false | 0 | 3,062 | 0 | 3,062 | 3,062 |
||
129293371
|
import tensorflow as tf
print(tf.__version__) # 2.11.0
# # Model
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import (
Conv2D,
MaxPooling2D,
Dropout,
Concatenate,
UpSampling2D,
LeakyReLU,
BatchNormalization,
)
from tensorflow.keras.initializers import HeNormal
from tensorflow.keras.activations import relu
from tensorflow.keras.optimizers import Adam
def generator_model(input_shape, biggest_layer=512):
inputs = Input(shape=input_shape)
conv1 = Conv2D(
64, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(inputs)
conv1 = Conv2D(
64, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(
128, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(pool1)
conv2 = Conv2D(
128, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(
256, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(pool2)
conv3 = Conv2D(
256, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(
biggest_layer // 2,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(pool3)
conv4 = Conv2D(
biggest_layer // 2,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
conv5 = Conv2D(
biggest_layer,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(pool4)
conv5 = Conv2D(
biggest_layer,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(conv5)
drop5 = Dropout(0.5)(conv5)
up6 = Conv2D(
512, 2, activation="relu", padding="same", kernel_initializer="he_normal"
)(UpSampling2D(size=(2, 2))(drop5))
merge6 = Concatenate()([drop4, up6])
conv6 = Conv2D(
512, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(merge6)
conv6 = Conv2D(
512, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(conv6)
up7 = Conv2D(
256, 2, activation="relu", padding="same", kernel_initializer="he_normal"
)(UpSampling2D(size=(2, 2))(conv6))
merge7 = Concatenate()([conv3, up7])
conv7 = Conv2D(
256, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(merge7)
conv7 = Conv2D(
256, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(conv7)
up8 = Conv2D(
128, 2, activation="relu", padding="same", kernel_initializer="he_normal"
)(UpSampling2D(size=(2, 2))(conv7))
merge8 = Concatenate()([conv2, up8])
conv8 = Conv2D(
128, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(merge8)
conv8 = Conv2D(
128, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(conv8)
up9 = Conv2D(
64, 2, activation="relu", padding="same", kernel_initializer="he_normal"
)(UpSampling2D(size=(2, 2))(conv8))
merge9 = Concatenate()([conv1, up9])
conv9 = Conv2D(
64, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(merge9)
conv9 = Conv2D(
64, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(conv9)
conv9 = Conv2D(
2, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(conv9)
outputs = Conv2D(1, 1, activation="sigmoid")(conv9)
return Model(inputs=inputs, outputs=outputs)
def discriminator_model(input_shape):
img_A = Input(input_shape)
img_B = Input(input_shape)
x = Concatenate(axis=-1)([img_A, img_B])
x = Conv2D(64, kernel_size=4, strides=2, padding="same")(x)
x = LeakyReLU(alpha=0.2)(x)
x = Conv2D(128, kernel_size=4, strides=2, padding="same")(x)
x = LeakyReLU(alpha=0.2)(x)
x = BatchNormalization(momentum=0.8)(x)
x = Conv2D(256, kernel_size=4, strides=2, padding="same")(x)
x = LeakyReLU(alpha=0.2)(x)
x = BatchNormalization(momentum=0.8)(x)
x = Conv2D(256, kernel_size=4, strides=2, padding="same")(x)
x = LeakyReLU(alpha=0.2)(x)
x = BatchNormalization(momentum=0.8)(x)
outputs = Conv2D(1, kernel_size=4, strides=1, padding="same", activation="sigmoid")(
x
)
return Model([img_A, img_B], outputs)
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
mse = tf.keras.losses.MeanSquaredError()
def discriminator_loss(real_predicts, fake_predicts):
real_loss = cross_entropy(tf.ones_like(real_predicts), real_predicts)
fake_loss = cross_entropy(tf.zeros_like(fake_predicts), fake_predicts)
return real_loss + fake_loss
def generator_loss(real_images, fake_images, fake_predicts):
fake_loss = cross_entropy(tf.ones_like(fake_predicts), fake_predicts)
mse_loss = mse(real_images, fake_images)
return fake_loss + mse_loss
generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)
input_shape = (256, 256, 1)
generator = generator_model(input_shape)
discriminator = discriminator_model(input_shape)
# ## Data Loader
# ## Training
epochs = 80
batch_size = 128
@tf.function
def train_step(input_images, output_images):
with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
generated_images = generator(input_images, training=True)
fake_predicts = discriminator(generated_images, training=True)
real_prediects = discriminator(output_images, training=True)
gen_loss = generator_loss(input_images, generated_images, fake_predicts)
disc_loss = discriminator_loss(real_prediects, fake_predicts)
gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
gradients_of_discriminator = disc_tape.gradient(
disc_loss, discriminator.trainable_variables
)
generator_optimizer.apply_gradients(
zip(gradients_of_generator, generator.trainable_variables)
)
discriminator_optimizer.apply_gradients(
zip(gradients_of_discriminator, discriminator.trainable_variables)
)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/293/129293371.ipynb
| null | null |
[{"Id": 129293371, "ScriptId": 38403353, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4248412, "CreationDate": "05/12/2023 14:04:30", "VersionNumber": 1.0, "Title": "DE-GAN", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 127.0, "LinesInsertedFromPrevious": 127.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import tensorflow as tf
print(tf.__version__) # 2.11.0
# # Model
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import (
Conv2D,
MaxPooling2D,
Dropout,
Concatenate,
UpSampling2D,
LeakyReLU,
BatchNormalization,
)
from tensorflow.keras.initializers import HeNormal
from tensorflow.keras.activations import relu
from tensorflow.keras.optimizers import Adam
def generator_model(input_shape, biggest_layer=512):
inputs = Input(shape=input_shape)
conv1 = Conv2D(
64, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(inputs)
conv1 = Conv2D(
64, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(conv1)
pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)
conv2 = Conv2D(
128, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(pool1)
conv2 = Conv2D(
128, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(conv2)
pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)
conv3 = Conv2D(
256, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(pool2)
conv3 = Conv2D(
256, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(conv3)
pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)
conv4 = Conv2D(
biggest_layer // 2,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(pool3)
conv4 = Conv2D(
biggest_layer // 2,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(conv4)
drop4 = Dropout(0.5)(conv4)
pool4 = MaxPooling2D(pool_size=(2, 2))(drop4)
conv5 = Conv2D(
biggest_layer,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(pool4)
conv5 = Conv2D(
biggest_layer,
3,
activation="relu",
padding="same",
kernel_initializer="he_normal",
)(conv5)
drop5 = Dropout(0.5)(conv5)
up6 = Conv2D(
512, 2, activation="relu", padding="same", kernel_initializer="he_normal"
)(UpSampling2D(size=(2, 2))(drop5))
merge6 = Concatenate()([drop4, up6])
conv6 = Conv2D(
512, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(merge6)
conv6 = Conv2D(
512, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(conv6)
up7 = Conv2D(
256, 2, activation="relu", padding="same", kernel_initializer="he_normal"
)(UpSampling2D(size=(2, 2))(conv6))
merge7 = Concatenate()([conv3, up7])
conv7 = Conv2D(
256, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(merge7)
conv7 = Conv2D(
256, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(conv7)
up8 = Conv2D(
128, 2, activation="relu", padding="same", kernel_initializer="he_normal"
)(UpSampling2D(size=(2, 2))(conv7))
merge8 = Concatenate()([conv2, up8])
conv8 = Conv2D(
128, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(merge8)
conv8 = Conv2D(
128, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(conv8)
up9 = Conv2D(
64, 2, activation="relu", padding="same", kernel_initializer="he_normal"
)(UpSampling2D(size=(2, 2))(conv8))
merge9 = Concatenate()([conv1, up9])
conv9 = Conv2D(
64, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(merge9)
conv9 = Conv2D(
64, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(conv9)
conv9 = Conv2D(
2, 3, activation="relu", padding="same", kernel_initializer="he_normal"
)(conv9)
outputs = Conv2D(1, 1, activation="sigmoid")(conv9)
return Model(inputs=inputs, outputs=outputs)
def discriminator_model(input_shape):
img_A = Input(input_shape)
img_B = Input(input_shape)
x = Concatenate(axis=-1)([img_A, img_B])
x = Conv2D(64, kernel_size=4, strides=2, padding="same")(x)
x = LeakyReLU(alpha=0.2)(x)
x = Conv2D(128, kernel_size=4, strides=2, padding="same")(x)
x = LeakyReLU(alpha=0.2)(x)
x = BatchNormalization(momentum=0.8)(x)
x = Conv2D(256, kernel_size=4, strides=2, padding="same")(x)
x = LeakyReLU(alpha=0.2)(x)
x = BatchNormalization(momentum=0.8)(x)
x = Conv2D(256, kernel_size=4, strides=2, padding="same")(x)
x = LeakyReLU(alpha=0.2)(x)
x = BatchNormalization(momentum=0.8)(x)
outputs = Conv2D(1, kernel_size=4, strides=1, padding="same", activation="sigmoid")(
x
)
return Model([img_A, img_B], outputs)
cross_entropy = tf.keras.losses.BinaryCrossentropy(from_logits=True)
mse = tf.keras.losses.MeanSquaredError()
def discriminator_loss(real_predicts, fake_predicts):
real_loss = cross_entropy(tf.ones_like(real_predicts), real_predicts)
fake_loss = cross_entropy(tf.zeros_like(fake_predicts), fake_predicts)
return real_loss + fake_loss
def generator_loss(real_images, fake_images, fake_predicts):
fake_loss = cross_entropy(tf.ones_like(fake_predicts), fake_predicts)
mse_loss = mse(real_images, fake_images)
return fake_loss + mse_loss
generator_optimizer = tf.keras.optimizers.Adam(1e-4)
discriminator_optimizer = tf.keras.optimizers.Adam(1e-4)
input_shape = (256, 256, 1)
generator = generator_model(input_shape)
discriminator = discriminator_model(input_shape)
# ## Data Loader
# ## Training
epochs = 80
batch_size = 128
@tf.function
def train_step(input_images, output_images):
with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
generated_images = generator(input_images, training=True)
fake_predicts = discriminator(generated_images, training=True)
real_prediects = discriminator(output_images, training=True)
gen_loss = generator_loss(input_images, generated_images, fake_predicts)
disc_loss = discriminator_loss(real_prediects, fake_predicts)
gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
gradients_of_discriminator = disc_tape.gradient(
disc_loss, discriminator.trainable_variables
)
generator_optimizer.apply_gradients(
zip(gradients_of_generator, generator.trainable_variables)
)
discriminator_optimizer.apply_gradients(
zip(gradients_of_discriminator, discriminator.trainable_variables)
)
| false | 0 | 2,081 | 0 | 2,081 | 2,081 |
||
129370435
|
# # Data Used
# ## - Image
# - RBG
# - NIR
# - Elevation (crop around location) - need only location (lon, lat)
# - Soil Rasters (crop around location) - need only location (lon, lat)
# - Land Cover (crop around location) - need only location (lon, lat)
# - Human Footprint - summarized - most recent ones (crop around location) - no function needed - need only location (lon, lat)
# ## - Tabular
# - Lon, Lat
# - dayoftheYear
# - year
# - geoUncertaintyinM
# # Loading the libraries
from data.GLC23PatchesProviders import (
MultipleRasterPatchProvider,
RasterPatchProvider,
JpegPatchProvider,
)
from data.GLC23Datasets import PatchesDataset
import torch
from torch.utils.data.dataset import Dataset
from torch.utils.data.dataloader import DataLoader
from sklearn.metrics import precision_score, recall_score, f1_score
from torch import nn
from torchvision import transforms
from torchvision import models
from torch.utils.tensorboard import SummaryWriter
import seaborn as sns
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import os
import warnings
warnings.filterwarnings("ignore")
# # Declaring the data folders
# data_path = 'data/sample_data/' # root path of the data
data_path = "/Volumes/MasterDrive/GeoLifeClef_2023/"
train_path = data_path + "Presence_only_occurrences/Presences_only_train.csv"
validation_path = data_path + "Presence_Absence_surveys/Presences_Absences_train.csv"
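# A minimal sketch (not part of the original pipeline) of how the imported patch providers could be
# instantiated for the modalities listed under "Data Used". The sub-folder names and the provider
# constructor arguments below are assumptions about the local data layout, not values confirmed by
# this notebook; adjust them to your own copy of the GeoLifeCLEF 2023 data.
rgb_nir_provider = JpegPatchProvider(
    data_path + "SatelliteImages/"
)  # RGB + NIR image patches cropped around each (lon, lat)
elevation_provider = MultipleRasterPatchProvider(
    data_path + "EnvironmentalRasters/Elevation/"
)  # elevation crop around the occurrence location
soil_provider = MultipleRasterPatchProvider(
    data_path + "EnvironmentalRasters/Soilgrids/"
)  # soil raster crops
landcover_provider = MultipleRasterPatchProvider(
    data_path + "EnvironmentalRasters/LandCover/"
)  # land cover crop
human_footprint_provider = MultipleRasterPatchProvider(
    data_path + "EnvironmentalRasters/HumanFootprint/summarized/"
)  # most recent summarized human footprint layers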
# # EDA (Exploratory Data Analysis) & SAMPLING
train_path = data_path + "Presence_only_occurrences/Presences_only_train.csv"
validation_path = data_path + "Presence_Absence_surveys/Presences_Absences_train.csv"
train_df = pd.read_csv(train_path, sep=";", header="infer", low_memory=True)
validation_df = pd.read_csv(validation_path, sep=";", header="infer", low_memory=True)
print(len(train_df))
print(len(validation_df))
# # Visualize the distribution of the classes (helps to know how big the imbalance is in the dataset)
# train_path = data_path+'Presence_only_occurrences/Presences_only_train_sample.csv'
# validation_path = data_path+'Presence_absences_occurrences/Presences_Absences_train_sample.csv'
train_nunique = train_df.speciesId.nunique()
sns.countplot(data=train_df, x="speciesId").set(
title=f"Train set - Nb duplicates Species: {len(train_df) - train_nunique} Nb Unique Species: {train_nunique}"
)
plt.show()
val_nunique = validation_df.speciesId.nunique()
sns.countplot(data=validation_df, x="speciesId").set(
title=f"Validation set - Nb duplicates Species: {len(validation_df) - val_nunique} Nb Unique Species: {val_nunique}"
)
plt.show()
# - In the Presence_only file, each observation records only one species (which does not mean other species are not present at that location). This information is given in the competition description.
# - But a species can actually be found at other locations too, right?
# - So this graph illustrates how many times each species appears in the whole dataset.
# - This does not show the label imbalance itself (for that we would plot observations vs. species).
# - It rather shows that lots of species have been spotted more than 4K times (all locations included).
# - The validation data does not have as many observations per label (but that is okay because it is only used for evaluation).
# - Some species were not sampled in the validation set: training on them might still help the model generalise and learn better?
train_groupby = train_df.groupby(by="patchID", as_index=False).count()
validation_groupby = validation_df.groupby(by="patchID", as_index=False).count()
sns.barplot(data=train_groupby, x="patchID", y="speciesId").set(
title="Train Set - Number of species observed by patchID"
)
plt.show()
sns.barplot(data=validation_groupby, x="patchID", y="speciesId").set(
title="Validation Set - Number of species observed by patchID"
)
plt.show()
# # As expected, in the train set we only have one species detected per observation (or patchID), which does not mean others are not present
# - What is surprising is the validation set: we expected more species per observation -> this might mean the imbalance is not as great as we thought?
# - The goal is to have a training sample with as many different labels as possible, but also with ALL the labels of the validation set
# # Split the training set to get the same labels as in the validations set
# ## I should get the same labels as in the validation set, but can add more (until training can't handle it)
# When sampling, we have to make sure the labels in the validation set are also present in the training set
# Now the training sample has exactly the same labels as the validation set
validation_labels = validation_df.speciesId.unique()
is_contained_in_df = train_df.speciesId.isin(validation_labels)
filtered_train_df = train_df[is_contained_in_df]
print(
f"Number of unique labels in training before: {train_df.speciesId.nunique()} after: {filtered_train_df.speciesId.nunique()}"
)
print(
f"Number of unique labels in validation before: {validation_df.speciesId.nunique()} after: {validation_df.speciesId.nunique()}"
)
# Compare the quantity of data available for the filtered training (i.e we have the same labels as val set) vs the validation set
train_nunique = filtered_train_df.speciesId.nunique()
sns.countplot(data=filtered_train_df, x="speciesId").set(
title=f"Filtered Train set - Nb duplicates Species: {len(filtered_train_df) - train_nunique} Nb Unique Species: {train_nunique}"
)
plt.show()
val_nunique = validation_df.speciesId.nunique()
sns.countplot(data=validation_df, x="speciesId").set(
title=f"Validation set - Nb duplicates Species: {len(validation_df) - val_nunique} Nb Unique Species: {val_nunique}"
)
plt.show()
# As we can see, we have a lot of data for the training of each species
# # Sample train data
# - As we can see, we have more than 4K observations for lots of species: for a local run, we are going to keep only some of them, not all
# Extract a subset of the train data for each speciesId group (to have just enough, but not too much, data to train each label); a variant keeping up to N observations per species is sketched right after the sampling code below
# sample_train_df = train_df.groupby("speciesId").apply(lambda x: x.sample(1)).reset_index(drop=True)
sample_filtered_train_df = (
filtered_train_df.groupby("speciesId")
.apply(lambda x: x.sample(1))
.reset_index(drop=True)
)
sample_validation_df = (
validation_df.groupby("speciesId")
.apply(lambda x: x.sample(1))
.reset_index(drop=True)
)
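# Variant of the sampling above (illustrative sketch, not used downstream): instead of keeping
# exactly one observation per species, keep up to MAX_PER_SPECIES observations per species so
# frequent species contribute more training data. MAX_PER_SPECIES is an arbitrary example value.
MAX_PER_SPECIES = 5
sample_filtered_train_df_topn = (
    filtered_train_df.groupby("speciesId")
    .apply(lambda x: x.sample(min(len(x), MAX_PER_SPECIES)))
    .reset_index(drop=True)
)
print("Up-to-N sample size:", len(sample_filtered_train_df_topn))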
print("Train set quantity:", len(sample_filtered_train_df))
print("Validation set quantity:", len(sample_validation_df))
print(
"the ratio train/validation is:",
round(len(sample_filtered_train_df) / len(sample_validation_df), ndigits=2),
)
# Normally these graphs should show that we have one species per patchID in the training set
# and a lot more species per patchID in the validation set
sample_train_groupby = sample_filtered_train_df.groupby(
by="patchID", as_index=False
).count()
sample_validation_groupby = sample_validation_df.groupby(
by="patchID", as_index=False
).count()
sns.barplot(data=sample_train_groupby, x="patchID", y="speciesId").set(
title="Sample Train Set - Number of species observed by patchID"
)
plt.show()
sns.barplot(data=sample_validation_groupby, x="patchID", y="speciesId").set(
title="Sample Validation Set - Number of species observed by patchID"
)
plt.show()
# Save the new occurrence CSVs to be used as the new data for training our models
# train_df = pd.read_csv(train_path, sep=";", header='infer', low_memory=True)
train_path_sampled = (
data_path + "Presence_only_occurrences/Presences_only_train_sampled.csv"
)
validation_path_sampled = (
data_path + "Presence_Absence_surveys/Presences_Absences_train_sampled.csv"
)
sample_filtered_train_df.to_csv(train_path_sampled, sep=";", index=False)
sample_validation_df.to_csv(validation_path_sampled, sep=";", index=False)
# TODO Maybe i should divide the dataset here so that it doesn't load everything ?
# configure providers
p_rgb = JpegPatchProvider(
root_path=data_path + "SatelliteImages/", size=20
) # take all sentinel imagery layers (r,g,b,nir = 4 layers)
p_hfp_s = RasterPatchProvider(
raster_path=data_path
+ "EnvironmentalRasters/HumanFootprint/summarized/HFP2009_WGS84.tif",
size=20,
fill_zero_if_error=True,
)  # take the human footprint 2009 summarized raster (a single raster)
p_hfp_d = MultipleRasterPatchProvider(
rasters_folder=data_path + "EnvironmentalRasters/HumanFootprint/detailed/",
size=20,
fill_zero_if_error=True,
) # take all rasters from human footprint detailed (2 rasters here)
train_dataset = PatchesDataset(
occurrences=train_path_sampled, providers=(p_rgb, p_hfp_s, p_hfp_d)
)
test_dataset = PatchesDataset(
occurrences=validation_path_sampled, providers=(p_rgb, p_hfp_s, p_hfp_d)
)
# Apparently we don't always have all the data for all the observations (sometimes the NIR & RGB data are missing) -> so we just fill those layers with zeros
test_dataset.plot_patch(1) # plot all the images of a patch
# # Create a torch dataloader
# NOTE: the full-dataset scan below is redundant (its result is overwritten by the next
# assignment), so it is left commented out:
# unique_label_for_all = sorted(
#     set([train_dataset[specieId][1] for specieId in range(train_dataset.__len__())])
# )
unique_label_for_all = sample_validation_df.speciesId.unique().tolist()
# unique_label_for_all = [str(k) for k in unique_label_for_all]
class geolife_data_wrapper(Dataset):
def __init__(
self, custom_dataset, unique_labels=unique_label_for_all, transforms=None
):
self.transforms = transforms
samples = custom_dataset
# self.classes = sorted( set([ samples[specieId][1] for specieId in range(samples.__len__()) ])) #labels without duplicates
self.classes = unique_labels
self.imgs = []
self.annos = []
for sample in samples:
self.imgs.append(sample[0])
self.annos.append(sample[1])
for item_id in range(len(self.annos)):
item = self.annos[item_id]
vector = [
str(cls) == str(item) for cls in self.classes
            ]  # can we use "in" here if item is an integer and not a list?
self.annos[item_id] = np.array(vector, dtype=float)
def __getitem__(self, item):
anno = self.annos[item]
img = self.imgs[item]
if self.transforms is not None:
img = self.transforms(img)
return img, anno
def __len__(self):
return len(self.imgs)
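# Quick illustration of the label encoding performed by the wrapper above (sketch): every
# annotation becomes a multi-hot vector over unique_label_for_all, with a single 1 here
# because each training observation carries exactly one speciesId.
_demo_species = unique_label_for_all[0]
_demo_vector = np.array(
    [str(cls) == str(_demo_species) for cls in unique_label_for_all], dtype=float
)
print("Encoded demo vector sum (should be 1.0):", _demo_vector.sum())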
# Use torchvision's implementation of ResNeXt, but replace the FC layer for our number of classes and use a Sigmoid (multi-label output) instead of the default Softmax; the first conv layer is also swapped to accept 7 input channels, so that layer is trained from scratch.
class Resnext50(nn.Module):
def __init__(self, n_classes):
super().__init__()
resnet = models.resnext50_32x4d(weights="ResNeXt50_32X4D_Weights.DEFAULT")
resnet.conv1 = nn.Conv2d(7, 64, kernel_size=7, stride=2, padding=3, bias=False)
resnet.fc = nn.Sequential(
nn.Dropout(p=0.2),
nn.Linear(in_features=resnet.fc.in_features, out_features=n_classes),
)
self.base_model = resnet
self.sigm = nn.Sigmoid()
def forward(self, x):
return self.sigm(self.base_model(x))
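# Minimal shape check for the model above (illustrative sketch): feed a random batch with the
# 7 input channels expected by the replaced first conv layer and confirm there is one sigmoid
# score per class. The batch size of 2 and the 8 classes are arbitrary example values.
_smoke_model = Resnext50(n_classes=8)
_smoke_out = _smoke_model(torch.randn(2, 7, 128, 128))
print("Smoke-test output shape:", _smoke_out.shape)  # expected: torch.Size([2, 8])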
# Use threshold to define predicted labels and invoke sklearn's metrics with different averaging strategies.
def calculate_metrics(pred, target, threshold=0.5):
pred = np.array(pred > threshold, dtype=float)
return {
"micro/precision": precision_score(y_true=target, y_pred=pred, average="micro"),
"micro/recall": recall_score(y_true=target, y_pred=pred, average="micro"),
"micro/f1": f1_score(y_true=target, y_pred=pred, average="micro"),
#'macro/precision': precision_score(y_true=target, y_pred=pred, average='macro'),
#'macro/recall': recall_score(y_true=target, y_pred=pred, average='macro'),
#'macro/f1': f1_score(y_true=target, y_pred=pred, average='macro'),
#'samples/precision': precision_score(y_true=target, y_pred=pred, average='samples'),
"samples/recall": recall_score(y_true=target, y_pred=pred, average="samples"),
"samples/f1": f1_score(y_true=target, y_pred=pred, average="samples"),
}
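# Tiny usage example for calculate_metrics (illustrative sketch): two samples, three classes,
# hand-made probabilities, just to show the expected inputs (probabilities vs. multi-hot
# targets) and the returned dictionary of scores.
_demo_pred = np.array([[0.9, 0.2, 0.7], [0.1, 0.8, 0.4]])
_demo_target = np.array([[1.0, 0.0, 1.0], [0.0, 1.0, 0.0]])
print(calculate_metrics(_demo_pred, _demo_target))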
# Initialize the training parameters.
num_workers = 8 # Number of CPU processes for data preprocessing
lr = 1e-4 # Learning rate
batch_size = 32
save_freq = 1 # Save checkpoint frequency (epochs)
test_freq = 1 # Test model frequency (iterations)
max_epoch_number = 10 # Number of epochs for training
# Note: on the small subset of data, overfitting happens after 30-35 epochs
# TODO: need a way to set the normalization parameters below from our own data;
# the values used here are the standard 3-channel ImageNet statistics (one way to
# estimate 7-channel statistics is sketched right after them)
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
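# One way to set these normalization parameters (illustrative sketch, assuming each item of
# the PatchesDataset is a (patch, label) pair with a channels-first patch, as used elsewhere
# in this notebook): estimate per-channel mean/std from a small subsample of training patches
# instead of reusing the 3-channel ImageNet statistics above.
_n_probe = min(100, len(train_dataset))
_channel_sums, _channel_sq_sums, _n_pixels = None, None, 0
for _i in range(_n_probe):
    _patch = torch.as_tensor(train_dataset[_i][0], dtype=torch.float32)
    if _channel_sums is None:
        _channel_sums = torch.zeros(_patch.shape[0])
        _channel_sq_sums = torch.zeros(_patch.shape[0])
    _channel_sums += _patch.sum(dim=(1, 2))
    _channel_sq_sums += (_patch**2).sum(dim=(1, 2))
    _n_pixels += _patch.shape[1] * _patch.shape[2]
_est_mean = _channel_sums / _n_pixels
_est_std = (_channel_sq_sums / _n_pixels - _est_mean**2).sqrt()
print("Estimated per-channel mean:", _est_mean)
print("Estimated per-channel std:", _est_std)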
device = torch.device("cpu")
# Save path for checkpoints
save_path = "chekpoints/"
# Save path for logs
logdir = "logs/"
# Run tensorboard
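# TensorBoard itself is usually launched outside this script (illustrative note): from a
# terminal with `tensorboard --logdir logs/`, or in a Jupyter notebook with the two magic
# commands below, kept commented so the plain script stays runnable.
# %load_ext tensorboard
# %tensorboard --logdir logs/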
# Here is an auxiliary function for checkpoint saving.
def checkpoint_save(model, save_path, epoch):
f = os.path.join(save_path, "checkpoint-{:06d}.pth".format(epoch))
if "module" in dir(model):
torch.save(model.module.state_dict(), f)
else:
torch.save(model.state_dict(), f)
print("saved checkpoint:", f)
# Test preprocessing
test_transform = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize(mean, std),
]
)
# print(tuple(np.array(np.array(mean)*255).tolist()))
# Train preprocessing
train_transform = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.RandomHorizontalFlip(),
transforms.ColorJitter(),
transforms.RandomAffine(
degrees=20, translate=(0.2, 0.2), scale=(0.5, 1.5), shear=None
),
transforms.ToTensor(),
transforms.Normalize(mean, std),
]
)
# Initialize the dataloaders for training.
# NOTE: the train/test transforms defined above are not applied here (transforms=None);
# they assume 3-channel image inputs, while these patches are already 7-channel tensors.
train_dataset = geolife_data_wrapper(
    custom_dataset=train_dataset, transforms=None
)  # tensors of shape 7, 128, 128
test_dataset = geolife_data_wrapper(
    custom_dataset=test_dataset, transforms=None
)  # tensors of shape 7, 128, 128
train_dataloader = DataLoader(
train_dataset, batch_size=batch_size, shuffle=True, drop_last=True
)
test_dataloader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)
# Make sure the number of label columns (classes) in the training set is equal to the one in the test set
imgs_test, batch_targets_test = next(iter(test_dataloader))
imgs_train, batch_targets_train = next(iter(train_dataloader))
print("The number of Unique labels detected in test set is: ", batch_targets_test.shape)
print(
"The number of Unique labels detected in the train set is :",
batch_targets_train.shape,
)
num_train_batches = int(np.ceil(len(train_dataset) / batch_size))
# Initialize the model
model = Resnext50(len(train_dataset.classes))
# Switch model to training mode and move it to the selected device (CPU here).
model.train()
model = model.to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
# If more than one GPU is available we can use both to speed up the training.
if torch.cuda.device_count() > 1:
model = nn.DataParallel(model)
os.makedirs(save_path, exist_ok=True)
# Loss function
criterion = nn.BCELoss()
# TensorBoard logger
logger = SummaryWriter(logdir)
# Run training
epoch = 0
iteration = 0
while True:
batch_losses = []
for imgs, targets in train_dataloader:
imgs, targets = imgs.to(device), targets.to(device)
optimizer.zero_grad()
model_result = model(imgs)
loss = criterion(model_result, targets.type(torch.float))
batch_loss_value = loss.item()
loss.backward()
optimizer.step()
logger.add_scalar("train_loss", batch_loss_value, iteration)
batch_losses.append(batch_loss_value)
with torch.no_grad():
result = calculate_metrics(
model_result.cpu().numpy(), targets.cpu().numpy()
)
for metric in result:
logger.add_scalar("train/" + metric, result[metric], iteration)
if iteration % test_freq == 0:
model.eval()
with torch.no_grad():
model_result = []
targets = []
for imgs, batch_targets in test_dataloader:
imgs = imgs.to(device)
model_batch_result = model(imgs)
model_result.extend(model_batch_result.cpu().numpy())
targets.extend(batch_targets.cpu().numpy())
result = calculate_metrics(np.array(model_result), np.array(targets))
for metric in result:
logger.add_scalar("test/" + metric, result[metric], iteration)
print(
f"epoch:{epoch} iter:{iteration} precision: {result['micro/precision']} F1: {result['micro/f1']} Recall: {result['micro/recall']} sample F1: {result['samples/f1']} sample recall: {result['samples/recall']}"
)
model.train()
iteration += 1
loss_value = np.mean(batch_losses)
# print("epoch:{:2d} iter:{:3d} train: loss:{:.3f}".format(epoch, iteration, loss_value))
if epoch % save_freq == 0:
checkpoint_save(model, save_path, epoch)
epoch += 1
if max_epoch_number < epoch:
break
# Evaluation on the test data (a sketch for creating a submission-style file follows after the inference loop below)
# Run inference on the test data
model.eval()
for sample_id in [1, 2, 3, 4, 6]:
test_img, test_labels = test_dataset[sample_id]
with torch.no_grad():
raw_pred = model(test_img.unsqueeze(0)).cpu().numpy()[0]
raw_pred = np.array(raw_pred > 0.5, dtype=float)
    predicted_labels = np.array(train_dataset.classes)[
        np.argwhere(raw_pred > 0)[:, 0]
    ]  # careful here: this assumes the train and test class lists are identical (both are unique_label_for_all)
if not len(predicted_labels):
predicted_labels = ["no predictions"]
img_labels = np.array(train_dataset.classes)[np.argwhere(test_labels > 0)[:, 0]]
print(
"Predicted labels: {} \nGT labels: {}".format(predicted_labels, img_labels),
end="\n ------------ \n",
)
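# Sketch of turning predictions into a submission-style CSV (illustrative only: the exact
# GeoLifeCLEF submission format, the id to report and the column names should be checked
# against the competition rules; here we assume one row per test patch with a space-separated
# list of predicted speciesIds).
submission_rows = []
model.eval()
with torch.no_grad():
    for idx in range(len(test_dataset)):
        patch, _ = test_dataset[idx]
        probs = model(patch.unsqueeze(0)).cpu().numpy()[0]
        pred_ids = np.array(train_dataset.classes)[np.argwhere(probs > 0.5)[:, 0]]
        submission_rows.append(
            {"Id": idx, "Predicted": " ".join(str(int(s)) for s in pred_ids)}
        )
submission_df = pd.DataFrame(submission_rows)
submission_df.to_csv("submission_sketch.csv", index=False)
submission_df.head()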
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/370/129370435.ipynb
| null | null |
[{"Id": 129370435, "ScriptId": 38466374, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4620956, "CreationDate": "05/13/2023 08:00:31", "VersionNumber": 1.0, "Title": "notebook288181fce0", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 391.0, "LinesInsertedFromPrevious": 391.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 5,239 | 0 | 5,239 | 5,239 |
||
129370014
|
<jupyter_start><jupyter_text>Hotel Reservations Dataset
#### **Context**
The online hotel reservation channels have dramatically changed booking possibilities and customers’ behavior. A significant number of hotel reservations are called-off due to cancellations or no-shows. The typical reasons for cancellations include change of plans, scheduling conflicts, etc. This is often made easier by the option to do so free of charge or preferably at a low cost which is beneficial to hotel guests but it is a less desirable and possibly revenue-diminishing factor for hotels to deal with.
###### **Can you predict if the customer is going to honor the reservation or cancel it ?**
Kaggle dataset identifier: hotel-reservations-classification-dataset
<jupyter_code>import pandas as pd
df = pd.read_csv('hotel-reservations-classification-dataset/Hotel Reservations.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 36275 entries, 0 to 36274
Data columns (total 19 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Booking_ID 36275 non-null object
1 no_of_adults 36275 non-null int64
2 no_of_children 36275 non-null int64
3 no_of_weekend_nights 36275 non-null int64
4 no_of_week_nights 36275 non-null int64
5 type_of_meal_plan 36275 non-null object
6 required_car_parking_space 36275 non-null int64
7 room_type_reserved 36275 non-null object
8 lead_time 36275 non-null int64
9 arrival_year 36275 non-null int64
10 arrival_month 36275 non-null int64
11 arrival_date 36275 non-null int64
12 market_segment_type 36275 non-null object
13 repeated_guest 36275 non-null int64
14 no_of_previous_cancellations 36275 non-null int64
15 no_of_previous_bookings_not_canceled 36275 non-null int64
16 avg_price_per_room 36275 non-null float64
17 no_of_special_requests 36275 non-null int64
18 booking_status 36275 non-null object
dtypes: float64(1), int64(13), object(5)
memory usage: 5.3+ MB
<jupyter_text>Examples:
{
"Booking_ID": "INN00001",
"no_of_adults": 2,
"no_of_children": 0,
"no_of_weekend_nights": 1,
"no_of_week_nights": 2,
"type_of_meal_plan": "Meal Plan 1",
"required_car_parking_space": 0,
"room_type_reserved": "Room_Type 1",
"lead_time": 224,
"arrival_year": 2017,
"arrival_month": 10,
"arrival_date": 2,
"market_segment_type": "Offline",
"repeated_guest": 0,
"no_of_previous_cancellations": 0,
"no_of_previous_bookings_not_canceled": 0,
"avg_price_per_room": 65.0,
"no_of_special_requests": 0,
"booking_status": "Not_Canceled"
}
{
"Booking_ID": "INN00002",
"no_of_adults": 2,
"no_of_children": 0,
"no_of_weekend_nights": 2,
"no_of_week_nights": 3,
"type_of_meal_plan": "Not Selected",
"required_car_parking_space": 0,
"room_type_reserved": "Room_Type 1",
"lead_time": 5,
"arrival_year": 2018,
"arrival_month": 11,
"arrival_date": 6,
"market_segment_type": "Online",
"repeated_guest": 0,
"no_of_previous_cancellations": 0,
"no_of_previous_bookings_not_canceled": 0,
"avg_price_per_room": 106.68,
"no_of_special_requests": 1,
"booking_status": "Not_Canceled"
}
{
"Booking_ID": "INN00003",
"no_of_adults": 1,
"no_of_children": 0,
"no_of_weekend_nights": 2,
"no_of_week_nights": 1,
"type_of_meal_plan": "Meal Plan 1",
"required_car_parking_space": 0,
"room_type_reserved": "Room_Type 1",
"lead_time": 1,
"arrival_year": 2018,
"arrival_month": 2,
"arrival_date": 28,
"market_segment_type": "Online",
"repeated_guest": 0,
"no_of_previous_cancellations": 0,
"no_of_previous_bookings_not_canceled": 0,
"avg_price_per_room": 60.0,
"no_of_special_requests": 0,
"booking_status": "Canceled"
}
{
"Booking_ID": "INN00004",
"no_of_adults": 2,
"no_of_children": 0,
"no_of_weekend_nights": 0,
"no_of_week_nights": 2,
"type_of_meal_plan": "Meal Plan 1",
"required_car_parking_space": 0,
"room_type_reserved": "Room_Type 1",
"lead_time": 211,
"arrival_year": 2018,
"arrival_month": 5,
"arrival_date": 20,
"market_segment_type": "Online",
"repeated_guest": 0,
"no_of_previous_cancellations": 0,
"no_of_previous_bookings_not_canceled": 0,
"avg_price_per_room": 100.0,
"no_of_special_requests": 0,
"booking_status": "Canceled"
}
<jupyter_script>import numpy as np
import pandas as pd
import seaborn as sns
# ## Main Types of Machine Learning
# ## Within Supervised Learning
# ### The Titanic project falls under: Supervised Learning > Classification
# #### This is because the goal of Titanic was to predict a classification-type response variable Y: 0 (did not survive), 1 (survived)
# # Creating new features
# #### There are some ideas for this feature-creation step, for example splitting a feature that contains more than one piece of information:
# #### the opposite can also be useful
# ### There is a library for automated feature creation (a hedged usage sketch follows below):
# https://medium.com/datarisk-io/automated-feature-engineering-como-utilizar-o-featuretools-25586438df3d
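# Hedged usage sketch of the featuretools library mentioned above (assumes the featuretools
# 1.x API; names such as add_dataframe / target_dataframe_name may differ in other versions).
# A tiny synthetic dataframe is used so the example stays self-contained.
import featuretools as ft

_demo_orders = pd.DataFrame(
    {"order_id": [1, 2, 3], "amount": [10.0, 25.5, 7.2], "quantity": [1, 3, 2]}
)
_es = ft.EntitySet(id="demo")
_es = _es.add_dataframe(dataframe_name="orders", dataframe=_demo_orders, index="order_id")
_feature_matrix, _feature_defs = ft.dfs(
    entityset=_es, target_dataframe_name="orders", max_depth=1
)
_feature_matrix.head()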
# ### Important: replicate the transformations applied to the training set on the test set as well. For example, converting categorical features to numeric, since that is what models accept (see the sketch below).
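# Minimal sketch of keeping train/test transformations consistent (illustrative): one-hot
# encode a categorical column on the training data, then reindex the test encoding to the
# same columns so both sets share the exact same feature layout.
_train_cat = pd.get_dummies(pd.Series(["A", "B", "A", "C"]), prefix="cat")
_test_cat = pd.get_dummies(pd.Series(["B", "D"]), prefix="cat")
_test_cat = _test_cat.reindex(columns=_train_cat.columns, fill_value=0)
print(_train_cat.columns.tolist(), _test_cat.columns.tolist())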
# ## Titanic dataset
df_train_titanic = pd.read_csv("/kaggle/input/titanic/train.csv")
df_test_titanic = pd.read_csv("/kaggle/input/titanic/test.csv")
df_train_titanic
df_train_titanic.info()
# ### A first variable from which other features can be extracted: Cabin
# #### The data description at https://www.kaggle.com/competitions/titanic/data says it is the cabin number, in this case composed of a letter + a number, as seen below:
df_train_titanic["Cabin"].unique()
# #### Assuming the cabin letter may be relevant, we extract a column with it:
df_train_titanic["Cabin_letter"] = df_train_titanic["Cabin"].str[0]
df_test_titanic["Cabin_letter"] = df_test_titanic["Cabin"].str[0]
df_train_titanic
df_train_titanic["Cabin_letter"].unique()
# ### Split by last name: the idea is that the surname may carry something relevant (detecting family members)
df_train_titanic["Last_Name"] = df_train_titanic["Name"].str.split(",").str[0]
df_test_titanic["Last_Name"] = df_test_titanic["Name"].str.split(",").str[0]  # note: take Last_Name from the test set's own Name column
df_train_titanic["Last_Name"]
df_train_titanic["Last_Name"].value_counts()
# #### Can Pclass and Fare be summarized into the same piece of information?
sns.boxplot(df_train_titanic, x="Pclass", y="Fare")
# ## Hotel reservations dataset
df_train_hotel = pd.read_csv(
"/kaggle/input/hotel-reservations-classification-dataset/Hotel Reservations.csv"
)
df_train_hotel.head()
# #### Looking at the data, one idea is to combine adults and children into a new "Family" feature. It is necessary to check first whether the two original features are individually relevant to the response variable, e.g. with plots or feature correlations (a quick correlation check is sketched below).
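# Quick relevance check for the point above (illustrative sketch): correlate no_of_adults and
# no_of_children with a 0/1 version of the booking_status target before merging them.
_canceled = (df_train_hotel["booking_status"] == "Canceled").astype(int)
print("corr(no_of_adults, canceled):", df_train_hotel["no_of_adults"].corr(_canceled))
print("corr(no_of_children, canceled):", df_train_hotel["no_of_children"].corr(_canceled))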
df_train_hotel["no_family"] = (
df_train_hotel["no_of_adults"] + df_train_hotel["no_of_children"]
)
df_train_hotel.head()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/370/129370014.ipynb
|
hotel-reservations-classification-dataset
|
ahsan81
|
[{"Id": 129370014, "ScriptId": 38424659, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5835064, "CreationDate": "05/13/2023 07:55:39", "VersionNumber": 2.0, "Title": "Features", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 74.0, "LinesInsertedFromPrevious": 43.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 31.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185354709, "KernelVersionId": 129370014, "SourceDatasetVersionId": 4807272}]
|
[{"Id": 4807272, "DatasetId": 2783627, "DatasourceVersionId": 4870756, "CreatorUserId": 10164878, "LicenseName": "Attribution 4.0 International (CC BY 4.0)", "CreationDate": "01/04/2023 12:50:31", "VersionNumber": 1.0, "Title": "Hotel Reservations Dataset", "Slug": "hotel-reservations-classification-dataset", "Subtitle": "Can you predict if customer is going to cancel the reservation ?", "Description": "#### **Context**\nThe online hotel reservation channels have dramatically changed booking possibilities and customers\u2019 behavior. A significant number of hotel reservations are called-off due to cancellations or no-shows. The typical reasons for cancellations include change of plans, scheduling conflicts, etc. This is often made easier by the option to do so free of charge or preferably at a low cost which is beneficial to hotel guests but it is a less desirable and possibly revenue-diminishing factor for hotels to deal with.\n\n###### **Can you predict if the customer is going to honor the reservation or cancel it ?**", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 2783627, "CreatorUserId": 10164878, "OwnerUserId": 10164878.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4807272.0, "CurrentDatasourceVersionId": 4870756.0, "ForumId": 2817657, "Type": 2, "CreationDate": "01/04/2023 12:50:31", "LastActivityDate": "01/04/2023", "TotalViews": 147406, "TotalDownloads": 26740, "TotalVotes": 513, "TotalKernels": 160}]
|
[{"Id": 10164878, "UserName": "ahsan81", "DisplayName": "Ahsan Raza", "RegisterDate": "04/06/2022", "PerformanceTier": 2}]
|
|
[{"hotel-reservations-classification-dataset/Hotel Reservations.csv": {"column_names": "[\"Booking_ID\", \"no_of_adults\", \"no_of_children\", \"no_of_weekend_nights\", \"no_of_week_nights\", \"type_of_meal_plan\", \"required_car_parking_space\", \"room_type_reserved\", \"lead_time\", \"arrival_year\", \"arrival_month\", \"arrival_date\", \"market_segment_type\", \"repeated_guest\", \"no_of_previous_cancellations\", \"no_of_previous_bookings_not_canceled\", \"avg_price_per_room\", \"no_of_special_requests\", \"booking_status\"]", "column_data_types": "{\"Booking_ID\": \"object\", \"no_of_adults\": \"int64\", \"no_of_children\": \"int64\", \"no_of_weekend_nights\": \"int64\", \"no_of_week_nights\": \"int64\", \"type_of_meal_plan\": \"object\", \"required_car_parking_space\": \"int64\", \"room_type_reserved\": \"object\", \"lead_time\": \"int64\", \"arrival_year\": \"int64\", \"arrival_month\": \"int64\", \"arrival_date\": \"int64\", \"market_segment_type\": \"object\", \"repeated_guest\": \"int64\", \"no_of_previous_cancellations\": \"int64\", \"no_of_previous_bookings_not_canceled\": \"int64\", \"avg_price_per_room\": \"float64\", \"no_of_special_requests\": \"int64\", \"booking_status\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 36275 entries, 0 to 36274\nData columns (total 19 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Booking_ID 36275 non-null object \n 1 no_of_adults 36275 non-null int64 \n 2 no_of_children 36275 non-null int64 \n 3 no_of_weekend_nights 36275 non-null int64 \n 4 no_of_week_nights 36275 non-null int64 \n 5 type_of_meal_plan 36275 non-null object \n 6 required_car_parking_space 36275 non-null int64 \n 7 room_type_reserved 36275 non-null object \n 8 lead_time 36275 non-null int64 \n 9 arrival_year 36275 non-null int64 \n 10 arrival_month 36275 non-null int64 \n 11 arrival_date 36275 non-null int64 \n 12 market_segment_type 36275 non-null object \n 13 repeated_guest 36275 non-null int64 \n 14 no_of_previous_cancellations 36275 non-null int64 \n 15 no_of_previous_bookings_not_canceled 36275 non-null int64 \n 16 avg_price_per_room 36275 non-null float64\n 17 no_of_special_requests 36275 non-null int64 \n 18 booking_status 36275 non-null object \ndtypes: float64(1), int64(13), object(5)\nmemory usage: 5.3+ MB\n", "summary": "{\"no_of_adults\": {\"count\": 36275.0, \"mean\": 1.8449620951068229, \"std\": 0.51871483790129, \"min\": 0.0, \"25%\": 2.0, \"50%\": 2.0, \"75%\": 2.0, \"max\": 4.0}, \"no_of_children\": {\"count\": 36275.0, \"mean\": 0.10527911784975878, \"std\": 0.402648063589278, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 10.0}, \"no_of_weekend_nights\": {\"count\": 36275.0, \"mean\": 0.810723638869745, \"std\": 0.8706436147600001, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 2.0, \"max\": 7.0}, \"no_of_week_nights\": {\"count\": 36275.0, \"mean\": 2.2043004824259134, \"std\": 1.4109048548240168, \"min\": 0.0, \"25%\": 1.0, \"50%\": 2.0, \"75%\": 3.0, \"max\": 17.0}, \"required_car_parking_space\": {\"count\": 36275.0, \"mean\": 0.0309855272226051, \"std\": 0.17328084736891503, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"lead_time\": {\"count\": 36275.0, \"mean\": 85.23255685733976, \"std\": 85.93081669923257, \"min\": 0.0, \"25%\": 17.0, \"50%\": 57.0, \"75%\": 126.0, \"max\": 443.0}, \"arrival_year\": {\"count\": 36275.0, \"mean\": 2017.8204272915232, \"std\": 0.3838364395590784, \"min\": 2017.0, \"25%\": 2018.0, \"50%\": 2018.0, \"75%\": 
2018.0, \"max\": 2018.0}, \"arrival_month\": {\"count\": 36275.0, \"mean\": 7.423652653342522, \"std\": 3.0698944112257687, \"min\": 1.0, \"25%\": 5.0, \"50%\": 8.0, \"75%\": 10.0, \"max\": 12.0}, \"arrival_date\": {\"count\": 36275.0, \"mean\": 15.596995175740869, \"std\": 8.740447368632898, \"min\": 1.0, \"25%\": 8.0, \"50%\": 16.0, \"75%\": 23.0, \"max\": 31.0}, \"repeated_guest\": {\"count\": 36275.0, \"mean\": 0.02563749138525155, \"std\": 0.15805346903513287, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"no_of_previous_cancellations\": {\"count\": 36275.0, \"mean\": 0.023349414197105445, \"std\": 0.3683314479197876, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 13.0}, \"no_of_previous_bookings_not_canceled\": {\"count\": 36275.0, \"mean\": 0.15341144038594073, \"std\": 1.7541707114426388, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 58.0}, \"avg_price_per_room\": {\"count\": 36275.0, \"mean\": 103.42353907649897, \"std\": 35.08942403637036, \"min\": 0.0, \"25%\": 80.3, \"50%\": 99.45, \"75%\": 120.0, \"max\": 540.0}, \"no_of_special_requests\": {\"count\": 36275.0, \"mean\": 0.6196554100620262, \"std\": 0.7862358983956064, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 5.0}}", "examples": "{\"Booking_ID\":{\"0\":\"INN00001\",\"1\":\"INN00002\",\"2\":\"INN00003\",\"3\":\"INN00004\"},\"no_of_adults\":{\"0\":2,\"1\":2,\"2\":1,\"3\":2},\"no_of_children\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"no_of_weekend_nights\":{\"0\":1,\"1\":2,\"2\":2,\"3\":0},\"no_of_week_nights\":{\"0\":2,\"1\":3,\"2\":1,\"3\":2},\"type_of_meal_plan\":{\"0\":\"Meal Plan 1\",\"1\":\"Not Selected\",\"2\":\"Meal Plan 1\",\"3\":\"Meal Plan 1\"},\"required_car_parking_space\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"room_type_reserved\":{\"0\":\"Room_Type 1\",\"1\":\"Room_Type 1\",\"2\":\"Room_Type 1\",\"3\":\"Room_Type 1\"},\"lead_time\":{\"0\":224,\"1\":5,\"2\":1,\"3\":211},\"arrival_year\":{\"0\":2017,\"1\":2018,\"2\":2018,\"3\":2018},\"arrival_month\":{\"0\":10,\"1\":11,\"2\":2,\"3\":5},\"arrival_date\":{\"0\":2,\"1\":6,\"2\":28,\"3\":20},\"market_segment_type\":{\"0\":\"Offline\",\"1\":\"Online\",\"2\":\"Online\",\"3\":\"Online\"},\"repeated_guest\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"no_of_previous_cancellations\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"no_of_previous_bookings_not_canceled\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"avg_price_per_room\":{\"0\":65.0,\"1\":106.68,\"2\":60.0,\"3\":100.0},\"no_of_special_requests\":{\"0\":0,\"1\":1,\"2\":0,\"3\":0},\"booking_status\":{\"0\":\"Not_Canceled\",\"1\":\"Not_Canceled\",\"2\":\"Canceled\",\"3\":\"Canceled\"}}"}}]
| true | 3 |
<start_data_description><data_path>hotel-reservations-classification-dataset/Hotel Reservations.csv:
<column_names>
['Booking_ID', 'no_of_adults', 'no_of_children', 'no_of_weekend_nights', 'no_of_week_nights', 'type_of_meal_plan', 'required_car_parking_space', 'room_type_reserved', 'lead_time', 'arrival_year', 'arrival_month', 'arrival_date', 'market_segment_type', 'repeated_guest', 'no_of_previous_cancellations', 'no_of_previous_bookings_not_canceled', 'avg_price_per_room', 'no_of_special_requests', 'booking_status']
<column_types>
{'Booking_ID': 'object', 'no_of_adults': 'int64', 'no_of_children': 'int64', 'no_of_weekend_nights': 'int64', 'no_of_week_nights': 'int64', 'type_of_meal_plan': 'object', 'required_car_parking_space': 'int64', 'room_type_reserved': 'object', 'lead_time': 'int64', 'arrival_year': 'int64', 'arrival_month': 'int64', 'arrival_date': 'int64', 'market_segment_type': 'object', 'repeated_guest': 'int64', 'no_of_previous_cancellations': 'int64', 'no_of_previous_bookings_not_canceled': 'int64', 'avg_price_per_room': 'float64', 'no_of_special_requests': 'int64', 'booking_status': 'object'}
<dataframe_Summary>
{'no_of_adults': {'count': 36275.0, 'mean': 1.8449620951068229, 'std': 0.51871483790129, 'min': 0.0, '25%': 2.0, '50%': 2.0, '75%': 2.0, 'max': 4.0}, 'no_of_children': {'count': 36275.0, 'mean': 0.10527911784975878, 'std': 0.402648063589278, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 10.0}, 'no_of_weekend_nights': {'count': 36275.0, 'mean': 0.810723638869745, 'std': 0.8706436147600001, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 2.0, 'max': 7.0}, 'no_of_week_nights': {'count': 36275.0, 'mean': 2.2043004824259134, 'std': 1.4109048548240168, 'min': 0.0, '25%': 1.0, '50%': 2.0, '75%': 3.0, 'max': 17.0}, 'required_car_parking_space': {'count': 36275.0, 'mean': 0.0309855272226051, 'std': 0.17328084736891503, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'lead_time': {'count': 36275.0, 'mean': 85.23255685733976, 'std': 85.93081669923257, 'min': 0.0, '25%': 17.0, '50%': 57.0, '75%': 126.0, 'max': 443.0}, 'arrival_year': {'count': 36275.0, 'mean': 2017.8204272915232, 'std': 0.3838364395590784, 'min': 2017.0, '25%': 2018.0, '50%': 2018.0, '75%': 2018.0, 'max': 2018.0}, 'arrival_month': {'count': 36275.0, 'mean': 7.423652653342522, 'std': 3.0698944112257687, 'min': 1.0, '25%': 5.0, '50%': 8.0, '75%': 10.0, 'max': 12.0}, 'arrival_date': {'count': 36275.0, 'mean': 15.596995175740869, 'std': 8.740447368632898, 'min': 1.0, '25%': 8.0, '50%': 16.0, '75%': 23.0, 'max': 31.0}, 'repeated_guest': {'count': 36275.0, 'mean': 0.02563749138525155, 'std': 0.15805346903513287, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'no_of_previous_cancellations': {'count': 36275.0, 'mean': 0.023349414197105445, 'std': 0.3683314479197876, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 13.0}, 'no_of_previous_bookings_not_canceled': {'count': 36275.0, 'mean': 0.15341144038594073, 'std': 1.7541707114426388, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 58.0}, 'avg_price_per_room': {'count': 36275.0, 'mean': 103.42353907649897, 'std': 35.08942403637036, 'min': 0.0, '25%': 80.3, '50%': 99.45, '75%': 120.0, 'max': 540.0}, 'no_of_special_requests': {'count': 36275.0, 'mean': 0.6196554100620262, 'std': 0.7862358983956064, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 5.0}}
<dataframe_info>
RangeIndex: 36275 entries, 0 to 36274
Data columns (total 19 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Booking_ID 36275 non-null object
1 no_of_adults 36275 non-null int64
2 no_of_children 36275 non-null int64
3 no_of_weekend_nights 36275 non-null int64
4 no_of_week_nights 36275 non-null int64
5 type_of_meal_plan 36275 non-null object
6 required_car_parking_space 36275 non-null int64
7 room_type_reserved 36275 non-null object
8 lead_time 36275 non-null int64
9 arrival_year 36275 non-null int64
10 arrival_month 36275 non-null int64
11 arrival_date 36275 non-null int64
12 market_segment_type 36275 non-null object
13 repeated_guest 36275 non-null int64
14 no_of_previous_cancellations 36275 non-null int64
15 no_of_previous_bookings_not_canceled 36275 non-null int64
16 avg_price_per_room 36275 non-null float64
17 no_of_special_requests 36275 non-null int64
18 booking_status 36275 non-null object
dtypes: float64(1), int64(13), object(5)
memory usage: 5.3+ MB
<some_examples>
{'Booking_ID': {'0': 'INN00001', '1': 'INN00002', '2': 'INN00003', '3': 'INN00004'}, 'no_of_adults': {'0': 2, '1': 2, '2': 1, '3': 2}, 'no_of_children': {'0': 0, '1': 0, '2': 0, '3': 0}, 'no_of_weekend_nights': {'0': 1, '1': 2, '2': 2, '3': 0}, 'no_of_week_nights': {'0': 2, '1': 3, '2': 1, '3': 2}, 'type_of_meal_plan': {'0': 'Meal Plan 1', '1': 'Not Selected', '2': 'Meal Plan 1', '3': 'Meal Plan 1'}, 'required_car_parking_space': {'0': 0, '1': 0, '2': 0, '3': 0}, 'room_type_reserved': {'0': 'Room_Type 1', '1': 'Room_Type 1', '2': 'Room_Type 1', '3': 'Room_Type 1'}, 'lead_time': {'0': 224, '1': 5, '2': 1, '3': 211}, 'arrival_year': {'0': 2017, '1': 2018, '2': 2018, '3': 2018}, 'arrival_month': {'0': 10, '1': 11, '2': 2, '3': 5}, 'arrival_date': {'0': 2, '1': 6, '2': 28, '3': 20}, 'market_segment_type': {'0': 'Offline', '1': 'Online', '2': 'Online', '3': 'Online'}, 'repeated_guest': {'0': 0, '1': 0, '2': 0, '3': 0}, 'no_of_previous_cancellations': {'0': 0, '1': 0, '2': 0, '3': 0}, 'no_of_previous_bookings_not_canceled': {'0': 0, '1': 0, '2': 0, '3': 0}, 'avg_price_per_room': {'0': 65.0, '1': 106.68, '2': 60.0, '3': 100.0}, 'no_of_special_requests': {'0': 0, '1': 1, '2': 0, '3': 0}, 'booking_status': {'0': 'Not_Canceled', '1': 'Not_Canceled', '2': 'Canceled', '3': 'Canceled'}}
<end_description>
| 930 | 0 | 2,650 | 930 |
129370511
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("darkgrid")
# Note: statsmodels.tsa.arima_model is deprecated and removed in newer statsmodels releases;
# this notebook relies on its older API (fit(disp=0), forecast() returning (mean, stderr, conf_int)).
from statsmodels.tsa.arima_model import ARIMA
from statsmodels.tsa.stattools import adfuller
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
plt.rcParams["figure.figsize"] = (12, 6)
df = pd.read_csv("/kaggle/input/chosen/BRACBANK_data.csv")
df["Date"] = pd.to_datetime(df.Date)
df.head()
plt.plot(df["Date"], df["Close"])
plt.title("Bank Stock Trend")
plt.xlabel("Year")
plt.ylabel("Price")
plt.show()
df = df[["Date", "Close", "Vol"]]
df.head()
df["returns"] = df.Close.pct_change() * 100
df.dropna(inplace=True)
df.reset_index(drop=True, inplace=True)
df.head()
# High level overview of Closing price to see if it is stationary or not
rolling_mean = df["Close"].rolling(12).mean()
rolling_std = df["Close"].rolling(12).std()
plt.title("Rolling Mean & Standard Deviation")
plt.plot(df["Date"], df["Close"], label="Actual")
plt.plot(df["Date"], rolling_mean, label="Rolling Mean")
plt.plot(df["Date"], rolling_std, label="Rolling Std")
plt.xlabel("Year")
plt.ylabel("Price")
plt.legend()
plt.show()
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
# Autocorrelation Plot (ACF) without any differencing
plot_acf(df.Close)
plt.show()
len(rolling_mean), len(df)
# Check whether the target variable is stationary or not
# H0 - It is non-stationary (null hypothesis)
# H1 - It is stationary (alternative hypothesis)
result = adfuller(df["Close"])
print(f"ADF Statistic: {result[0]}, p-value: {result[1]}")
print(
    "H1: The series is stationary"
    if result[1] <= 0.05
    else "H0: The series is non-stationary"
)
# Testing the series for stationarity with differencing of 1
result = adfuller(df["Close"].diff().dropna())
print(f"ADF Statistic: {result[0]}, p-value: {result[1]}")
print(
"H1: The series is stationary"
if result[1] <= 0.05
else "H0: Theseries is non stationary"
)
# Identifying d: the number of times that the raw observations are differenced
# The series was stationary with differencing of 1
d = 1
print(f"Degree of differencing: {d}")
# Partial Autocorrelation Function (PACF)
diff = df.Close.diff().dropna()
plot_pacf(diff)
plt.show()
# Here lag 1 stands out and is well above the significance line. Hence, p is 1
p = 1
# Identifying q: the size of the moving average window
# Autocorrelation Function (ACF)
diff = df.Close.diff().dropna()
plot_acf(diff)
plt.show()
# Here lag 1 stands out and is well above the significance line. Hence, q is 1
q = 1
print(f"ARIMA order is ({p}, {d}, {q})")
# Train/Test Split
# Split Train and Test set into 70/30 percent
X_train, X_test = train_test_split(df.Close, test_size=0.30, shuffle=False)
print(X_train.shape, X_test.shape)
X_train.shape
# Fit ARIMA Model
import warnings
warnings.filterwarnings("ignore")
# ARIMA Model
model = ARIMA(X_train, order=(p, d, q))
result = model.fit(disp=0)
result.summary()
# Plotting residual errors
residuals = pd.DataFrame(result.resid)
plt.plot(residuals)
plt.show()
# The residual mean is centered around 0
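# A complementary residual diagnostic (illustrative sketch): the Ljung-Box test checks whether
# the residuals still contain autocorrelation. The return format of acorr_ljungbox differs
# between statsmodels versions, so the result is simply printed here.
from statsmodels.stats.diagnostic import acorr_ljungbox

print(acorr_ljungbox(residuals.iloc[:, 0], lags=[10]))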
# ACF and PACF plots of the residuals
fig, axes = plt.subplots(1, 2, figsize=(16, 4))
plot_acf(residuals, ax=axes[0])
plot_pacf(residuals, ax=axes[1])
plt.show()
# Predict Train set
result.plot_predict(
start=1,
end=30,
dynamic=False,
)
plt.show()
# In-sample (training) error
# Caveat: with d=1, result.predict(start=1) returns predictions on the differenced Close
# price, while df.returns holds percentage returns, so the two series are on different
# scales and this RMSE should be read with care.
yhat_train = result.predict(start=1)
returns_train = df.returns[1 : X_train.shape[0]]
trainScore = mean_squared_error(returns_train, yhat_train, squared=False)
print("Train Score: %.2f RMSE" % (trainScore))
yhat_train
# Forecasting Test Set
model = ARIMA(X_train, order=(p, d, q))
result = model.fit(disp=0)
yhat_test, se, conf = result.forecast(X_test.shape[0])
test_date = df.Date[X_train.shape[0] :]
yhat_test = pd.Series(yhat_test)
lower = pd.Series(conf[:, 0])
upper = pd.Series(conf[:, 1])
returns_test = df.returns[-X_test.shape[0] :]
testScore = mean_squared_error(X_test, yhat_test, squared=False)
print("Test Score: %.2f RMSE" % (testScore))
plt.title("Forecast vs Actual")
plt.plot(test_date, X_test, label="Actual")
plt.plot(test_date, yhat_test, label="Forecast")
plt.fill_between(test_date, lower, upper, color="k", alpha=0.2)
plt.xlabel("Year")
plt.ylabel("Price")
plt.legend()
plt.show()
yhat_test
# Here, the ARIMA model is used to forecast all of the test data based on the model fitted on the training set.
# The forecasted price is not that great. However, the confidence interval covers the actual price
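# A quick way to back up that observation: measure how often the actual test price
# falls inside the forecast interval (the three arrays are aligned by position).
inside = ((X_test.values >= lower.values) & (X_test.values <= upper.values)).mean()
print(f"Share of actual prices inside the confidence interval: {inside:.1%}")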
# Rolling Forecast on Test Set
# Rolling forecast on test set
# loop through the test set and append the train set with each element of the test set
# train an ARIMA model, then forecast out-of-sample one day ahead
# keep list of forecasts
x_train = list(X_train)
x_test = list(X_test)
forecasts = list()
for i in range(X_test.shape[0]):
model = ARIMA(x_train, order=(p, d, q))
result = model.fit(disp=0)
yhat = list(result.forecast()[0])[0]
forecasts.append(yhat)
x_train.append(x_test[i])
testScore = mean_squared_error(x_test, forecasts, squared=False)
print("Test Score: %.2f RMSE" % (testScore))
plt.title("Rolling Forecast vs Actual")
plt.plot(test_date, X_test, label="Actual")
plt.plot(test_date, forecasts, label="Rolling Forecast")
plt.xlabel("Year")
plt.ylabel("Price")
plt.legend()
plt.show()
# Significant improvement in RMSE on rolling forecasts
forecasts
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/370/129370511.ipynb
| null | null |
[{"Id": 129370511, "ScriptId": 38466026, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8142128, "CreationDate": "05/13/2023 08:01:17", "VersionNumber": 1.0, "Title": "Arima 1 Brac", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 183.0, "LinesInsertedFromPrevious": 183.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 1,856 | 0 | 1,856 | 1,856 |
||
129914235
|
# # Imports
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image
import cv2
# Data Directories
ROOT_DIR = "/kaggle/input/airbus-ship-detection"
train_image_dir = os.path.join(ROOT_DIR, "train_v2")
test_image_dir = os.path.join(ROOT_DIR, "test_v2")
sample_submission_dir = os.path.join(ROOT_DIR, "train_ship_segmentations_v2.csv")
train_ship_segmentations_dir = os.path.join(ROOT_DIR, "train_ship_segmentations_v2.csv")
# Data Loading
train = os.listdir(train_image_dir)
test = os.listdir(test_image_dir)
sample_submission_df = pd.read_csv(sample_submission_dir)
train_ship_segmentations_df = pd.read_csv(train_ship_segmentations_dir)
# # Data View
first_train_image_path = os.path.join(train_image_dir, train[0])
first_train_image = cv2.imread(first_train_image_path)
first_train_image = cv2.cvtColor(first_train_image, cv2.COLOR_BGR2RGB)
print(f"{first_train_image.shape = }\n")
plt.title(f"{train[0]}")
plt.imshow(first_train_image)
display(train_ship_segmentations_df.head(10))
num_of_total_images = train_ship_segmentations_df.ImageId.nunique()
not_empty = pd.notna(train_ship_segmentations_df.EncodedPixels)
num_of_empty_images = (~not_empty).sum()
num_of_non_empty_images = not_empty.sum()
num_of_total_masks = train_ship_segmentations_df[not_empty].ImageId.nunique()
print(
f"{num_of_total_images = } | {num_of_empty_images = } | {num_of_non_empty_images = } | {num_of_total_masks = }"
)
# **In conclusion the train_ship_segmentations_df contains:**
# * 192556 unique images
# * 150000 empty images
# * 42556 images with total of 81723 masks
# # Deal With RLE (run-length encoding)
# create masks data frame
masks = train_ship_segmentations_df.dropna(subset=["EncodedPixels"]).set_index(
"ImageId"
)
display(masks.head())
# we can see that the image '000155de5.jpg' has only one mask
masks["EncodedPixels"]["000155de5.jpg"]
# turn rle example into a list of ints
rle = [int(i) for i in masks["EncodedPixels"]["000155de5.jpg"].split()]
# turn list of ints into a list of (`start`, `length`) `pairs`
pairs = list(zip(rle[0:-1:2], rle[1::2]))
pairs[:3]
# **Explanation**
# The first pair (264661, 17) indicates that there is a run of 17 pixels starting from the position 264,661 in the mask. Similarly, the next two pairs represent runs of 33 pixels starting from positions 265,429 and 266,197, respectively.
# convert the 1-indexed RLE position into (row, col) coordinates; pixels are
# numbered top-to-bottom first, then left-to-right (column-major order)
first_pixel = pairs[0][0]
print(f"first pixel = {first_pixel}")
first_pixel_coordinates = ((first_pixel - 1) % 768, (first_pixel - 1) // 768)  # (row, col)
print(f"first pixel (row, col) coordinates = {first_pixel_coordinates}")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/914/129914235.ipynb
| null | null |
[{"Id": 129914235, "ScriptId": 38642628, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12676283, "CreationDate": "05/17/2023 11:54:46", "VersionNumber": 4.0, "Title": "Ido_Ronen_Ship_Detection", "EvaluationDate": "05/17/2023", "IsChange": false, "TotalLines": 76.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 76.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 963 | 0 | 963 | 963 |
||
129914399
|
<jupyter_start><jupyter_text>Pokemon Image Dataset
### Context
Images of all Pokemon from generation 1 to generation 7, along with their types (primary and secondary) as a csv.
### Inspiration
New evolution forms from two different Pokemon. (Create new Pokemon)
Predict Pokemon primary and secondary types from the images. Identify what types the evolution form will have based on the pre-evolved forms. Eg. from Pichu and Pikachu predict for Raichu.
### Future work/Ideas:
Merge with other information such as moves, generation, strong/weak against etc, and use the images to classify.
Kaggle dataset identifier: pokemon-images-and-types
<jupyter_script>import pandas as pd
import numpy as np
import os
import tensorflow as tf
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tensorflow import keras
from tensorflow.keras.utils import img_to_array, load_img
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from keras.utils import to_categorical
from keras.preprocessing.image import ImageDataGenerator
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
except ValueError:
tpu = None
if tpu:
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
else:
strategy = tf.distribute.get_strategy()
# Load the CSV file
data = pd.read_csv("/kaggle/input/pokemon-images-and-types/pokemon.csv")
# Sort the Pokemon alphabetically
data.sort_values(by="Name", inplace=True)
data.reset_index(inplace=True, drop=True)
# Gather the image files
img_path = "/kaggle/input/pokemon-images-and-types/images/images/"
pokemons_dir = os.listdir(img_path)
# Sort the images alphabetically
pokemons_dir.sort()
# Prepend the full path so each image can be accessed
for i in range(0, len(pokemons_dir)):
pokemons_dir[i] = img_path + pokemons_dir[i]
# Add a column to the dataframe containing the image paths. Since the data and the images are sorted the same way, they match up.
data["image_name"] = pokemons_dir
data.head(5)
# Data preprocessing
image_paths = data["image_name"].values
types = data["Type1"].values
# Encode the labels as numeric values
label_encoder = LabelEncoder()
types = label_encoder.fit_transform(types)
X_train, X_test, y_train, y_test = train_test_split(
image_paths, types, test_size=0.2, random_state=42
)
# Create an image generator to perform data augmentation
datagen = ImageDataGenerator(rescale=1.0 / 255.0, validation_split=0.2)
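# Note: as written, `datagen` only rescales, so no augmentation is actually applied.
# If augmentation is wanted, extra transforms could be configured like this
# (an illustrative variant, not used in the rest of the notebook):
augmenting_datagen = ImageDataGenerator(
    rescale=1.0 / 255.0,
    rotation_range=15,
    horizontal_flip=True,
    zoom_range=0.1,
    validation_split=0.2,
)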
# Define the CNN model
model = Sequential()
model.add(Conv2D(32, (3, 3), activation="relu", input_shape=(120, 120, 3)))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(64, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2)))
model.add(Conv2D(128, (3, 3), activation="relu"))
model.add(MaxPooling2D((2, 2)))
model.add(Flatten())
model.add(Dense(128, activation="relu"))
model.add(Dropout(0.2))
model.add(Dense(18, activation="softmax"))  # Number of Pokemon types
# Compile the model
model.compile(
optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
# Load the training images
train_images = []
for image_path in X_train:
img = tf.keras.utils.load_img(image_path, target_size=(120, 120))
img_array = tf.keras.utils.img_to_array(img)
train_images.append(img_array)
train_images = np.array(train_images)
# Load the test images
test_images = []
for image_path in X_test:
img = tf.keras.utils.load_img(image_path, target_size=(120, 120))
img_array = tf.keras.utils.img_to_array(img)
test_images.append(img_array)
test_images = np.array(test_images)
train_images = train_images / 255.0
test_images = test_images / 255.0
history = model.fit(
train_images, y_train, epochs=10, batch_size=32, validation_split=0.2
)
# This second, generator-based training run referenced undefined generators; they
# are built here from the arrays above. `train_images` is already scaled to [0, 1],
# so a plain generator (no extra rescaling) is used instead of `datagen`.
flow_gen = ImageDataGenerator(validation_split=0.2)
train_generator = flow_gen.flow(train_images, y_train, batch_size=32, subset="training")
validation_generator = flow_gen.flow(train_images, y_train, batch_size=32, subset="validation")
history = model.fit(
    train_generator,
    steps_per_epoch=len(train_generator),
    epochs=10,
    validation_data=validation_generator,
    validation_steps=len(validation_generator),
)
def plot_scores(history):
accuracy = history.history["accuracy"]
val_accuracy = history.history["val_accuracy"]
epochs = range(len(accuracy))
    plt.plot(epochs, accuracy, "b", label="Training score")
    plt.plot(epochs, val_accuracy, "r", label="Validation score")
plt.title("Scores")
plt.legend()
plt.show()
plot_scores(history)
# Make predictions on new images
def predict_pokemon_type(image_path):
img = tf.keras.utils.load_img(image_path, target_size=(120, 120))
plt.imshow(img)
img_array = tf.keras.utils.img_to_array(img)
img_array = np.expand_dims(img_array, axis=0)
    img_array = img_array / 255.0  # Normalization
predictions = model.predict(img_array)
predicted_class = label_encoder.inverse_transform([np.argmax(predictions)])
return predicted_class[0]
# Example use of the predict_pokemon_type() function
image_path = img_path + "squirtle" + ".png"
predicted_type = predict_pokemon_type(image_path)
print("Predicted Pokemon type:", predicted_type)
# Example use of the predict_pokemon_type() function
image_path = img_path + "pikachu" + ".png"
predicted_type = predict_pokemon_type(image_path)
print("Predicted Pokemon type:", predicted_type)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/914/129914399.ipynb
|
pokemon-images-and-types
|
vishalsubbiah
|
[{"Id": 129914399, "ScriptId": 38611639, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14297639, "CreationDate": "05/17/2023 11:56:26", "VersionNumber": 1.0, "Title": "MM LA2 Pokemon v2", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 140.0, "LinesInsertedFromPrevious": 140.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 186331408, "KernelVersionId": 129914399, "SourceDatasetVersionId": 215866}]
|
[{"Id": 215866, "DatasetId": 92703, "DatasourceVersionId": 227412, "CreatorUserId": 523894, "LicenseName": "Attribution 4.0 International (CC BY 4.0)", "CreationDate": "12/17/2018 01:01:39", "VersionNumber": 3.0, "Title": "Pokemon Image Dataset", "Slug": "pokemon-images-and-types", "Subtitle": "Pokemon image dataset", "Description": "### Context\n\nImages of all Pokemon from generation 1 to generation 7, along with their types (primary and secondary) as a csv. \n\n\n### Inspiration\nNew evolution forms from two different Pokemon. (Create new Pokemon) \n\nPredict Pokemon primary and secondary types from the images. Identify what types the evolution form will have based on the pre-evolved forms. Eg. from Pichu and Pikachu predict for Raichu. \n\n\n\n### Future work/Ideas: \nMerge with other information such as moves, generation, strong/weak against etc, and use the images to classify.\n\n### Acknowledgements\n\ndata scrapped from https://pokemondb.net/pokedex/national\n\ncover image from https://www.hjackets.com/blog/pikachu-costume-for-kids-and-adult/", "VersionNotes": "adding type 1 and 2", "TotalCompressedBytes": 2590969.0, "TotalUncompressedBytes": 2590969.0}]
|
[{"Id": 92703, "CreatorUserId": 523894, "OwnerUserId": 523894.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 215866.0, "CurrentDatasourceVersionId": 227412.0, "ForumId": 102320, "Type": 2, "CreationDate": "12/17/2018 00:55:52", "LastActivityDate": "12/17/2018", "TotalViews": 191462, "TotalDownloads": 25680, "TotalVotes": 469, "TotalKernels": 86}]
|
[{"Id": 523894, "UserName": "vishalsubbiah", "DisplayName": "Vishal Subbiah", "RegisterDate": "02/08/2016", "PerformanceTier": 1}]
|
| false | 1 | 1,582 | 0 | 1,739 | 1,582 |
||
129880952
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
train = pd.read_csv("/kaggle/input/playground-series-s3e9/train.csv")
test = pd.read_csv("/kaggle/input/playground-series-s3e9/test.csv")
submission = pd.read_csv("/kaggle/input/playground-series-s3e9/sample_submission.csv")
train.head(4)
train.describe(include="all")
# checking the null values
var1 = train.isna().sum().sum()
var2 = test.isna().sum().sum()
if var1 == 0 and var2 == 0:
print("There are no null values in train and test.")
else:
print(f"There are {var1} null values in train")
print(f"There are {var2} null values in train")
print()
print(f"Duplicate values in train: {train[test.columns].duplicated().sum()}")
print(f"Duplicate values in test: {test.duplicated().sum()}")
print(
f"Duplicate values in train and test together: {pd.concat([train, test])[test.columns].duplicated().sum()}"
)
print()
print("Sample lines from train:")
train.tail(5)
display(train[0:6].T)
print(train.columns.tolist())
target = "Strength"
# View Train Concrete Strength Distribution
plt.figure(figsize=(10, 6))
plt.title("Train Concrete Strength Distribution", fontsize=20)
sns.histplot(data=train[target], color="teal", kde=True, bins=100)
# Mapping the correlation between the data
plt.figure(figsize=(10, 10), dpi=80)
sns.heatmap(train.corr(), cmap="PiYG", annot=True)
plt.title("Correlation Heatmap")
plt.show()
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from xgboost import XGBRegressor
from sklearn import metrics
# normalizing the data
train_scaled = preprocessing.normalize(train)
train_scaled = pd.DataFrame(train_scaled, columns=train.columns)
train_scaled.head()
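# Note: preprocessing.normalize scales each row (sample) to unit norm, including the
# Strength target. A more conventional alternative (StandardScaler is imported above
# but unused) would standardize the feature columns only. Illustrative, not used below:
feature_cols = [c for c in train.columns if c != "Strength"]
scaler = StandardScaler()
train_features_std = pd.DataFrame(
    scaler.fit_transform(train[feature_cols]), columns=feature_cols
)
train_features_std.head()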
y = train_scaled["Strength"]
X = train_scaled.drop("Strength", axis=1)
print(X)
print(y)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=42
)
print(X.shape, X_train.shape, X_test.shape)
model = XGBRegressor()
model.fit(X_train, y_train)
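# Optional sanity check (illustrative): 5-fold cross-validated RMSE for the same
# model class on the normalized training data.
from sklearn.model_selection import cross_val_score
cv_rmse = -cross_val_score(
    XGBRegressor(), X, y, cv=5, scoring="neg_root_mean_squared_error"
)
print("CV RMSE: %.4f (+/- %.4f)" % (cv_rmse.mean(), cv_rmse.std()))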
Train_predict = model.predict(X_train)
# Evaluation
# R squared Error
score_1 = metrics.r2_score(y_train, Train_predict)
# Mean Absolute Error
score_2 = metrics.mean_absolute_error(y_train, Train_predict)
print("The R Squared error is :", score_1)
print("The Mean Absolute error is : ", score_2)
plt.scatter(y_train, Train_predict)
plt.xlabel("Actual ")
plt.ylabel("Predicted ")
plt.title("ACTUAL vs PREDICTED CONCRETE STRENGTH")
plt.show()
test_predict = model.predict(X_test)
Test_1 = metrics.r2_score(y_test, test_predict)
Test_2 = metrics.mean_absolute_error(y_test, test_predict)
print("For Test data R squared error is : ", Test_1)
print("Mean Absolute Error is : ", Test_2)
plt.scatter(y_test, test_predict)
plt.xlabel("Actual ")
plt.ylabel("Predicted ")
plt.title("ACTUAL vs PREDICTED CONCRETE STRENGTH")
plt.show()
# Build the submission from predictions on the competition test set, applying the
# same row-normalization that was used on the training data.
test_scaled = pd.DataFrame(preprocessing.normalize(test), columns=test.columns)
submission["Strength"] = model.predict(test_scaled)
submission.to_csv("regsubmission.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/880/129880952.ipynb
| null | null |
[{"Id": 129880952, "ScriptId": 38583566, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11565882, "CreationDate": "05/17/2023 06:51:49", "VersionNumber": 3.0, "Title": "Regression with tabular concrete strength data", "EvaluationDate": "05/17/2023", "IsChange": false, "TotalLines": 117.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 117.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 1,028 | 0 | 1,028 | 1,028 |
||
129880822
|
<jupyter_start><jupyter_text>Chest X-Ray Images (Pneumonia)
### Context
http://www.cell.com/cell/fulltext/S0092-8674(18)30154-5

Figure S6. Illustrative Examples of Chest X-Rays in Patients with Pneumonia, Related to Figure 6
The normal chest X-ray (left panel) depicts clear lungs without any areas of abnormal opacification in the image. Bacterial pneumonia (middle) typically exhibits a focal lobar consolidation, in this case in the right upper lobe (white arrows), whereas viral pneumonia (right) manifests with a more diffuse ‘‘interstitial’’ pattern in both lungs.
http://www.cell.com/cell/fulltext/S0092-8674(18)30154-5
### Content
The dataset is organized into 3 folders (train, test, val) and contains subfolders for each image category (Pneumonia/Normal). There are 5,863 X-Ray images (JPEG) and 2 categories (Pneumonia/Normal).
Chest X-ray images (anterior-posterior) were selected from retrospective cohorts of pediatric patients of one to five years old from Guangzhou Women and Children’s Medical Center, Guangzhou. All chest X-ray imaging was performed as part of patients’ routine clinical care.
For the analysis of chest x-ray images, all chest radiographs were initially screened for quality control by removing all low quality or unreadable scans. The diagnoses for the images were then graded by two expert physicians before being cleared for training the AI system. In order to account for any grading errors, the evaluation set was also checked by a third expert.
Kaggle dataset identifier: chest-xray-pneumonia
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix
import tensorflow as tf
import seaborn as sn
import os
import cv2 as cv
train_normal_path = "/kaggle/input/chest-xray-pneumonia/chest_xray/train/NORMAL"
train_pneumonia_path = "/kaggle/input/chest-xray-pneumonia/chest_xray/train/PNEUMONIA"
test_normal_path = "/kaggle/input/chest-xray-pneumonia/chest_xray/test/NORMAL"
test_pneumonia_path = "/kaggle/input/chest-xray-pneumonia/chest_xray/test/PNEUMONIA"
train_normal_lst = []
train_pneumonia_lst = []
test_normal_lst = []
test_pneumonia_lst = []
train_normal_len = len(os.listdir(train_normal_path))
train_pneumonia_len = len(os.listdir(train_pneumonia_path))
test_normal_len = len(os.listdir(test_normal_path))
test_pneumonia_len = len(os.listdir(test_pneumonia_path))
train_normal_len, train_pneumonia_len, test_normal_len, test_pneumonia_len
count_1 = 0
count_2 = 0
count_3 = 0
count_4 = 0
for pics in os.listdir(train_normal_path):
if count_1 <= train_normal_len:
img = cv.imread(os.path.join(train_normal_path, pics))
        img = cv.resize(img, (27, 27), interpolation=cv.INTER_LINEAR)
img = img / 255
count_1 = count_1 + 1
train_normal_lst.append(img)
for pics in os.listdir(train_pneumonia_path):
if count_2 <= train_normal_len:
img = cv.imread(os.path.join(train_pneumonia_path, pics))
        img = cv.resize(img, (27, 27), interpolation=cv.INTER_LINEAR)
img = img / 255
count_2 = count_2 + 1
train_pneumonia_lst.append(img)
for pics in os.listdir(test_normal_path):
if count_3 <= test_normal_len:
img = cv.imread(os.path.join(test_normal_path, pics))
        img = cv.resize(img, (27, 27), interpolation=cv.INTER_LINEAR)
img = img / 255
count_3 = count_3 + 1
test_normal_lst.append(img)
for pics in os.listdir(test_pneumonia_path):
if count_4 <= test_normal_len:
img = cv.imread(os.path.join(test_pneumonia_path, pics))
        img = cv.resize(img, (27, 27), interpolation=cv.INTER_LINEAR)
img = img / 255
count_4 = count_4 + 1
test_pneumonia_lst.append(img)
train_normal_np = np.array(train_normal_lst)
train_pneumonia_np = np.array(train_pneumonia_lst)
test_normal_np = np.array(test_normal_lst)
test_pneumonia_np = np.array(test_pneumonia_lst)
np.save("train_normal_np.npy", train_normal_np)
np.save("train_pneumonia_np.npy", train_pneumonia_np)
np.save("test_normal_np.npy", test_normal_np)
np.save("test_pneumonia_np.npy", test_pneumonia_np)
train_normal_np.shape, train_pneumonia_np.shape, test_normal_np.shape, test_pneumonia_np.shape
dict_label = {0: "NORMAL", 1: "PNEUMONIA"}
train_normal_label = np.ones(train_normal_np.shape[0]) * 0
train_pneumonia_label = np.ones(train_pneumonia_np.shape[0]) * 1
test_normal_label = np.ones(test_normal_np.shape[0]) * 0
test_pneumonia_label = np.ones(test_pneumonia_np.shape[0]) * 1
train_normal_label.shape, train_pneumonia_label.shape, test_normal_label.shape, test_pneumonia_label.shape
x_train = np.concatenate((train_normal_np, train_pneumonia_np), axis=0)
x_test = np.concatenate((test_normal_np, test_pneumonia_np), axis=0)
y_train = np.concatenate((train_normal_label, train_pneumonia_label), axis=0)
y_test = np.concatenate((test_normal_label, test_pneumonia_label), axis=0)
x_train.shape, x_test.shape, y_train.shape, y_test.shape
model = tf.keras.Sequential(
[
tf.keras.layers.Conv2D(filters=16, kernel_size=(3, 3), activation="relu"),
tf.keras.layers.MaxPool2D(),
tf.keras.layers.Conv2D(filters=32, kernel_size=(3, 3), activation="relu"),
tf.keras.layers.MaxPool2D(),
tf.keras.layers.Conv2D(filters=64, kernel_size=(3, 3), activation="relu"),
tf.keras.layers.MaxPool2D(),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation="relu"),
tf.keras.layers.Dropout(0.7),
tf.keras.layers.Dense(50, activation="relu"),
tf.keras.layers.Dropout(0.7),
tf.keras.layers.Dense(20, activation="relu"),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(1, activation="sigmoid"),
]
)
model.compile(
optimizer="adam", loss=tf.keras.losses.binary_crossentropy, metrics=["accuracy"]
)
model.fit(x_train, y_train, epochs=8)
model.evaluate(x_test, y_test)
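# confusion_matrix and seaborn are imported above but never used; a short,
# illustrative way to summarize the test-set predictions with them:
y_pred = np.round(model.predict(x_test)).reshape(-1)
cm = confusion_matrix(y_test, y_pred)
sn.heatmap(
    cm,
    annot=True,
    fmt="d",
    xticklabels=["NORMAL", "PNEUMONIA"],
    yticklabels=["NORMAL", "PNEUMONIA"],
)
plt.xlabel("Predicted")
plt.ylabel("Actual")
plt.show()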
model.save("pneumonia.h5", model)
model.summary()
check_pneumonia_path = "/kaggle/input/chest-x-rays"
list_pics_folder_lst = []
for pics in os.listdir(check_pneumonia_path):
img = cv.imread(os.path.join(check_pneumonia_path, pics))
    img = cv.resize(img, (27, 27), interpolation=cv.INTER_LINEAR)
img = img / 255
list_pics_folder_lst.append(img)
list_pics_folder_np = np.array(list_pics_folder_lst)
list_pics_folder_np.shape
model_pneumonia = tf.keras.models.load_model("pneumonia.h5")
model_pneumonia.summary()
import numpy as np
import h5py
import tensorflow as tf
import os
import cv2 as cv
pneumonia_img_predicted = model_pneumonia.predict(list_pics_folder_np)
pneumonia_img_predicted = np.reshape(
pneumonia_img_predicted, pneumonia_img_predicted.shape[0]
)
pneumonia_img_predicted = np.round(pneumonia_img_predicted)
pneumonia_img_predicted
count = 1
for num in pneumonia_img_predicted:
disease = dict_label[num]
print(f"img-->{count} <==> {disease}")
count = count + 1
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/880/129880822.ipynb
|
chest-xray-pneumonia
|
paultimothymooney
|
[{"Id": 129880822, "ScriptId": 38539406, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12753753, "CreationDate": "05/17/2023 06:50:35", "VersionNumber": 1.0, "Title": "Project work 4", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 193.0, "LinesInsertedFromPrevious": 193.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 186286224, "KernelVersionId": 129880822, "SourceDatasetVersionId": 23812}]
|
[{"Id": 23812, "DatasetId": 17810, "DatasourceVersionId": 23851, "CreatorUserId": 1314380, "LicenseName": "Other (specified in description)", "CreationDate": "03/24/2018 19:41:59", "VersionNumber": 2.0, "Title": "Chest X-Ray Images (Pneumonia)", "Slug": "chest-xray-pneumonia", "Subtitle": "5,863 images, 2 categories", "Description": "### Context\n\nhttp://www.cell.com/cell/fulltext/S0092-8674(18)30154-5\n\n\n\nFigure S6. Illustrative Examples of Chest X-Rays in Patients with Pneumonia, Related to Figure 6\nThe normal chest X-ray (left panel) depicts clear lungs without any areas of abnormal opacification in the image. Bacterial pneumonia (middle) typically exhibits a focal lobar consolidation, in this case in the right upper lobe (white arrows), whereas viral pneumonia (right) manifests with a more diffuse \u2018\u2018interstitial\u2019\u2019 pattern in both lungs.\nhttp://www.cell.com/cell/fulltext/S0092-8674(18)30154-5\n\n### Content\n\nThe dataset is organized into 3 folders (train, test, val) and contains subfolders for each image category (Pneumonia/Normal). There are 5,863 X-Ray images (JPEG) and 2 categories (Pneumonia/Normal). \n\nChest X-ray images (anterior-posterior) were selected from retrospective cohorts of pediatric patients of one to five years old from Guangzhou Women and Children\u2019s Medical Center, Guangzhou. All chest X-ray imaging was performed as part of patients\u2019 routine clinical care. \n\nFor the analysis of chest x-ray images, all chest radiographs were initially screened for quality control by removing all low quality or unreadable scans. The diagnoses for the images were then graded by two expert physicians before being cleared for training the AI system. In order to account for any grading errors, the evaluation set was also checked by a third expert.\n\n### Acknowledgements\n\nData: https://data.mendeley.com/datasets/rscbjbr9sj/2\n\nLicense: [CC BY 4.0][1]\n\nCitation: http://www.cell.com/cell/fulltext/S0092-8674(18)30154-5\n\n![enter image description here][2]\n\n\n### Inspiration\n\nAutomated methods to detect and classify human diseases from medical images.\n\n\n [1]: https://creativecommons.org/licenses/by/4.0/\n [2]: https://i.imgur.com/8AUJkin.png", "VersionNotes": "train/test/val", "TotalCompressedBytes": 1237249419.0, "TotalUncompressedBytes": 1237249419.0}]
|
[{"Id": 17810, "CreatorUserId": 1314380, "OwnerUserId": 1314380.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 23812.0, "CurrentDatasourceVersionId": 23851.0, "ForumId": 25540, "Type": 2, "CreationDate": "03/22/2018 05:42:41", "LastActivityDate": "03/22/2018", "TotalViews": 2063138, "TotalDownloads": 237932, "TotalVotes": 5834, "TotalKernels": 2058}]
|
[{"Id": 1314380, "UserName": "paultimothymooney", "DisplayName": "Paul Mooney", "RegisterDate": "10/05/2017", "PerformanceTier": 5}]
|
| false | 0 | 2,198 | 0 | 2,675 | 2,198 |
||
129539074
|
<jupyter_start><jupyter_text>Football/Soccer | Bundesliga Player Database
The Bundesliga Players dataset provides a comprehensive collection of information on every player in the German Bundesliga football league. From renowned goalkeepers to talented defenders, this dataset offers an extensive range of player details including their names, full names, ages, heights, nationalities, places of birth, prices, maximum prices, positions, shirt numbers, preferred foot, current clubs, contract expiration dates, dates of joining the clubs, player agents, and outfitters. Whether you're a passionate football fan, a sports analyst, or a fantasy football enthusiast, this dataset serves as a valuable resource for exploring and analyzing the profiles of Bundesliga players, enabling you to delve into their backgrounds, performance statistics, and club affiliations. Discover the stars of German football and gain insights into their careers with this comprehensive Bundesliga Players dataset.
Kaggle dataset identifier: bundesliga-soccer-player
<jupyter_script>import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import re
from sklearn.preprocessing import OneHotEncoder
# # Data Light Exploration
df = pd.read_csv("/kaggle/input/bundesliga-soccer-player/bundesliga_player.csv")
df.shape
df.info()
df.head()
df.describe()
df.isnull().sum() / df.shape[0]
# # Quality
# * Index column Unnamed: 0
# * contract_expires, joined_club are stored as object dtype, not datetime
# * Some players have more than one nationality
# * Price and max_price have very high std
# * Check if different special characters in place of birth counts different values
# * Position split to main and specific position
# * Missing values in some columns
# * Dropping columns with high null values
# * Drop rows with missing both price and max_price
# * Adding category Unknown to null values in player_agent, outfitter and place of birth
# * Filling null values in foot with the mode of the column
#
df.set_index("Unnamed: 0", inplace=True)
df.drop(columns="full_name", inplace=True)
df.contract_expires = pd.to_datetime(df.contract_expires)
df.joined_club = pd.to_datetime(df.joined_club)
diff = (df.contract_expires - df.joined_club).mode()
diff
# Use .loc to avoid chained-assignment issues when filling missing expiry dates
missing_expiry = df.contract_expires.isnull()
df.loc[missing_expiry, "contract_expires"] = df.loc[
    missing_expiry, "joined_club"
] + pd.Timedelta(days=1460)
df.contract_expires.isnull().sum()
df.nationality.value_counts()
df.place_of_birth.value_counts()
df.place_of_birth.str.contains(r"-\$").sum()
df["position"].value_counts()
df.outfitter.value_counts()
df.player_agent.value_counts()
df.player_agent.fillna("Unknown", inplace=True)
df.drop(df[df.price.isnull()].index, inplace=True)
df.outfitter.fillna("Unknown", inplace=True)
df.foot.fillna(df.foot.mode()[0], inplace=True)
df[df.foot.isnull()]
df.place_of_birth.fillna("Unknown", inplace=True)
df.club.value_counts()
df[["gen_pos", "spec_pos"]] = df.position.str.split("-", n=1, expand=True)
df.spec_pos.fillna("GK", inplace=True)
df.drop(columns="position", inplace=True)
# Players have multiple nationalities
df["nat_count"] = df.nationality.apply(lambda x: len(x.split(" ")))
df["nat_count"].value_counts()
df["nationality"] = df["nationality"].str.split(r"\xa0\xa0", expand=True)[0]
df["nationality"].value_counts()
df.info()
num_col = ["age", "height", "price", "max_price"]
cat_col = [
"nationality",
"place_of_birth",
"gen_pos",
"spec_pos",
"shirt_nr",
"foot",
"club",
"player_agent",
"outfitter",
"nat_count",
]
for col in num_col:
plt.title(col)
plt.hist(df[col])
plt.show()
df[df["price"] > 9].head(16)
df[num_col].corr()
sns.heatmap(df[num_col].corr())
sns.scatterplot(df, x="age", y="price")
plt.title("Age")
plt.show()
sns.scatterplot(df, x="height", y="price")
plt.title("Height")
plt.show()
for col in cat_col:
plt.title(col)
sns.barplot(
y=df[col].value_counts().nlargest(30).index,
x=df[col].value_counts().nlargest(30),
orient="h",
)
plt.show()
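# OneHotEncoder is imported above but never used; a minimal sketch of how the main
# categorical columns could be encoded for later modelling (illustrative only;
# `sparse_output` requires scikit-learn >= 1.2, older versions use `sparse=False`):
encoder = OneHotEncoder(handle_unknown="ignore", sparse_output=False)
encoded = encoder.fit_transform(df[["gen_pos", "foot", "club"]])
encoded_df = pd.DataFrame(
    encoded,
    columns=encoder.get_feature_names_out(["gen_pos", "foot", "club"]),
    index=df.index,
)
print(encoded_df.shape)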
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/539/129539074.ipynb
|
bundesliga-soccer-player
|
oles04
|
[{"Id": 129539074, "ScriptId": 38518392, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15023203, "CreationDate": "05/14/2023 16:35:22", "VersionNumber": 1.0, "Title": "notebook6f026010fd", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 129.0, "LinesInsertedFromPrevious": 129.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185703592, "KernelVersionId": 129539074, "SourceDatasetVersionId": 5668174}]
|
[{"Id": 5668174, "DatasetId": 3258253, "DatasourceVersionId": 5743664, "CreatorUserId": 12065933, "LicenseName": "Other (specified in description)", "CreationDate": "05/12/2023 07:42:13", "VersionNumber": 1.0, "Title": "Football/Soccer | Bundesliga Player Database", "Slug": "bundesliga-soccer-player", "Subtitle": "Bundesliga Player Database: Complete Profiles, Stats, and Clubs of each Player", "Description": "The Bundesliga Players dataset provides a comprehensive collection of information on every player in the German Bundesliga football league. From renowned goalkeepers to talented defenders, this dataset offers an extensive range of player details including their names, full names, ages, heights, nationalities, places of birth, prices, maximum prices, positions, shirt numbers, preferred foot, current clubs, contract expiration dates, dates of joining the clubs, player agents, and outfitters. Whether you're a passionate football fan, a sports analyst, or a fantasy football enthusiast, this dataset serves as a valuable resource for exploring and analyzing the profiles of Bundesliga players, enabling you to delve into their backgrounds, performance statistics, and club affiliations. Discover the stars of German football and gain insights into their careers with this comprehensive Bundesliga Players dataset.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3258253, "CreatorUserId": 12065933, "OwnerUserId": 12065933.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5668174.0, "CurrentDatasourceVersionId": 5743664.0, "ForumId": 3323776, "Type": 2, "CreationDate": "05/12/2023 07:42:13", "LastActivityDate": "05/12/2023", "TotalViews": 7284, "TotalDownloads": 1339, "TotalVotes": 37, "TotalKernels": 11}]
|
[{"Id": 12065933, "UserName": "oles04", "DisplayName": "Ole", "RegisterDate": "10/23/2022", "PerformanceTier": 2}]
|
| false | 1 | 1,004 | 0 | 1,247 | 1,004 |
||
129539906
|
<jupyter_start><jupyter_text>Chest X-Ray Images (Pneumonia)
### Context
http://www.cell.com/cell/fulltext/S0092-8674(18)30154-5

Figure S6. Illustrative Examples of Chest X-Rays in Patients with Pneumonia, Related to Figure 6
The normal chest X-ray (left panel) depicts clear lungs without any areas of abnormal opacification in the image. Bacterial pneumonia (middle) typically exhibits a focal lobar consolidation, in this case in the right upper lobe (white arrows), whereas viral pneumonia (right) manifests with a more diffuse ‘‘interstitial’’ pattern in both lungs.
http://www.cell.com/cell/fulltext/S0092-8674(18)30154-5
### Content
The dataset is organized into 3 folders (train, test, val) and contains subfolders for each image category (Pneumonia/Normal). There are 5,863 X-Ray images (JPEG) and 2 categories (Pneumonia/Normal).
Chest X-ray images (anterior-posterior) were selected from retrospective cohorts of pediatric patients of one to five years old from Guangzhou Women and Children’s Medical Center, Guangzhou. All chest X-ray imaging was performed as part of patients’ routine clinical care.
For the analysis of chest x-ray images, all chest radiographs were initially screened for quality control by removing all low quality or unreadable scans. The diagnoses for the images were then graded by two expert physicians before being cleared for training the AI system. In order to account for any grading errors, the evaluation set was also checked by a third expert.
Kaggle dataset identifier: chest-xray-pneumonia
<jupyter_script># !pip install -q kaggle
# !echo ' {"username":"xxshubhamxx","key":"68e5ce493b1319f94ed00cf9af01bc96"} ' > kaggle.json
# !mkdir -p ~/.kaggle
# !cp kaggle.json ~/.kaggle/
# !chmod 600 /root/.kaggle/kaggle.json
# !kaggle datasets download -d paultimothymooney/chest-xray-pneumonia
# !unzip -q chest-xray-pneumonia.zip
# !rm -rf chest-xray-pneumonia.zip kaggle.json
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (
Dense,
Activation,
Conv2D,
MaxPooling2D,
Flatten,
Dropout,
BatchNormalization,
)
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import (
precision_recall_curve,
roc_curve,
accuracy_score,
confusion_matrix,
precision_score,
recall_score,
)
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
# plt.style.use('fivethirtyeight')
import pickle
import os
import numpy as np
import cv2
labels = ["PNEUMONIA", "NORMAL"]
img_size = 224
def get_training_data(data_dir):
data = []
for label in labels:
path = os.path.join(data_dir, label)
class_num = labels.index(label)
for img in os.listdir(path):
try:
img_arr = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
resized_arr = cv2.resize(img_arr, (img_size, img_size))
data.append([resized_arr, class_num])
except Exception as e:
print(e)
return np.array(data)
train = get_training_data("/kaggle/input/chest-xray-pneumonia//chest_xray/train")
test = get_training_data("/kaggle/input/chest-xray-pneumonia/chest_xray/test")
val = get_training_data("/kaggle/input/chest-xray-pneumonia/chest_xray/val")
pnenumonia = 0
normal = 0
for i, j in train:
if j == 0:
pnenumonia += 1
else:
normal += 1
print("Pneumonia:", pnenumonia)
print("Normal:", normal)
print("Pneumonia - Normal:", pnenumonia - normal)
X = []
y = []
for feature, label in train:
X.append(feature)
y.append(label)
for feature, label in test:
X.append(feature)
y.append(label)
for feature, label in val:
X.append(feature)
y.append(label)
# resize data for deep learning
X = np.array(X).reshape(-1, img_size, img_size, 1)
y = np.array(y)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=32
)
X_train, X_val, y_train, y_val = train_test_split(
X_train, y_train, test_size=0.20, random_state=32
)
X_train = X_train / 255
X_test = X_test / 255
X_val = X_val / 255
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=90,
zoom_range=0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=True,
vertical_flip=True,
)
datagen.fit(X_train)
import autokeras as ak
clf = ak.ImageClassifier(overwrite=True, max_trials=10, num_classes=2)
# Feed the image classifier with training data.
clf.fit(X_train, y_train, epochs=10)
# Predict with the best model.
predicted_y = clf.predict(X_test)
print(predicted_y)
# Evaluate the best model with testing data.
print(clf.evaluate(X_test, y_test))
model = clf.export_model()
print(type(model)) # <class 'tensorflow.python.keras.engine.training.Model'>
try:
model.save("model_autokeras", save_format="tf")
except Exception:
model.save("model_autokeras.h5")
model.save("model_autokeras_working.h5")
# GRU is Gated Reccurent Unit Layer
model = tf.keras.Sequential(
[
tf.keras.Input(shape=(224, 224)),
tf.keras.layers.GRU(128),
tf.keras.layers.Dense(
128,
activation="relu",
input_shape=(
28,
28,
),
),
tf.keras.layers.Dropout(0.2, input_shape=(128,)),
tf.keras.layers.Dense(10, activation="softmax"),
]
)
model.compile(
optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
def scheduler(epoch, lr):
if epoch < 8:
return lr
else:
return lr * tf.math.exp(-0.1)
my_callbacks = [
tf.keras.callbacks.EarlyStopping(monitor="val_loss", patience=2),
tf.keras.callbacks.LearningRateScheduler(scheduler),
]
trainer = model.fit(
X_train,
y_train,
validation_data=(X_test, y_test),
epochs=20,
callbacks=my_callbacks,
)
plt.plot(trainer.history["loss"], label="loss")
plt.plot(trainer.history["val_loss"], label="val_loss")
plt.legend()
plt.plot(trainer.history["accuracy"], label="acc")
plt.plot(trainer.history["val_accuracy"], label="val_acc")
plt.legend()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/539/129539906.ipynb
|
chest-xray-pneumonia
|
paultimothymooney
|
[{"Id": 129539906, "ScriptId": 38518246, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8755294, "CreationDate": "05/14/2023 16:43:42", "VersionNumber": 1.0, "Title": "trial", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 171.0, "LinesInsertedFromPrevious": 171.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185705153, "KernelVersionId": 129539906, "SourceDatasetVersionId": 23812}]
|
[{"Id": 23812, "DatasetId": 17810, "DatasourceVersionId": 23851, "CreatorUserId": 1314380, "LicenseName": "Other (specified in description)", "CreationDate": "03/24/2018 19:41:59", "VersionNumber": 2.0, "Title": "Chest X-Ray Images (Pneumonia)", "Slug": "chest-xray-pneumonia", "Subtitle": "5,863 images, 2 categories", "Description": "### Context\n\nhttp://www.cell.com/cell/fulltext/S0092-8674(18)30154-5\n\n\n\nFigure S6. Illustrative Examples of Chest X-Rays in Patients with Pneumonia, Related to Figure 6\nThe normal chest X-ray (left panel) depicts clear lungs without any areas of abnormal opacification in the image. Bacterial pneumonia (middle) typically exhibits a focal lobar consolidation, in this case in the right upper lobe (white arrows), whereas viral pneumonia (right) manifests with a more diffuse \u2018\u2018interstitial\u2019\u2019 pattern in both lungs.\nhttp://www.cell.com/cell/fulltext/S0092-8674(18)30154-5\n\n### Content\n\nThe dataset is organized into 3 folders (train, test, val) and contains subfolders for each image category (Pneumonia/Normal). There are 5,863 X-Ray images (JPEG) and 2 categories (Pneumonia/Normal). \n\nChest X-ray images (anterior-posterior) were selected from retrospective cohorts of pediatric patients of one to five years old from Guangzhou Women and Children\u2019s Medical Center, Guangzhou. All chest X-ray imaging was performed as part of patients\u2019 routine clinical care. \n\nFor the analysis of chest x-ray images, all chest radiographs were initially screened for quality control by removing all low quality or unreadable scans. The diagnoses for the images were then graded by two expert physicians before being cleared for training the AI system. In order to account for any grading errors, the evaluation set was also checked by a third expert.\n\n### Acknowledgements\n\nData: https://data.mendeley.com/datasets/rscbjbr9sj/2\n\nLicense: [CC BY 4.0][1]\n\nCitation: http://www.cell.com/cell/fulltext/S0092-8674(18)30154-5\n\n![enter image description here][2]\n\n\n### Inspiration\n\nAutomated methods to detect and classify human diseases from medical images.\n\n\n [1]: https://creativecommons.org/licenses/by/4.0/\n [2]: https://i.imgur.com/8AUJkin.png", "VersionNotes": "train/test/val", "TotalCompressedBytes": 1237249419.0, "TotalUncompressedBytes": 1237249419.0}]
|
[{"Id": 17810, "CreatorUserId": 1314380, "OwnerUserId": 1314380.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 23812.0, "CurrentDatasourceVersionId": 23851.0, "ForumId": 25540, "Type": 2, "CreationDate": "03/22/2018 05:42:41", "LastActivityDate": "03/22/2018", "TotalViews": 2063138, "TotalDownloads": 237932, "TotalVotes": 5834, "TotalKernels": 2058}]
|
[{"Id": 1314380, "UserName": "paultimothymooney", "DisplayName": "Paul Mooney", "RegisterDate": "10/05/2017", "PerformanceTier": 5}]
|
# !pip install -q kaggle
# !echo ' {"username":"xxshubhamxx","key":"68e5ce493b1319f94ed00cf9af01bc96"} ' > kaggle.json
# !mkdir -p ~/.kaggle
# !cp kaggle.json ~/.kaggle/
# !chmod 600 /root/.kaggle/kaggle.json
# !kaggle datasets download -d paultimothymooney/chest-xray-pneumonia
# !unzip -q chest-xray-pneumonia.zip
# !rm -rf chest-xray-pneumonia.zip kaggle.json
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (
Dense,
Activation,
Conv2D,
MaxPooling2D,
Flatten,
Dropout,
BatchNormalization,
)
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.metrics import (
precision_recall_curve,
roc_curve,
accuracy_score,
confusion_matrix,
precision_score,
recall_score,
)
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
# plt.style.use('fivethirtyeight')
import pickle
import os
import numpy as np
import cv2
labels = ["PNEUMONIA", "NORMAL"]
img_size = 224
def get_training_data(data_dir):
data = []
for label in labels:
path = os.path.join(data_dir, label)
class_num = labels.index(label)
for img in os.listdir(path):
try:
img_arr = cv2.imread(os.path.join(path, img), cv2.IMREAD_GRAYSCALE)
resized_arr = cv2.resize(img_arr, (img_size, img_size))
data.append([resized_arr, class_num])
except Exception as e:
print(e)
return np.array(data)
train = get_training_data("/kaggle/input/chest-xray-pneumonia//chest_xray/train")
test = get_training_data("/kaggle/input/chest-xray-pneumonia/chest_xray/test")
val = get_training_data("/kaggle/input/chest-xray-pneumonia/chest_xray/val")
pnenumonia = 0
normal = 0
for i, j in train:
if j == 0:
pnenumonia += 1
else:
normal += 1
print("Pneumonia:", pnenumonia)
print("Normal:", normal)
print("Pneumonia - Normal:", pnenumonia - normal)
X = []
y = []
for feature, label in train:
X.append(feature)
y.append(label)
for feature, label in test:
X.append(feature)
y.append(label)
for feature, label in val:
X.append(feature)
y.append(label)
# resize data for deep learning
X = np.array(X).reshape(-1, img_size, img_size, 1)
y = np.array(y)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=32
)
X_train, X_val, y_train, y_val = train_test_split(
X_train, y_train, test_size=0.20, random_state=32
)
X_train = X_train / 255
X_test = X_test / 255
X_val = X_val / 255
datagen = ImageDataGenerator(
featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=90,
zoom_range=0.1,
width_shift_range=0.1,
height_shift_range=0.1,
horizontal_flip=True,
vertical_flip=True,
)
datagen.fit(X_train)
import autokeras as ak
clf = ak.ImageClassifier(overwrite=True, max_trials=10, num_classes=2)
# Feed the image classifier with training data.
clf.fit(X_train, y_train, epochs=10)
# Predict with the best model.
predicted_y = clf.predict(X_test)
print(predicted_y)
# Evaluate the best model with testing data.
print(clf.evaluate(X_test, y_test))
model = clf.export_model()
print(type(model)) # <class 'tensorflow.python.keras.engine.training.Model'>
try:
model.save("model_autokeras", save_format="tf")
except Exception:
model.save("model_autokeras.h5")
model.save("model_autokeras_working.h5")
# GRU is Gated Reccurent Unit Layer
model = tf.keras.Sequential(
[
tf.keras.Input(shape=(224, 224)),
tf.keras.layers.GRU(128),
tf.keras.layers.Dense(
128,
activation="relu",
input_shape=(
28,
28,
),
),
tf.keras.layers.Dropout(0.2, input_shape=(128,)),
tf.keras.layers.Dense(10, activation="softmax"),
]
)
model.compile(
optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
def scheduler(epoch, lr):
if epoch < 8:
return lr
else:
return lr * tf.math.exp(-0.1)
my_callbacks = [
tf.keras.callbacks.EarlyStopping(monitor="val_loss", patience=2),
tf.keras.callbacks.LearningRateScheduler(scheduler),
]
trainer = model.fit(
X_train,
y_train,
validation_data=(X_test, y_test),
epochs=20,
callbacks=my_callbacks,
)
plt.plot(trainer.history["loss"], label="loss")
plt.plot(trainer.history["val_loss"], label="val_loss")
plt.legend()
plt.plot(trainer.history["accuracy"], label="acc")
plt.plot(trainer.history["val_accuracy"], label="val_acc")
plt.legend()
| false | 0 | 1,629 | 0 | 2,105 | 1,629 |
||
129539449
|
<jupyter_start><jupyter_text>Top 3 snakes Eastern Europe
This dataset is a comprehensive collection of high-quality images of three of the most common and dangerous snakes found in Eastern Europe - Natrix, Viperas, and Copperheads. This dataset includes approximately 475 images for each class, providing a rich and diverse set of images for use in training and testing classification models.
The dataset also includes a random subset of images, which can be used for a range of purposes, such as data augmentation or as an additional class for a more complex classification problem. The images are of varying sizes and resolutions, ensuring that the dataset is representative of real-world scenarios.
Kaggle dataset identifier: top-3-snakes-eastern-europe
<jupyter_script>import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os, urllib, itertools, shutil, random
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Model
from sklearn.metrics import confusion_matrix
import PIL
path_to_folder = "/kaggle/input/top-3-snakes-eastern-europe/snakes_dataset"
os.chdir(path_to_folder)
# Function to plot the confusion matrix
def plot_confusion_matrix(
cm, classes, normalize=False, title="Confusion matrix", cmap=plt.cm.Blues
):
plt.imshow(cm, interpolation="nearest", cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
thresh = cm.max() / 2.0
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(
j,
i,
cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black",
)
plt.tight_layout()
plt.ylabel("True label")
plt.xlabel("Predicted label")
# Function to get random samples from test set
def get_random_test_imgs():
random_test_imgs = []
for idx, _ in enumerate(classes):
if idx == 0:
row = random.sample(range(0, test_for_class - 1), columns)
else:
row = random.sample(
range(test_for_class * idx, test_for_class * (idx + 1) - 1), columns
)
random_test_imgs.extend(row)
return random_test_imgs
classes = ["natrix", "vipera", "coronella", "random"]
# Get images distribution between classes and train/test/validation
def get_sets_amount(valid_x, test_x):
count_images = 0
folders = [x for x in os.listdir(path_to_folder) if not x.startswith(".")]
for folder in folders:
path = os.path.join(path_to_folder, folder)
for image in os.listdir(path):
image_path = os.path.join(path, image)
if os.path.isfile(image_path) and not image.startswith("."):
count_images += 1
valid_amount = int(count_images * valid_x)
test_amount = int(count_images * test_x)
train_amount = count_images - valid_amount - test_amount
return train_amount, valid_amount, test_amount
train_amount, valid_amount, test_amount = get_sets_amount(0.15, 0.05)
valid_for_class = round(valid_amount / len(classes))
test_for_class = round(test_amount / len(classes))
print(
f"Train images: {train_amount}\nValid images: {valid_amount}\nTest images: {test_amount}"
)
# Divide images into train, valid and test
# Distribute between folders
os.chdir(path_to_folder)
if os.path.isdir("/kaggle/working/train") is False:
if not os.path.exists("/kaggle/working/valid"):
os.mkdir("/kaggle/working/valid")
if not os.path.exists("/kaggle/working/test"):
os.mkdir("/kaggle/working/test")
for name in classes:
shutil.copytree(f"{name}_images", f"/kaggle/working/train/{name}")
if not os.path.exists(f"/kaggle/working/valid/{name}"):
os.mkdir(f"/kaggle/working/valid/{name}")
if not os.path.exists(f"/kaggle/working/test/{name}"):
os.mkdir(f"/kaggle/working/test/{name}")
valid_samples = random.sample(
os.listdir(f"/kaggle/working/train/{name}"),
round(valid_amount / len(classes)),
)
for j in valid_samples:
shutil.move(
f"/kaggle/working/train/{name}/{j}", f"/kaggle/working/valid/{name}"
)
test_samples = random.sample(
os.listdir(f"/kaggle/working/train/{name}"),
round(test_amount / len(classes)),
)
for k in test_samples:
shutil.move(
f"/kaggle/working/train/{name}/{k}", f"/kaggle/working/test/{name}"
)
print("Created train, valid and test directories")
os.chdir("../..")
# Create generators train, valid and test
# Preprocess images for mobilenet
batch_size = 20
train_path = os.path.join(path_to_folder, "/kaggle/working/train")
valid_path = os.path.join(path_to_folder, "/kaggle/working/valid")
test_path = os.path.join(path_to_folder, "/kaggle/working/test")
target_size = (224, 224)
train_batches = ImageDataGenerator(
preprocessing_function=keras.applications.densenet.preprocess_input
).flow_from_directory(
directory=train_path,
target_size=target_size,
classes=classes,
batch_size=batch_size,
)
valid_batches = ImageDataGenerator(
preprocessing_function=keras.applications.densenet.preprocess_input
).flow_from_directory(
directory=valid_path,
target_size=target_size,
classes=classes,
batch_size=batch_size,
)
test_batches = ImageDataGenerator(
preprocessing_function=keras.applications.densenet.preprocess_input
).flow_from_directory(
directory=test_path,
target_size=target_size,
classes=classes,
batch_size=batch_size,
shuffle=False,
)
# Load pretrained model
mobile_net = keras.applications.DenseNet121()
# Let's look at this model, choose which last layers to replace
mobile_net.summary()
# Leave all layers except for last 5
# Add output layer with activation function softmax for defining the class
x = mobile_net.layers[-1].output
output = Dense(len(classes), activation="softmax")(x)
model = Model(inputs=mobile_net.input, outputs=output)
# When training we will work with weights of last 23 layers
for layer in model.layers[:-24]:
layer.trainable = False
# Define early stopping
early_stopping = keras.callbacks.EarlyStopping(
patience=7,
min_delta=0.001,
restore_best_weights=True,
)
# Compile the model
model.compile(optimizer="Adam", loss="categorical_crossentropy", metrics=["accuracy"])
# Train model
history = model.fit(
x=train_batches,
validation_data=valid_batches,
epochs=20,
callbacks=[early_stopping],
)
# Analyze loss functions (train and valid)
# We need to minimize val_loss
# without overfitting it on train set
history_df = pd.DataFrame(history.history)
history_df.loc[:, ["loss", "val_loss"]].plot(title="Cross-entropy")
history_df.loc[:, ["accuracy", "val_accuracy"]].plot(title="Accuracy")
# Check accuracy for images, which model hasn't seen before
scores = model.evaluate(test_batches)
print(f"Accuracy: {round((scores[1]*100), 2)}%")
# Plot confusion matrix
preds = model.predict(test_batches)
cm = confusion_matrix(y_true=test_batches.classes, y_pred=np.argmax(preds, axis=1))
plot_confusion_matrix(cm=cm, classes=classes, title="Confusion Matrix")
preds_catigorical = pd.Series(np.argmax(preds, axis=1)).replace(
{0: "Natrix", 1: "Vipera", 2: "Coronella", 3: "No snakes"}
)
columns = 5
fig, ax = plt.subplots(nrows=len(classes), ncols=columns, figsize=[16, 10])
random_test_imgs = get_random_test_imgs()
for i, axi in enumerate(ax.flat):
image = PIL.Image.open(test_batches.filepaths[random_test_imgs[i]])
axi.imshow(image)
axi.set_title(preds_catigorical[random_test_imgs[i]])
axi.axis("off")
plt.tight_layout()
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/539/129539449.ipynb
|
top-3-snakes-eastern-europe
|
tornadoski
|
[{"Id": 129539449, "ScriptId": 38514328, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14589472, "CreationDate": "05/14/2023 16:38:54", "VersionNumber": 1.0, "Title": "Classifying Snakes", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 185.0, "LinesInsertedFromPrevious": 185.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
|
[{"Id": 185704318, "KernelVersionId": 129539449, "SourceDatasetVersionId": 5566072}]
|
[{"Id": 5566072, "DatasetId": 3204559, "DatasourceVersionId": 5640875, "CreatorUserId": 14589472, "LicenseName": "Attribution 4.0 International (CC BY 4.0)", "CreationDate": "04/30/2023 13:37:14", "VersionNumber": 1.0, "Title": "Top 3 snakes Eastern Europe", "Slug": "top-3-snakes-eastern-europe", "Subtitle": "Natrix, Viperas and Copperheads images", "Description": "This dataset is a comprehensive collection of high-quality images of three of the most common and dangerous snakes found in Eastern Europe - Natrix, Viperas, and Copperheads. This dataset includes approximately 475 images for each class, providing a rich and diverse set of images for use in training and testing classification models.\n\nThe dataset also includes a random subset of images, which can be used for a range of purposes, such as data augmentation or as an additional class for a more complex classification problem. The images are of varying sizes and resolutions, ensuring that the dataset is representative of real-world scenarios.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3204559, "CreatorUserId": 14589472, "OwnerUserId": 14589472.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5566072.0, "CurrentDatasourceVersionId": 5640875.0, "ForumId": 3269236, "Type": 2, "CreationDate": "04/30/2023 13:37:14", "LastActivityDate": "04/30/2023", "TotalViews": 104, "TotalDownloads": 10, "TotalVotes": 1, "TotalKernels": 1}]
|
[{"Id": 14589472, "UserName": "tornadoski", "DisplayName": "Ivan Smirnov", "RegisterDate": "04/11/2023", "PerformanceTier": 1}]
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os, urllib, itertools, shutil, random
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Model
from sklearn.metrics import confusion_matrix
import PIL
path_to_folder = "/kaggle/input/top-3-snakes-eastern-europe/snakes_dataset"
os.chdir(path_to_folder)
# Function to plot the confusion matrix
def plot_confusion_matrix(
cm, classes, normalize=False, title="Confusion matrix", cmap=plt.cm.Blues
):
plt.imshow(cm, interpolation="nearest", cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
thresh = cm.max() / 2.0
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(
j,
i,
cm[i, j],
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black",
)
plt.tight_layout()
plt.ylabel("True label")
plt.xlabel("Predicted label")
# Function to get random samples from test set
def get_random_test_imgs():
random_test_imgs = []
for idx, _ in enumerate(classes):
if idx == 0:
row = random.sample(range(0, test_for_class - 1), columns)
else:
row = random.sample(
range(test_for_class * idx, test_for_class * (idx + 1) - 1), columns
)
random_test_imgs.extend(row)
return random_test_imgs
classes = ["natrix", "vipera", "coronella", "random"]
# Get images distribution between classes and train/test/validation
def get_sets_amount(valid_x, test_x):
count_images = 0
folders = [x for x in os.listdir(path_to_folder) if not x.startswith(".")]
for folder in folders:
path = os.path.join(path_to_folder, folder)
for image in os.listdir(path):
image_path = os.path.join(path, image)
if os.path.isfile(image_path) and not image.startswith("."):
count_images += 1
valid_amount = int(count_images * valid_x)
test_amount = int(count_images * test_x)
train_amount = count_images - valid_amount - test_amount
return train_amount, valid_amount, test_amount
train_amount, valid_amount, test_amount = get_sets_amount(0.15, 0.05)
valid_for_class = round(valid_amount / len(classes))
test_for_class = round(test_amount / len(classes))
print(
f"Train images: {train_amount}\nValid images: {valid_amount}\nTest images: {test_amount}"
)
# Divide images into train, valid and test
# Distribute between folders
os.chdir(path_to_folder)
if os.path.isdir("/kaggle/working/train") is False:
if not os.path.exists("/kaggle/working/valid"):
os.mkdir("/kaggle/working/valid")
if not os.path.exists("/kaggle/working/test"):
os.mkdir("/kaggle/working/test")
for name in classes:
shutil.copytree(f"{name}_images", f"/kaggle/working/train/{name}")
if not os.path.exists(f"/kaggle/working/valid/{name}"):
os.mkdir(f"/kaggle/working/valid/{name}")
if not os.path.exists(f"/kaggle/working/test/{name}"):
os.mkdir(f"/kaggle/working/test/{name}")
valid_samples = random.sample(
os.listdir(f"/kaggle/working/train/{name}"),
round(valid_amount / len(classes)),
)
for j in valid_samples:
shutil.move(
f"/kaggle/working/train/{name}/{j}", f"/kaggle/working/valid/{name}"
)
test_samples = random.sample(
os.listdir(f"/kaggle/working/train/{name}"),
round(test_amount / len(classes)),
)
for k in test_samples:
shutil.move(
f"/kaggle/working/train/{name}/{k}", f"/kaggle/working/test/{name}"
)
print("Created train, valid and test directories")
os.chdir("../..")
# Create generators train, valid and test
# Preprocess images for mobilenet
batch_size = 20
train_path = os.path.join(path_to_folder, "/kaggle/working/train")
valid_path = os.path.join(path_to_folder, "/kaggle/working/valid")
test_path = os.path.join(path_to_folder, "/kaggle/working/test")
target_size = (224, 224)
train_batches = ImageDataGenerator(
preprocessing_function=keras.applications.densenet.preprocess_input
).flow_from_directory(
directory=train_path,
target_size=target_size,
classes=classes,
batch_size=batch_size,
)
valid_batches = ImageDataGenerator(
preprocessing_function=keras.applications.densenet.preprocess_input
).flow_from_directory(
directory=valid_path,
target_size=target_size,
classes=classes,
batch_size=batch_size,
)
test_batches = ImageDataGenerator(
preprocessing_function=keras.applications.densenet.preprocess_input
).flow_from_directory(
directory=test_path,
target_size=target_size,
classes=classes,
batch_size=batch_size,
shuffle=False,
)
# Load pretrained model
mobile_net = keras.applications.DenseNet121()
# Let's look at this model, choose which last layers to replace
mobile_net.summary()
# Leave all layers except for last 5
# Add output layer with activation function softmax for defining the class
x = mobile_net.layers[-1].output
output = Dense(len(classes), activation="softmax")(x)
model = Model(inputs=mobile_net.input, outputs=output)
# When training we will work with weights of last 23 layers
for layer in model.layers[:-24]:
layer.trainable = False
# Define early stopping
early_stopping = keras.callbacks.EarlyStopping(
patience=7,
min_delta=0.001,
restore_best_weights=True,
)
# Compile the model
model.compile(optimizer="Adam", loss="categorical_crossentropy", metrics=["accuracy"])
# Train model
history = model.fit(
x=train_batches,
validation_data=valid_batches,
epochs=20,
callbacks=[early_stopping],
)
# Analyze loss functions (train and valid)
# We need to minimize val_loss
# without overfitting it on train set
history_df = pd.DataFrame(history.history)
history_df.loc[:, ["loss", "val_loss"]].plot(title="Cross-entropy")
history_df.loc[:, ["accuracy", "val_accuracy"]].plot(title="Accuracy")
# Check accuracy for images, which model hasn't seen before
scores = model.evaluate(test_batches)
print(f"Accuracy: {round((scores[1]*100), 2)}%")
# Plot confusion matrix
preds = model.predict(test_batches)
cm = confusion_matrix(y_true=test_batches.classes, y_pred=np.argmax(preds, axis=1))
plot_confusion_matrix(cm=cm, classes=classes, title="Confusion Matrix")
preds_catigorical = pd.Series(np.argmax(preds, axis=1)).replace(
{0: "Natrix", 1: "Vipera", 2: "Coronella", 3: "No snakes"}
)
columns = 5
fig, ax = plt.subplots(nrows=len(classes), ncols=columns, figsize=[16, 10])
random_test_imgs = get_random_test_imgs()
for i, axi in enumerate(ax.flat):
image = PIL.Image.open(test_batches.filepaths[random_test_imgs[i]])
axi.imshow(image)
axi.set_title(preds_catigorical[random_test_imgs[i]])
axi.axis("off")
plt.tight_layout()
plt.show()
| false | 0 | 2,130 | 1 | 2,296 | 2,130 |
||
129963401
|
<jupyter_start><jupyter_text>MIAS Mammography ROIs
This dataset is a preprocessed version of the original MIAS (Mammographic Image Analysis Society) dataset. It contains 1,679 images with the labels:
- normal (0)
- benign (1)
- malignant (2).
All images were preprocessed by removing artifacts, such as labels and enhancing the images using CLAHE (Contrast Limited AHE). For abnormal images (benign and malignant), the region of interest (ROI) was extracted using the x/y coordinates and radius provided by the original MIAS dataset, and a central breast area was used for normal images.
All training images were augmented to increase the dataset size using rotation (90°, 180°, 270°), vertical flipping, and random brightness and contrast changes, expanding the training data by a factor of 16. Finally, the training dataset was balanced, resulting in 528 training images per class.
The dataset consists of a total of 1584 training images, 47 validation images, and 48 testing images.
The images were resized to 224 x 224 pixels and are available in .npy format.
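For readers who want to approximate this preprocessing, a minimal sketch of the CLAHE enhancement plus the rotation/flip part of the augmentation is shown below (the clip limit, tile size, and function name are illustrative assumptions, not the exact pipeline used to build this dataset; the random brightness/contrast jitter is omitted for brevity):

```python
import cv2
import numpy as np

def enhance_and_augment(gray_roi):
    # CLAHE contrast enhancement (clip limit and tile size are assumed values)
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    enhanced = clahe.apply(gray_roi)           # expects an 8-bit grayscale ROI
    resized = cv2.resize(enhanced, (224, 224))
    # rotations by 90/180/270 degrees plus a vertical flip
    augmented = [resized]
    for k in (1, 2, 3):
        augmented.append(np.rot90(resized, k))
    augmented.append(np.flipud(resized))
    return augmented
```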
The original authors are Suckling et al. (2015) and a modified version, published on https://www.kaggle.com/datasets/kmader/mias-mammography was used to create this dataset.
The dataset was obtained under the CC BY 2.0 license (https://creativecommons.org/licenses/by/2.0/)
**Acknowledgements/LICENCE**
MAMMOGRAPHIC IMAGE ANALYSIS SOCIETY
MiniMammographic Database
LICENCE AGREEMENT
This is a legal agreement between you, the end user and the
Mammographic Image Analysis Society ("MIAS"). Upon installing the
MiniMammographic database (the "DATABASE") on your system you are
agreeing to be bound by the terms of this Agreement.
GRANT OF LICENCE
MIAS grants you the right to use the DATABASE, for research purposes
ONLY. For this purpose, you may edit, format, or otherwise modify the
DATABASE provided that the unmodified portions of the DATABASE included
in a modified work shall remain subject to the terms of this Agreement.
COPYRIGHT
The DATABASE is owned by MIAS and is protected by United Kingdom
copyright laws, international treaty provisions and all other
applicable national laws. Therefore you must treat the DATABASE
like any other copyrighted material. If the DATABASE is used in any
publications then reference must be made to the DATABASE within that
publication.
OTHER RESTRICTIONS
You may not rent, lease or sell the DATABASE.
LIABILITY
To the maximum extent permitted by applicable law, MIAS shall not
be liable for damages, other than death or personal injury,
whatsoever (including without limitation, damages for negligence,
loss of business, profits, business interruption, loss of
business information, or other pecuniary loss) arising out of the
use of or inability to use this DATABASE, even if MIAS has been
advised of the possibility of such damages. In any case, MIAS's
entire liability under this Agreement shall be limited to the
amount actually paid by you or your assignor, as the case may be,
for the DATABASE.
Kaggle dataset identifier: mias-mammography-rois
<jupyter_script># ## CNN model
## setting random seed
import random
random.seed(44)
print(random.random())
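## Note: seeding Python's `random` module alone does not fix NumPy or TensorFlow
## randomness; for fuller reproducibility one would also call np.random.seed(44)
## and tf.random.set_seed(44) once those libraries are imported below.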
# ## Gridsearch over class_weights
## import modules
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
import numpy as np
from keras.utils import np_utils
from imblearn.over_sampling import RandomOverSampler
from PIL import Image
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Conv2D, MaxPooling2D, Flatten
from keras import optimizers
from keras import losses
from tensorflow.keras.layers import AveragePooling2D
import cv2
from sklearn import metrics
import tensorflow as tf
from sklearn.metrics import confusion_matrix, classification_report
from keras.callbacks import EarlyStopping
from keras.models import load_model
from tensorflow.keras.initializers import he_uniform
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer, balanced_accuracy_score
from scikeras.wrappers import KerasClassifier
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
import pickle
import matplotlib.pyplot as plt
from tensorflow.keras.initializers import he_uniform
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.models import load_model
from sklearn.metrics import balanced_accuracy_score
## loading data
x_train = np.load("created_data/MIAS/patches/MIAS_X_train_roi_multi.npy") / 255
y_train = np.load("created_data/MIAS/patches/MIAS_y_train_roi_multi.npy")
x_valid = np.load("created_data/MIAS/patches/MIAS_X_valid_roi_multi.npy") / 255
y_valid = np.load("created_data/MIAS/patches/MIAS_y_valid_roi_multi.npy")
x_test = np.load("created_data/MIAS/patches/MIAS_X_test_roi_multi.npy") / 255
y_test = np.load("created_data/MIAS/patches/MIAS_y_test_roi_multi.npy")
## printing class counts
print(np.unique(y_train, return_counts=True))
# One-hot encode labels
num_classes = 3
y_train = np_utils.to_categorical(y_train, num_classes)
y_valid = np_utils.to_categorical(y_valid, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
## define class_weights to search over
class_weights = [{0: 1, 1: 1, 2: 1}, {0: 1, 1: 2, 2: 3}, {0: 2, 1: 3, 2: 4}]
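## For reference, `class_weight` makes Keras scale each sample's loss by the weight of
## its true class, so e.g. {0: 1, 1: 2, 2: 3} makes mistakes on malignant cases cost
## three times as much as mistakes on normal cases. A minimal illustration of the idea
## (not Keras's exact reduction) using plain NumPy:
def weighted_crossentropy_example(y_true_onehot, y_pred_probs, weights):
    ce = -np.sum(y_true_onehot * np.log(y_pred_probs + 1e-7), axis=1)  ## per-sample CE
    sample_w = np.array([weights[c] for c in np.argmax(y_true_onehot, axis=1)])
    return np.mean(sample_w * ce)  ## weighted average loss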
## define model names for each class_weight choice
model_names = ["model1.h5", "model2.h5", "model3.h5"]
## store best balanced accuracy and class_weight
best_acc = 0
best_class_weight = None
for w, name in zip(class_weights, model_names):
print(f"testing weight: {w}")
# Define model
model = Sequential()
# Add layers to model
## first convolutional layer and one maxpooling layer
model.add(
Conv2D(
16,
(5, 5),
activation="relu",
input_shape=(224, 224, 3),
strides=1,
kernel_initializer=he_uniform(),
name="conv2d",
)
)
model.add(MaxPooling2D((2, 2), strides=2, name="max_pooling2d"))
## second convolutional layer and one maxpooling layer
model.add(
Conv2D(
16,
(5, 5),
activation="relu",
strides=1,
kernel_initializer=he_uniform(),
name="conv2d_1",
)
)
model.add(MaxPooling2D((2, 2), strides=2, name="max_pooling2d_1"))
## third convolutional layer and 2 maxpooling layers
model.add(
Conv2D(
14,
(3, 3),
activation="relu",
strides=1,
padding="SAME",
kernel_initializer=he_uniform(),
name="conv2d_2",
)
)
model.add(MaxPooling2D((2, 2), strides=2, name="max_pooling2d_2"))
model.add(MaxPooling2D((2, 2), strides=2, name="max_pooling2d_3"))
## flattening model
model.add(Flatten(name="flatten"))
## 3 dense layers
model.add(
Dense(512, activation="relu", kernel_initializer=he_uniform(), name="dense")
)
model.add(
Dense(256, activation="relu", kernel_initializer=he_uniform(), name="dense_1")
)
model.add(
Dense(128, activation="relu", kernel_initializer=he_uniform(), name="dense_2")
)
## dropout
model.add(Dropout(0.5))
## output layer
model.add(
Dense(3, activation="softmax", kernel_initializer=he_uniform(), name="dense_3")
)
lr = 0.0001 # set the learning rate
adam = Adam(learning_rate=lr) ## adam optimizer
## compile model
model.compile(optimizer=adam, loss="categorical_crossentropy", metrics=["accuracy"])
# Define early stopping callback
early_stop = EarlyStopping(
monitor="val_loss", patience=10, restore_best_weights=True
)
## save model with best validation loss
model_checkpoint = ModelCheckpoint(name, save_best_only=True, verbose=1)
# Train model with early stopping
history = model.fit(
x_train,
y_train,
epochs=50,
batch_size=16,
validation_data=(x_valid, y_valid),
class_weight=w,
callbacks=[early_stop, model_checkpoint],
)
## load best model
model = load_model(name)
# Evaluate model on validation set using loss and overall accuracy
valid_loss, valid_acc = model.evaluate(x_valid, y_valid, verbose=0)
print("validation loss: ", valid_loss)
print("validation accuracy: ", valid_acc)
## getting predictions for validation set
y_pred = model.predict(x_valid)
preds_class = np.argmax(y_pred, axis=1)
obs = np.argmax(y_valid, axis=1)
## compute balanced accuracy
balanced_acc = balanced_accuracy_score(obs, preds_class)
print("balanced accuracy: ", balanced_acc)
## compute confusion matrix
cf = confusion_matrix(obs, preds_class)
print(cf)
## compare balanced_accuracy and store best weight and best balanced accuracy
if balanced_acc > best_acc:
best_acc = balanced_acc
best_class_weight = w
best_model = name
print("new best weight is:", w)
print("new best balanced acc is: ", balanced_acc)
# ## Results of best model on validation set
## extracting best weights: {0:1, 1:2, 2:3 } with best balanced accuracy 0.634
print(
f"the best balanced accuracy is {best_acc} with the class weight {best_class_weight}"
)
## loading the best model
model = load_model("model2.h5")
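## NOTE: `evaluate_model` is not defined anywhere in this notebook, so the helper below
## is an assumed stand-in. It returns a sklearn classification report, which is
## consistent with how the returned value is printed afterwards.
def evaluate_model(model, x, y_onehot):
    preds = np.argmax(model.predict(x), axis=1)
    obs = np.argmax(y_onehot, axis=1)
    return classification_report(
        obs, preds, labels=[0, 1, 2], target_names=["Normal", "Benign", "Malignant"]
    )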
## evaluate model on validation set
report = evaluate_model(model, x_valid, y_valid)
print(report)
## getting predictions on validation set
y_pred = model.predict(x_valid)
preds_class = np.argmax(y_pred, axis=1)
obs = np.argmax(y_valid, axis=1)
## compute balanced accuracy
balanced_acc = balanced_accuracy_score(obs, preds_class)
print("balanced accuracy: ", balanced_acc)
## confusion matrix
cf = confusion_matrix(obs, preds_class)
print(cf)
# ## Running model with best weights and averaging the confusion matrix
## fitting best model and averaging confusion matrix over 10 runs
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
import numpy as np
from keras.utils import np_utils
from imblearn.over_sampling import RandomOverSampler
from PIL import Image
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Conv2D, MaxPooling2D, Flatten
from keras import optimizers
from keras import losses
from tensorflow.keras.layers import AveragePooling2D
import cv2
from sklearn import metrics
import tensorflow as tf
from sklearn.metrics import confusion_matrix, classification_report
import seaborn as sns
import matplotlib.pyplot as plt
from keras.callbacks import EarlyStopping
from tensorflow.keras.initializers import he_uniform
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.optimizers import Adam
import pickle
## loading data
x_train = np.load("created_data/MIAS/patches/MIAS_X_train_roi_multi.npy") / 255
y_train = np.load("created_data/MIAS/patches/MIAS_y_train_roi_multi.npy")
x_valid = np.load("created_data/MIAS/patches/MIAS_X_valid_roi_multi.npy") / 255
y_valid = np.load("created_data/MIAS/patches/MIAS_y_valid_roi_multi.npy")
x_test = np.load("created_data/MIAS/patches/MIAS_X_test_roi_multi.npy") / 255
y_test = np.load("created_data/MIAS/patches/MIAS_y_test_roi_multi.npy")
## printing class counts for training set
print(np.unique(y_train, return_counts=True))
# One-hot encode labels
num_classes = 3
y_train = np_utils.to_categorical(y_train, num_classes)
y_valid = np_utils.to_categorical(y_valid, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
## define number of runs
n_runs = 10
## lists for collecting confusion matrices and classification metrics over n_runs
confusion_matrices = []
metrices = []
## run the model n_runs times
for n in range(n_runs):
print(f"run number {n}")
# Define model
model = Sequential()
# Add layers to model
## first convolutional layer and one maxpooling layer
model.add(
Conv2D(
16,
(5, 5),
activation="relu",
input_shape=(224, 224, 3),
strides=1,
kernel_initializer=he_uniform(),
name="conv2d",
)
)
model.add(MaxPooling2D((2, 2), strides=2, name="max_pooling2d"))
## second convolutional layer and one maxpooling layer
model.add(
Conv2D(
16,
(5, 5),
activation="relu",
strides=1,
kernel_initializer=he_uniform(),
name="conv2d_1",
)
)
model.add(MaxPooling2D((2, 2), strides=2, name="max_pooling2d_1"))
    ## third convolutional layer followed by two maxpooling layers
model.add(
Conv2D(
14,
(3, 3),
activation="relu",
strides=1,
padding="SAME",
kernel_initializer=he_uniform(),
name="conv2d_2",
)
)
model.add(MaxPooling2D((2, 2), strides=2, name="max_pooling2d_2"))
model.add(MaxPooling2D((2, 2), strides=2, name="max_pooling2d_3"))
## Flattening layer
model.add(Flatten(name="flatten"))
## Three dense layers
model.add(
Dense(512, activation="relu", kernel_initializer=he_uniform(), name="dense")
)
model.add(
Dense(256, activation="relu", kernel_initializer=he_uniform(), name="dense_1")
)
model.add(
Dense(128, activation="relu", kernel_initializer=he_uniform(), name="dense_2")
)
## Dropout
model.add(Dropout(0.5))
## Output layer
model.add(
Dense(3, activation="softmax", kernel_initializer=he_uniform(), name="dense_3")
)
lr = 0.0001 # set the learning rate
adam = Adam(learning_rate=lr)
# Compile model
model.compile(optimizer=adam, loss="categorical_crossentropy", metrics=["accuracy"])
# Define early stopping callback
early_stop = EarlyStopping(
monitor="val_loss", patience=10, restore_best_weights=True
)
## store first model separately
if n == 0:
model_checkpoint = ModelCheckpoint(
"best_model.h5", save_best_only=True, verbose=1
)
    else:  ## later runs share one checkpoint file, so each run overwrites the previous one
model_checkpoint = ModelCheckpoint("mod_.h5", save_best_only=True, verbose=1)
# Train model with early stopping
history = model.fit(
x_train,
y_train,
epochs=50,
batch_size=16,
validation_data=(x_valid, y_valid),
class_weight=best_class_weight,
callbacks=[early_stop, model_checkpoint],
)
## load best model
from keras.models import load_model
if n == 0:
model = load_model("best_model.h5")
else:
model = load_model("mod_.h5")
# Evaluate model
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0)
print("Test loss: ", test_loss)
print("Test accuracy: ", test_acc)
if n == 0: ## store history for first model
with open("history_best_model.pkl", "wb") as f:
pickle.dump(history.history, f)
## evaluate model on test set
report = evaluate_model(model, x_test, y_test)
print(report)
## Get predictions for test est
y_pred = model.predict(x_test)
y_pred_classes = np.argmax(y_pred, axis=1)
y_true = np.argmax(y_test, axis=1)
# Calculate confusion matrix for test data
cm = confusion_matrix(y_true, y_pred_classes)
print(cm)
class_names = ["Class 0", "Class 1", "Class 2"]
## calculate classification report for test data
report = classification_report(
y_true, y_pred_classes, target_names=class_names, output_dict=True
)
## append confusion matrices and classification report
confusion_matrices.append(cm)
metrices.append(report)
# ## Create dataframe of averaged confusion matrices
class_names = ["Class 0", "Class 1", "Class 2"]
total_cm = np.zeros((len(class_names), len(class_names)))
# Loop over the classification reports and confusion matrices and add them to the running totals
for cm in confusion_matrices:
# Add the confusion matrix to the running total
total_cm += cm
avg_cm = total_cm / len(confusion_matrices)
print(avg_cm)
print(type(avg_cm))
# Create confusion matrix
plt.figure(figsize=(5, 5))
class_names = ["Normal", "Benign", "Malignant"]
sns.heatmap(
avg_cm,
annot=True,
fmt="g",
cmap="Blues",
square=True,
cbar=False,
xticklabels=class_names,
yticklabels=class_names,
)
plt.xlabel("Predicted classes")
plt.ylabel("True classes")
plt.show()
## save avg confusion matrix
# Save the matrix to a file
np.save("avg_cm.npy", avg_cm)
# Load the matrix from the file
avg_cm = np.load("avg_cm.npy")
print(avg_cm)
# ## Calculating some metrics based on the averaged confusion matrix
## loading confusion matrix
conf_mat = np.load("avg_cm.npy")
print(conf_mat)
# Calculate total number of predictions
total_predictions = np.sum(conf_mat)
# Calculate number of correct predictions
correct_predictions = np.sum(np.diag(conf_mat))
# Calculate overall accuracy
accuracy = correct_predictions / total_predictions
print("Overall Accuracy: {:.3f}".format(accuracy))
# Calculate balanced accuracy
class_counts = np.sum(
    conf_mat, axis=1
)  ## true instances per class (row sums of the confusion matrix)
class_correct_predictions = np.diag(conf_mat)  ## true positives per class (diagonal entries)
class_accuracy = (
class_correct_predictions / class_counts.flatten()
) ## calculate accuracy per class = tp/tp + fn
balanced_accuracy = np.mean(
class_accuracy
) ## averaging over all class_accuracies = balanced accuracy
print("Balanced Accuracy: {:.3f}".format(balanced_accuracy))
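## Quick sanity check of the two metrics on a toy 2x2 matrix [[90, 10], [5, 5]]
## (illustrative numbers only): overall accuracy = 95/110 ~ 0.864, while balanced
## accuracy = mean(90/100, 5/10) = 0.70, so the weak minority class drags it down.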
# Compute metrics for each class
metrics_per_class = {} ## dict to store metrices
for i in range(len(conf_mat)):
tp = conf_mat[i, i] # true positive
fp = (
np.sum(conf_mat[:, i]) - tp
) # false positive (predicted class i but not class i)
fn = (
np.sum(conf_mat[i, :]) - tp
) # false negatives (not predicted class i but class i)
precision = tp / (
tp + fp
) ## precision of class i = tp / (tp + fp) -> correctly classified out of all predicted
recall = tp / (
tp + fn
) ## recall of class i = tp / (tp + fn) -> correctly classified out of all instances of class i
f1_score = (
2 * (precision * recall) / (precision + recall)
) ## f1-score = harmonic mean of precision and recall
    ## store metrics per class
metrics_per_class[f"Class {i}"] = {
"Precision": precision,
"Recall": recall,
"F1-score": f1_score,
}
## define dict to store macro averages
macro_metrics = {}
# loop over classes
for class_name, metrics in metrics_per_class.items():
print(f"{class_name}:")
    for metric_name, metric_value in metrics.items():  ## loop over the metrics for this class
print(f"\t{metric_name}: {metric_value:.3f}") ## print metrics for each class
macro_metrics[metric_name] = (
macro_metrics.get(metric_name, 0) + metric_value
) ## add class-wise metrics to total count
## loop over metrics and print the macro average
for k, v in macro_metrics.items():
macro_metrics[k] = v / len(conf_mat[0])
print(f"Macro {k}, {round(macro_metrics[k],3)} ")
# ## Generate confusion matrix of first model and plot history
## load model
model = load_model("best_model.h5")
## get predictions on test set
preds = model.predict(x_test)
preds_class = np.argmax(preds, axis=1)
obs = np.argmax(y_test, axis=1)
## generate confusion matrix for best model
cf = confusion_matrix(obs, preds_class)
print(cf)
## [[29 1 1]
## [ 0 6 3]
## [ 1 4 3]]
## plot model accuracy on train and validation set
with open("history_best_model.pkl", "rb") as f:
history = pickle.load(f)
plt.plot(history["accuracy"])
plt.plot(history["val_accuracy"])
plt.title("Model Accuracy")
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.legend(["train", "validation"], loc="upper left")
plt.show()
## plot model loss on train and validation set
with open("history_best_model.pkl", "rb") as f:
history = pickle.load(f)
plt.plot(history["loss"])
plt.plot(history["val_loss"])
plt.title("Model loss")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend(["train", "validation"], loc="upper left")
plt.show()
# ## Evaluate first model
report = evaluate_model(model, x_test, y_test)
print(report)
# ## Get model summary
model.summary()
# ## Plot ROC curve of model on test set
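## NOTE: `plot_roc_curve` is not defined in this notebook; the sketch below is an
## assumed one-vs-rest implementation for the three classes, using the one-hot test
## labels directly.
from sklearn.metrics import roc_curve, auc

def plot_roc_curve(model, x, y_onehot, class_names=("Normal", "Benign", "Malignant")):
    probs = model.predict(x)
    plt.figure(figsize=(5, 5))
    for i, name in enumerate(class_names):
        fpr, tpr, _ = roc_curve(y_onehot[:, i], probs[:, i])
        plt.plot(fpr, tpr, label=f"{name} (AUC = {auc(fpr, tpr):.2f})")
    plt.plot([0, 1], [0, 1], linestyle="--", color="grey")  ## chance level
    plt.xlabel("False positive rate")
    plt.ylabel("True positive rate")
    plt.title("One-vs-rest ROC curves")
    plt.legend(loc="lower right")
    plt.show()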
plot_roc_curve(model, x_test, y_test)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/963/129963401.ipynb
|
mias-mammography-rois
|
annkristinbalve
|
[{"Id": 129963401, "ScriptId": 38659947, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12818448, "CreationDate": "05/17/2023 18:40:57", "VersionNumber": 2.0, "Title": "notebook92bdb9132c", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 463.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 463.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 186400075, "KernelVersionId": 129963401, "SourceDatasetVersionId": 5708327}]
|
[{"Id": 5708327, "DatasetId": 3281666, "DatasourceVersionId": 5784400, "CreatorUserId": 12818448, "LicenseName": "Other (specified in description)", "CreationDate": "05/17/2023 13:16:48", "VersionNumber": 1.0, "Title": "MIAS Mammography ROIs", "Slug": "mias-mammography-rois", "Subtitle": "A modified version of the MIAS dataset containing preprocessed, augmented ROIs", "Description": "This dataset is a preprocessed version of the original MIAS (Mammographic Image Analysis Society) dataset. It contains 1,679 images with the labels: \n- normal (0)\n- benign (1)\n- malignant (2). \n\nAll images were preprocessed by removing artifacts, such as labels and enhancing the images using CLAHE (Contrast Limited AHE). For abnormal images (benign and malignant), the region of interest (ROI) was extracted using the x/y coordinates and radius provided by the original MIAS dataset, and a central breast area was used for normal images. \n\nAll training images were augmented to increase the dataset size by using rotation (90\u00b0, 180\u00b0, 270\u00b0), vertical flipping, random bightness and contrast changes, augmenting the training data by a factor of 16. Finally, the training dataset was balanced, resulting in 528 training images per class.\n\nThe dataset consists of a total of 1584 training images, 47 validation images, and 48 testing images. \n\nThe images were resized to 224 x 224 pixels and are available in .npy format. \n\nThe original authors are Suckling et al. (2015) and a modified version, published on https://www.kaggle.com/datasets/kmader/mias-mammography was used to create this dataset. \n\nThe dataset was obtained under the CC BY 2.0 license (https://creativecommons.org/licenses/by/2.0/) \n\n**Acknowledgements/LICENCE**\n\nMAMMOGRAPHIC IMAGE ANALYSIS SOCIETY\nMiniMammographic Database\nLICENCE AGREEMENT\nThis is a legal agreement between you, the end user and the\nMammographic Image Analysis Society (\"MIAS\"). Upon installing the\nMiniMammographic database (the \"DATABASE\") on your system you are\nagreeing to be bound by the terms of this Agreement.\n\nGRANT OF LICENCE\nMIAS grants you the right to use the DATABASE, for research purposes\nONLY. For this purpose, you may edit, format, or otherwise modify the\nDATABASE provided that the unmodified portions of the DATABASE included\nin a modified work shall remain subject to the terms of this Agreement.\nCOPYRIGHT\nThe DATABASE is owned by MIAS and is protected by United Kingdom\ncopyright laws, international treaty provisions and all other\napplicable national laws. Therefore you must treat the DATABASE\nlike any other copyrighted material. If the DATABASE is used in any\npublications then reference must be made to the DATABASE within that\npublication.\nOTHER RESTRICTIONS\nYou may not rent, lease or sell the DATABASE.\nLIABILITY\nTo the maximum extent permitted by applicable law, MIAS shall not\nbe liable for damages, other than death or personal injury,\nwhatsoever (including without limitation, damages for negligence,\nloss of business, profits, business interruption, loss of\nbusiness information, or other pecuniary loss) arising out of the\nuse of or inability to use this DATABASE, even if MIAS has been\nadvised of the possibility of such damages. In any case, MIAS's\nentire liability under this Agreement shall be limited to the\namount actually paid by you or your assignor, as the case may be,\nfor the DATABASE.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3281666, "CreatorUserId": 12818448, "OwnerUserId": 12818448.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5708327.0, "CurrentDatasourceVersionId": 5784400.0, "ForumId": 3347402, "Type": 2, "CreationDate": "05/17/2023 13:16:48", "LastActivityDate": "05/17/2023", "TotalViews": 331, "TotalDownloads": 25, "TotalVotes": 1, "TotalKernels": 1}]
|
[{"Id": 12818448, "UserName": "annkristinbalve", "DisplayName": "Ann-Kristin Balve", "RegisterDate": "12/09/2022", "PerformanceTier": 0}]
|
# ## CNN model
## setting random seed
import random
random.seed(44)
print(random.random())
# ## Gridsearch over class_weights
## import modules
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
import numpy as np
from keras.utils import np_utils
from imblearn.over_sampling import RandomOverSampler
from PIL import Image
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Conv2D, MaxPooling2D, Flatten
from keras import optimizers
from keras import losses
from tensorflow.keras.layers import AveragePooling2D
import cv2
from sklearn import metrics
import tensorflow as tf
from sklearn.metrics import confusion_matrix
from keras.callbacks import EarlyStopping
from keras.models import load_model
from tensorflow.keras.initializers import he_uniform
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer, balanced_accuracy_score
from scikeras.wrappers import KerasClassifier
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
import pickle
import matplotlib.pyplot as plt
from tensorflow.keras.initializers import he_uniform
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from keras.models import load_model
from sklearn.metrics import balanced_accuracy_score
## loading data
x_train = np.load("created_data/MIAS/patches/MIAS_X_train_roi_multi.npy") / 255
y_train = np.load("created_data/MIAS/patches/MIAS_y_train_roi_multi.npy")
x_valid = np.load("created_data/MIAS/patches/MIAS_X_valid_roi_multi.npy") / 255
y_valid = np.load("created_data/MIAS/patches/MIAS_y_valid_roi_multi.npy")
x_test = np.load("created_data/MIAS/patches/MIAS_X_test_roi_multi.npy") / 255
y_test = np.load("created_data/MIAS/patches/MIAS_y_test_roi_multi.npy")
## printing class counts
print(np.unique(y_train, return_counts=True))
# One-hot encode labels
num_classes = 3
y_train = np_utils.to_categorical(y_train, num_classes)
y_valid = np_utils.to_categorical(y_valid, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
## define class_weights to search over
class_weights = [{0: 1, 1: 1, 2: 1}, {0: 1, 1: 2, 2: 3}, {0: 2, 1: 3, 2: 4}]
## define model names for each class_weight choice
model_names = ["model1.h5", "model2.h5", "model3.h5"]
## store best balanced accuracy and class_weight
best_acc = 0
best_class_weight = None
for w, name in zip(class_weights, model_names):
print(f"testing weight: {w}")
# Define model
model = Sequential()
# Add layers to model
## first convolutional layer and one maxpooling layer
model.add(
Conv2D(
16,
(5, 5),
activation="relu",
input_shape=(224, 224, 3),
strides=1,
kernel_initializer=he_uniform(),
name="conv2d",
)
)
model.add(MaxPooling2D((2, 2), strides=2, name="max_pooling2d"))
## second convolutional layer and one maxpooling layer
model.add(
Conv2D(
16,
(5, 5),
activation="relu",
strides=1,
kernel_initializer=he_uniform(),
name="conv2d_1",
)
)
model.add(MaxPooling2D((2, 2), strides=2, name="max_pooling2d_1"))
## third convolutional layer and 2 maxpooling layers
model.add(
Conv2D(
14,
(3, 3),
activation="relu",
strides=1,
padding="SAME",
kernel_initializer=he_uniform(),
name="conv2d_2",
)
)
model.add(MaxPooling2D((2, 2), strides=2, name="max_pooling2d_2"))
model.add(MaxPooling2D((2, 2), strides=2, name="max_pooling2d_3"))
## flattening model
model.add(Flatten(name="flatten"))
## 3 dense layers
model.add(
Dense(512, activation="relu", kernel_initializer=he_uniform(), name="dense")
)
model.add(
Dense(256, activation="relu", kernel_initializer=he_uniform(), name="dense_1")
)
model.add(
Dense(128, activation="relu", kernel_initializer=he_uniform(), name="dense_2")
)
## dropout
model.add(Dropout(0.5))
## output layer
model.add(
Dense(3, activation="softmax", kernel_initializer=he_uniform(), name="dense_3")
)
lr = 0.0001 # set the learning rate
adam = Adam(learning_rate=lr) ## adam optimizer
## compile model
model.compile(optimizer=adam, loss="categorical_crossentropy", metrics=["accuracy"])
# Define early stopping callback
early_stop = EarlyStopping(
monitor="val_loss", patience=10, restore_best_weights=True
)
## save model with best validation loss
model_checkpoint = ModelCheckpoint(name, save_best_only=True, verbose=1)
# Train model with early stopping
history = model.fit(
x_train,
y_train,
epochs=50,
batch_size=16,
validation_data=(x_valid, y_valid),
class_weight=w,
callbacks=[early_stop, model_checkpoint],
)
## load best model
model = load_model(name)
# Evaluate model on validation set using loss and overall accuracy
valid_loss, valid_acc = model.evaluate(x_valid, y_valid, verbose=0)
print("validation loss: ", valid_loss)
print("validation accuracy: ", valid_acc)
## getting predictions for validation set
y_pred = model.predict(x_valid)
preds_class = np.argmax(y_pred, axis=1)
obs = np.argmax(y_valid, axis=1)
## compute balanced accuracy
balanced_acc = balanced_accuracy_score(obs, preds_class)
print("balanced accuracy: ", balanced_acc)
## compute confusion matrix
cf = confusion_matrix(obs, preds_class)
print(cf)
## compare balanced_accuracy and store best weight and best balanced accuracy
if balanced_acc > best_acc:
best_acc = balanced_acc
best_class_weight = w
best_model = name
print("new best weight is:", w)
print("new best balanced acc is: ", balanced_acc)
# ## Results of best model on validation set
## extracting best weights: {0:1, 1:2, 2:3 } with best balanced accuracy 0.634
print(
f"the best balanced accuracy is {best_acc} with the class weight {best_class_weight}"
)
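# NOTE: `evaluate_model` is used below and in later cells but is never defined in this
# notebook. The helper sketched here is an assumption (not the author's original code):
# it simply returns sklearn's classification report for the model's predictions.
from sklearn.metrics import classification_report


def evaluate_model(model, x, y_onehot):
    preds = np.argmax(model.predict(x), axis=1)
    obs = np.argmax(y_onehot, axis=1)
    return classification_report(obs, preds)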
## loading the best model
model = load_model("model2.h5")
## evaluate model on validation set
report = evaluate_model(model, x_valid, y_valid)
print(report)
## getting predictions on validation set
y_pred = model.predict(x_valid)
preds_class = np.argmax(y_pred, axis=1)
obs = np.argmax(y_valid, axis=1)
## compute balanced accuracy
balanced_acc = balanced_accuracy_score(obs, preds_class)
print("balanced accuracy: ", balanced_acc)
## confusion matrix
cf = confusion_matrix(obs, preds_class)
print(cf)
# ## Running model with best weights and averaging the confusion matrix
## fitting best model and averaging confusion matrix over 10 runs
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
import numpy as np
from keras.utils import np_utils
from imblearn.over_sampling import RandomOverSampler
from PIL import Image
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Conv2D, MaxPooling2D, Flatten
from keras import optimizers
from keras import losses
from tensorflow.keras.layers import AveragePooling2D
import cv2
from sklearn import metrics
import tensorflow as tf
from sklearn.metrics import confusion_matrix, classification_report
import seaborn as sns  ## used later for the confusion-matrix heatmap
from keras.callbacks import EarlyStopping
from tensorflow.keras.initializers import he_uniform
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.optimizers import Adam
import pickle
## loading data
x_train = np.load("created_data/MIAS/patches/MIAS_X_train_roi_multi.npy") / 255
y_train = np.load("created_data/MIAS/patches/MIAS_y_train_roi_multi.npy")
x_valid = np.load("created_data/MIAS/patches/MIAS_X_valid_roi_multi.npy") / 255
y_valid = np.load("created_data/MIAS/patches/MIAS_y_valid_roi_multi.npy")
x_test = np.load("created_data/MIAS/patches/MIAS_X_test_roi_multi.npy") / 255
y_test = np.load("created_data/MIAS/patches/MIAS_y_test_roi_multi.npy")
## printing class counts for training set
print(np.unique(y_train, return_counts=True))
# One-hot encode labels
num_classes = 3
y_train = np_utils.to_categorical(y_train, num_classes)
y_valid = np_utils.to_categorical(y_valid, num_classes)
y_test = np_utils.to_categorical(y_test, num_classes)
## define number of runs
n_runs = 10
## lists for appending confusion matrices and metrics over n_runs
confusion_matrices = []
metrices = []
## run the model n_runs times
for n in range(n_runs):
print(f"run number {n}")
# Define model
model = Sequential()
# Add layers to model
## first convolutional layer and one maxpooling layer
model.add(
Conv2D(
16,
(5, 5),
activation="relu",
input_shape=(224, 224, 3),
strides=1,
kernel_initializer=he_uniform(),
name="conv2d",
)
)
model.add(MaxPooling2D((2, 2), strides=2, name="max_pooling2d"))
## second convolutional layer and one maxpooling layer
model.add(
Conv2D(
16,
(5, 5),
activation="relu",
strides=1,
kernel_initializer=he_uniform(),
name="conv2d_1",
)
)
model.add(MaxPooling2D((2, 2), strides=2, name="max_pooling2d_1"))
    ## third convolutional layer and two maxpooling layers
model.add(
Conv2D(
14,
(3, 3),
activation="relu",
strides=1,
padding="SAME",
kernel_initializer=he_uniform(),
name="conv2d_2",
)
)
model.add(MaxPooling2D((2, 2), strides=2, name="max_pooling2d_2"))
model.add(MaxPooling2D((2, 2), strides=2, name="max_pooling2d_3"))
## Flattening layer
model.add(Flatten(name="flatten"))
## Three dense layers
model.add(
Dense(512, activation="relu", kernel_initializer=he_uniform(), name="dense")
)
model.add(
Dense(256, activation="relu", kernel_initializer=he_uniform(), name="dense_1")
)
model.add(
Dense(128, activation="relu", kernel_initializer=he_uniform(), name="dense_2")
)
## Dropout
model.add(Dropout(0.5))
## Output layer
model.add(
Dense(3, activation="softmax", kernel_initializer=he_uniform(), name="dense_3")
)
lr = 0.0001 # set the learning rate
adam = Adam(learning_rate=lr)
# Compile model
model.compile(optimizer=adam, loss="categorical_crossentropy", metrics=["accuracy"])
# Define early stopping callback
early_stop = EarlyStopping(
monitor="val_loss", patience=10, restore_best_weights=True
)
## store first model separately
if n == 0:
model_checkpoint = ModelCheckpoint(
"best_model.h5", save_best_only=True, verbose=1
)
else: ## other models get stored per run but overwrite each other
model_checkpoint = ModelCheckpoint("mod_.h5", save_best_only=True, verbose=1)
# Train model with early stopping
history = model.fit(
x_train,
y_train,
epochs=50,
batch_size=16,
validation_data=(x_valid, y_valid),
class_weight=best_class_weight,
callbacks=[early_stop, model_checkpoint],
)
## load best model
from keras.models import load_model
if n == 0:
model = load_model("best_model.h5")
else:
model = load_model("mod_.h5")
# Evaluate model
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0)
print("Test loss: ", test_loss)
print("Test accuracy: ", test_acc)
if n == 0: ## store history for first model
with open("history_best_model.pkl", "wb") as f:
pickle.dump(history.history, f)
## evaluate model on test set
report = evaluate_model(model, x_test, y_test)
print(report)
## Get predictions for test est
y_pred = model.predict(x_test)
y_pred_classes = np.argmax(y_pred, axis=1)
y_true = np.argmax(y_test, axis=1)
# Calculate confusion matrix for test data
cm = confusion_matrix(y_true, y_pred_classes)
print(cm)
class_names = ["Class 0", "Class 1", "Class 2"]
## calculate classification report for test data
report = classification_report(
y_true, y_pred_classes, target_names=class_names, output_dict=True
)
## append confusion matrices and classification report
confusion_matrices.append(cm)
metrices.append(report)
# ## Create dataframe of averaged confusion matrices
class_names = ["Class 0", "Class 1", "Class 2"]
total_cm = np.zeros((len(class_names), len(class_names)))
# Loop over the classification reports and confusion matrices and add them to the running totals
for cm in confusion_matrices:
# Add the confusion matrix to the running total
total_cm += cm
avg_cm = total_cm / len(confusion_matrices)
print(avg_cm)
print(type(avg_cm))
# Create confusion matrix
plt.figure(figsize=(5, 5))
class_names = ["Normal", "Benign", "Malignant"]
sns.heatmap(
avg_cm,
annot=True,
fmt="g",
cmap="Blues",
square=True,
cbar=False,
xticklabels=class_names,
yticklabels=class_names,
)
plt.xlabel("Predicted classes")
plt.ylabel("True classes")
plt.show()
## save avg confusion matrix
# Save the matrix to a file
np.save("avg_cm.npy", avg_cm)
# Load the matrix from the file
avg_cm = np.load("avg_cm.npy")
print(avg_cm)
# ## Calculating some metrics based on the averaged confusion matrix
## loading confusion matrix
conf_mat = np.load("avg_cm.npy")
print(conf_mat)
# Calculate total number of predictions
total_predictions = np.sum(conf_mat)
# Calculate number of correct predictions
correct_predictions = np.sum(np.diag(conf_mat))
# Calculate overall accuracy
accuracy = correct_predictions / total_predictions
print("Overall Accuracy: {:.3f}".format(accuracy))
# Calculate balanced accuracy
class_counts = np.sum(
conf_mat, axis=1
) ## true instances per class (row marginal - sum over cols)
class_correct_predictions = np.diag(conf_mat)  ## true positives per class (diagonal entries)
class_accuracy = (
class_correct_predictions / class_counts.flatten()
) ## calculate accuracy per class = tp/tp + fn
balanced_accuracy = np.mean(
class_accuracy
) ## averaging over all class_accuracies = balanced accuracy
print("Balanced Accuracy: {:.3f}".format(balanced_accuracy))
# Compute metrics for each class
metrics_per_class = {} ## dict to store metrices
for i in range(len(conf_mat)):
tp = conf_mat[i, i] # true positive
fp = (
np.sum(conf_mat[:, i]) - tp
) # false positive (predicted class i but not class i)
fn = (
np.sum(conf_mat[i, :]) - tp
) # false negatives (not predicted class i but class i)
precision = tp / (
tp + fp
) ## precision of class i = tp / (tp + fp) -> correctly classified out of all predicted
recall = tp / (
tp + fn
) ## recall of class i = tp / (tp + fn) -> correctly classified out of all instances of class i
f1_score = (
2 * (precision * recall) / (precision + recall)
) ## f1-score = harmonic mean of precision and recall
    ## store metrics per class
metrics_per_class[f"Class {i}"] = {
"Precision": precision,
"Recall": recall,
"F1-score": f1_score,
}
## define dict to store macro averages
macro_metrics = {}
# loop over classes
for class_name, metrics in metrics_per_class.items():
print(f"{class_name}:")
    for metric_name, metric_value in metrics.items():  ## loop over metrics
print(f"\t{metric_name}: {metric_value:.3f}") ## print metrics for each class
macro_metrics[metric_name] = (
macro_metrics.get(metric_name, 0) + metric_value
) ## add class-wise metrics to total count
## loop over metrics and print the macro average
for k, v in macro_metrics.items():
macro_metrics[k] = v / len(conf_mat[0])
print(f"Macro {k}, {round(macro_metrics[k],3)} ")
# ## Generate confusion matrix of first model and plot history
## load model
model = load_model("best_model.h5")
## get predictions on test set
preds = model.predict(x_test)
preds_class = np.argmax(preds, axis=1)
obs = np.argmax(y_test, axis=1)
## generate confusion matrix for best model
cf = confusion_matrix(obs, preds_class)
print(cf)
## [[29 1 1]
## [ 0 6 3]
## [ 1 4 3]]
## plot model accuracy on train and validation set
with open("history_best_model.pkl", "rb") as f:
history = pickle.load(f)
plt.plot(history["accuracy"])
plt.plot(history["val_accuracy"])
plt.title("Model Accuracy")
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.legend(["train", "validation"], loc="upper left")
plt.show()
## plot model loss on train and validation set
with open("history_best_model.pkl", "rb") as f:
history = pickle.load(f)
plt.plot(history["loss"])
plt.plot(history["val_loss"])
plt.title("Model loss")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend(["train", "validation"], loc="upper left")
plt.show()
# ## Evaluate first model
report = evaluate_model(model, x_test, y_test)
print(report)
# ## Get model summary
model.summary()
# ## Plot ROC curve of model on test set
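# NOTE: `plot_roc_curve` is not defined anywhere in this notebook either. The sketch below is
# an assumed helper (not the author's original code): it draws one-vs-rest ROC curves for the
# three classes from the model's softmax outputs, using sklearn's roc_curve/auc.
from sklearn.metrics import roc_curve, auc


def plot_roc_curve(model, x, y_onehot):
    y_score = model.predict(x)  ## predicted class probabilities, shape (n_samples, n_classes)
    n_classes = y_onehot.shape[1]
    plt.figure(figsize=(6, 6))
    for c in range(n_classes):
        fpr, tpr, _ = roc_curve(y_onehot[:, c], y_score[:, c])
        plt.plot(fpr, tpr, label=f"Class {c} (AUC = {auc(fpr, tpr):.3f})")
    plt.plot([0, 1], [0, 1], "k--")  ## chance level
    plt.xlabel("False positive rate")
    plt.ylabel("True positive rate")
    plt.title("One-vs-rest ROC curves")
    plt.legend()
    plt.show()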
plot_roc_curve(model, x_test, y_test)
| false | 0 | 5,120 | 0 | 5,908 | 5,120 |
||
129963881
|
<jupyter_start><jupyter_text>Brian Tumor Dataset
### Context
This dataset consists of the scanned images of brain of patient diagnosed of brain tumour.
### Content
Separated files for train and test data with separating features and labels
Kaggle dataset identifier: brian-tumor-dataset
<jupyter_script># # Brain Tumour Classifier
# This image classifier was built as an experiment for lesson 2 of Fast.ai's ML course. I'm using it to learn the basics of their library on a meaningful dataset.
# ## Initialising the Dataloader
from fastai.vision.all import *
# Defines a label function for the data loader. The label is "Healthy" when the image file name starts with "Not" (i.e. it is labelled "Not Cancer"), and "Tumour" otherwise.
def is_tumour(x):
    if x.startswith("Not"):
        return "Healthy"
    else:
        return "Tumour"
# Defines an ImageDataLoader that holds out 30% of the images for validation. Images are resized to 128x128.
path = "/kaggle/input/brian-tumor-dataset/Brain Tumor Data Set/Brain Tumor Data Set"
dataloaders = ImageDataLoaders.from_name_func(
"./",
get_image_files(path),
valid_pct=0.3,
seed=32,
label_func=is_tumour,
item_tfms=Resize(128, method="pad"),
)
# A vision learner based on ResNet-18 is defined and the model is fine tuned for 3 epochs.
learn = vision_learner(dataloaders, resnet18, metrics=error_rate)
learn.fine_tune(3)
learn.export("tumour_classifier_model.pkl")
interp = ClassificationInterpretation.from_learner(learn)
interp.plot_confusion_matrix()
interp.plot_top_losses(15)
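# A minimal inference sketch added for illustration (it reloads the .pkl exported above;
# "some_scan.jpg" is a placeholder path, not part of the original notebook):
learn_inf = load_learner("tumour_classifier_model.pkl")
pred_class, pred_idx, probs = learn_inf.predict(PILImage.create("some_scan.jpg"))
print(pred_class, probs[pred_idx])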
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/963/129963881.ipynb
|
brian-tumor-dataset
|
preetviradiya
|
[{"Id": 129963881, "ScriptId": 38614513, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15100726, "CreationDate": "05/17/2023 18:45:43", "VersionNumber": 3.0, "Title": "ResNet-18 Brain Tumour Classifier", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 38.0, "LinesInsertedFromPrevious": 7.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 31.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 186400646, "KernelVersionId": 129963881, "SourceDatasetVersionId": 2236708}]
|
[{"Id": 2236708, "DatasetId": 1343913, "DatasourceVersionId": 2278530, "CreatorUserId": 5456766, "LicenseName": "GPL 2", "CreationDate": "05/16/2021 10:20:25", "VersionNumber": 1.0, "Title": "Brian Tumor Dataset", "Slug": "brian-tumor-dataset", "Subtitle": "X-Ray images of Brain", "Description": "### Context\n\nThis dataset consists of the scanned images of brain of patient diagnosed of brain tumour.\n\n### Content\nSeparated files for train and test data with separating features and labels\n\n### Acknowledgements\nWe wouldn't be here without the help of others. If you owe any attributions or thanks, include them here along with any citations of past research.\n\n### Inspiration\nYour data will be in front of the world's largest data science community. What questions do you want to see answered?", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1343913, "CreatorUserId": 5456766, "OwnerUserId": 5456766.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2236708.0, "CurrentDatasourceVersionId": 2278530.0, "ForumId": 1362909, "Type": 2, "CreationDate": "05/16/2021 10:20:25", "LastActivityDate": "05/16/2021", "TotalViews": 42814, "TotalDownloads": 5355, "TotalVotes": 87, "TotalKernels": 38}]
|
[{"Id": 5456766, "UserName": "preetviradiya", "DisplayName": "Preet Viradiya", "RegisterDate": "07/12/2020", "PerformanceTier": 2}]
|
# # Brain Tumour Classifier
# This image classifier was built as an experiment for lesson 2 of Fast.ai's ML course. I'm using it to learn the basics of their library on a meaningful dataset.
# ## Initialising the Dataloader
from fastai.vision.all import *
# Defines a label function for the data loader. The label is "Healthy" when the image file name starts with "Not" (i.e. it is labelled "Not Cancer"), and "Tumour" otherwise.
def is_tumour(x):
    if x.startswith("Not"):
        return "Healthy"
    else:
        return "Tumour"
# Defines an ImageDataLoader that holds out 30% of the images for validation. Images are resized to 128x128.
path = "/kaggle/input/brian-tumor-dataset/Brain Tumor Data Set/Brain Tumor Data Set"
dataloaders = ImageDataLoaders.from_name_func(
"./",
get_image_files(path),
valid_pct=0.3,
seed=32,
label_func=is_tumour,
item_tfms=Resize(128, method="pad"),
)
# A vision learner based on ResNet-18 is defined and the model is fine tuned for 3 epochs.
learn = vision_learner(dataloaders, resnet18, metrics=error_rate)
learn.fine_tune(3)
learn.export("tumour_classifier_model.pkl")
interp = ClassificationInterpretation.from_learner(learn)
interp.plot_confusion_matrix()
interp.plot_top_losses(15)
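# A minimal inference sketch added for illustration (it reloads the .pkl exported above;
# "some_scan.jpg" is a placeholder path, not part of the original notebook):
learn_inf = load_learner("tumour_classifier_model.pkl")
pred_class, pred_idx, probs = learn_inf.predict(PILImage.create("some_scan.jpg"))
print(pred_class, probs[pred_idx])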
| false | 0 | 380 | 0 | 450 | 380 |
||
129443976
|
# (These notes were taken from LinkedIn courses: *Programming Foundations: Algorithms* and *Programming Foundations: Data Structures*)
# # **Overview: Data Structures and Algorithms** 👀
# ## Data Structures
# * **Array**: Identified by index or key. \
# Calculate index item $O(1)$, insert or delete item at any location $O(n)$.
# * **Linked List**: Each element, called a node, points to the next one. \
# There are also doubly linked lists. \
# Item lookup is $O(n)$.
# * **Queue**: First In First Out (FIFO). The first element to be inserted will be the first one taken out. Adding an item is called *enqueue* and removing one is called *dequeue*.
# 
# * **Stack**: Last In First Out (LIFO). The last element to be inserted will be the first one taken out. It works through the methods push (in) and pop (out). Both operations are $O(1)$ in time.
# 
# * **Hash**: Hashing is a way of converting raw data into a small piece of data associated with a unique hash value, produced by a hash function that receives a value of some type and outputs an integer. Two different inputs can produce the same integer, which is called a *collision*. The same input always produces the same output, but the process cannot be reversed. Hashing is implemented through a **hash table**, a form of associative array that maps a unique key to a value. It doesn't keep the items in any specific order.
# * **Set**: It is a collection of elements that cannot be repeated. As in hash tables there is a key and a value, but in this case the key is the value itself.
# * **Tree**: It is like an extension of a linked list, but each node can be connected to several nodes. There is a root node, from which other nodes are connected, and so on for the following levels. The node preceding a given node is called its parent, and the following ones are its children. The structure might be better understood by looking at the image.
# 
# * **Binary Search Tree:** Each node can have at most 2 children. Besides, it must keep an order: the left child must be less than its parent, whilst the right child must be greater.
# * **Heap:** It is implemented as a binary tree. Its order depends on how we want to access the data; for instance, if we would like to have the greatest values first, they are placed on the first nodes.
# ## Algorithms
# **Complexity**
# * Space -> Memory
# * Time -> Time to complete
# **Classification**
# * Serial / Parallel
# * Exact / Approximate
# * Deterministic / Not Deterministic
# **Types of**
# * Searching
# * Sorting
# * Computational
# * Collection
# **Performance** \
# To measure how an algorithm responds to dataset size, Big-O notation is used (order of operation: how the time to perform an operation scales). It relates the size of the input data to how the running time grows. Examples:
# * $O(1)$: Constant
# * $O(\log n)$: Logarithmic
# * $O(n)$: Linear
# * $O(n\log n)$: Log-linear
# * $O(n^2)$: Quadratic
# * ...
# **Recursion** \
# When a function calls itself from its own code. It needs a break condition to avoid infinite loops, and it must store the pending calls and their values on the *call stack*.
# **Sorting Data** \
# In general, languages already have built-in sorting methods.
# * Bubble sort: Compares two adjacent elements and, depending on whether they match a condition, swaps them or keeps the same order, then moves on to the next pair. When it reaches the end, the process starts again without the last element, because it is already where it should be. Performance: $O(n^2)$, so it is not the best.
# * Merge sort: Divide-and-conquer algorithm. It splits the dataset into different pieces using recursion and then merges them back while comparing elements. It performs well on large datasets. Performance: $O(n \log n)$.
# * Quick sort: This is also a divide-and-conquer algorithm and uses recursion, with performance $O(n\log n)$. The difference from merge sort, and the reason why it usually performs better, is that it operates in place on the data. A case where it won't perform well is when the array is almost sorted. \
# To implement it we select a pivot element (pivot point). A lower index advances from the start of the list while its elements are less than or equal to the pivot, and an upper index moves back from the end while its elements are greater than or equal to the pivot; when both stop, the two elements they point to are exchanged and the scan continues. The process repeats until the two indexes cross. At that point the pivot is exchanged with the element at the upper index (the greatest value lesser than it), so that one side of the array contains all the numbers lesser than the pivot and the other side the greater ones. The array is then split at that point and the process starts again on each part.
# * ...
# **Searching data** \
# * Unordered list search: Compares each item in the list, so it is $O(n)$.
# * Ordered list search: Establish a midpoint and compare the target value with the element there; since the list is ordered, this tells us in which half the value must be located. A new midpoint is then calculated within that half, and the process repeats recursively until the desired item is found.
# # **Python Data Structures and Algorithms** 🐍
# # **Data structures**
# ## Linked List
#
# ----- Linked List -----
# Node class
class Node(object):
def __init__(self, val):
self.val = val
self.next = None
def get_data(self):
return self.val
def set_data(self, val):
self.val = val
def get_next(self):
return self.next
def set_next(self, next):
self.next = next
# Linked List class
class LinkedList(object):
def __init__(self, head=None):
self.head = head
self.count = 0
def get_count(self):
return self.count
def insert(self, data):
# TODO: insert a new node
new_node = Node(data)
new_node.set_next(self.head)
self.head = new_node
self.count += 1
def find(self, val):
# TODO: The first item with a given value
item = self.head
while item != None:
if item.get_data() == val:
return True
else:
item = item.get_next()
return False
def deleteAt(self, idx):
# TODO: Delete an item with a given index
if idx > self.count - 1:
return
if idx == 0:
self.head = self.head.get_next()
else:
tempIdx = 0
node = self.head
while tempIdx < idx - 1:
node = node.get_next()
tempIdx += 1
node.set_next(node.get_next().get_next())
self.count -= 1
def dump_list(self):
tempnode = self.head
while tempnode != None:
print("Node: ", tempnode.get_data())
tempnode = tempnode.get_next()
# Creating a Linked List and insert items
itemList = LinkedList()
itemList.insert(2)
itemList.insert(3)
itemList.insert(5)
itemList.insert(7)
itemList.dump_list()
# -------------------------------------------
print("Item count: ", itemList.get_count())
print("Finding 2: ", itemList.find(2))
print("Finding 10: ", itemList.find(10))
# Deleting an item at some position
itemList.deleteAt(2)
itemList.dump_list()
print("Item count: ", itemList.get_count())
# ## Stacks and queues
# In Python one can use the regular list as a **stack**.
# ----- Stack -----
# Empty stack
stack = []
# Push
stack.append("Element 1")
stack.append("Element 2")
stack.append("Element 3")
print(stack)
# Pop
stack.pop()
print(stack)
# To make a **queue** one could again use a regular list, but it is not recommended because removing elements from the front requires $O(n)$ time. Instead, Python's collections module provides deque, which can be used to implement a queue.
# ----- Queue -----
from collections import deque
# Empty deque object
queue = deque()
# Add elements
queue.append("Element 1")
queue.append("Element 2")
queue.append("Element 3")
queue.append("Element 4")
print(queue)
# Remove elements
queue.popleft()
print(queue)
# ## Hash table
# The most common way is using dictionaries.
# ----- Hash table -----
# Creating a dictionary
hash1 = {"key1": 1, "key2": 2, "key3": 3}
print(hash1)
# Empty hash
hash2 = {}
# Add items
hash2["key1"] = 1
hash2["key2"] = 2
hash2["key3"] = 3
print(hash2)
# Replace an item
hash2["key2"] = "Replaced value"
print(hash2)
# Remove an item
hash2.pop("key2")
print(hash2)
# Obtain a list of keys, values and items tuples
listKeys = hash2.keys()
listValues = hash2.values()
listItems = hash2.items()
print(listKeys)
print(listValues)
print(listItems)
# One can iterate from this structure too
for key, val in hash2.items():
print(f"{key}: {val}")
# ## Sets
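# A small illustrative sketch: Python's built-in set stores unique elements and offers
# average $O(1)$ membership tests.
# ----- Set -----
colors = {"red", "blue", "green"}
# Add elements (duplicates are silently ignored)
colors.add("yellow")
colors.add("red")
print(colors)
# Membership test
print("blue" in colors)
# Remove an element
colors.remove("green")
print(colors)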
# # **Algorithms**
# ## Recursion
#
# A simple example would be a countdown function
def countdown(x):
if x == 0:
print("Done!")
return
else:
print(x, "...")
countdown(x - 1)
countdown(5)
# Another example could be a factorial function
def factorial(x):
if x == 1:
return 1
else:
return x * factorial(x - 1)
print(factorial(13))
# # Sorting Data
# ## Bubble sort
# ----- Bubble sort -----
def bubbleSort(dataset):
for i in range(len(dataset) - 1, 0, -1):
for j in range(i):
if dataset[j] > dataset[j + 1]:
temp = dataset[j]
dataset[j] = dataset[j + 1]
dataset[j + 1] = temp
return dataset
exList = [3, 4, 1, 2, 5, 6]
print(bubbleSort(exList))
# ## Merge sort
# ----- Merge sort -----
def mergeSort(dataset):
if len(dataset) > 1:
mid = len(dataset) // 2
larr = dataset[:mid]
rarr = dataset[mid:]
# Recursion
mergeSort(larr)
mergeSort(rarr)
i = 0 # index into larr
j = 0 # index into rarr
k = 0 # index into merge array
# Comparing both arrays
while i < len(larr) and j < len(rarr):
if larr[i] < rarr[j]:
dataset[k] = larr[i]
i += 1
else:
dataset[k] = rarr[j]
j += 1
k += 1
# For the values that remains in the larr
while i < len(larr):
dataset[k] = larr[i]
i += 1
k += 1
# For the values that remains in the rarr
while j < len(rarr):
dataset[k] = rarr[j]
j += 1
k += 1
return dataset
# Implementing the algorithm
exList = [4, 6, 2, 5, 3, 1]
print(mergeSort(exList))
# ## Quick sort
# ----- Quick sort -----
def quickSort(data, first, last):
if first < last:
# Calculate the pivot point
pivotIdx = partition(data, first, last)
# Sort the two partitions
quickSort(data, first, pivotIdx - 1)
quickSort(data, pivotIdx + 1, last)
def partition(data, first, last):
# Select the first element as pivot
pivot = data[first]
# Establish the lower and upper indexes
lower = first + 1
upper = last
# Searching for the crossing point
done = False
while not done:
while lower <= upper and data[lower] <= pivot:
lower += 1
while upper >= lower and data[upper] >= pivot:
upper -= 1
if lower > upper:
done = True
else:
temp = data[lower]
data[lower] = data[upper]
data[upper] = temp
# When the split point is found, exchange the pivot value
temp = data[first]
data[first] = data[upper]
data[upper] = temp
# Return the split point index
return upper
# See it implemented
exList = [4, 6, 2, 8, 1, 9, 3, 5, 7]
print(exList)
quickSort(exList, 0, len(exList) - 1)
print(exList)
# # Searching data
# ## Unordered list search
# ----- Unordered list search ------
def find_unorder(arr, item):
for i in range(0, len(arr)):
if item == arr[i]:
return i # Return index where it is located
return None
arr = [1, 5, 2, 50, 11, 7]
print(find_unorder(arr, 5))
# ## Ordered list search
# Here the technique of binary search is used.
# ----- Ordered list search -----> Binary search
def binary_search(arr, item):
    lowerIdx = 0
    upperIdx = len(arr) - 1
    # keep searching while the interval [lowerIdx, upperIdx] is non-empty
    while lowerIdx <= upperIdx:
        mid = (lowerIdx + upperIdx) // 2
        if arr[mid] == item:
            return mid
        if item > arr[mid]:
            lowerIdx = mid + 1
        else:
            upperIdx = mid - 1
    # the item is not in the list
    return None
arr = [1, 2, 3, 4, 5, 6, 7]
print(binary_search(arr, 5))
# It could be useful to have an algorithm that determines whether the list is already sorted, so...
def isSorted(arr):
for i in range(0, len(arr) - 1):
if arr[i] > arr[i + 1]:
return False
return True
arr1 = [2, 3, 4, 8, 3]
print(isSorted(arr1))
arr2 = [1, 2, 3, 4, 5]
print(isSorted(arr2))
# Another way is using list comprehensions
def isSorted2(arr):
return all(arr[i] <= arr[i + 1] for i in range(len(arr) - 1))
print(isSorted2(arr1))
print(isSorted2(arr2))
# # Other algorithms...
# ## Unique filtering with hash tables
# Hash tables, in this case Python dictionaries, let us apply their uniqueness property to build different algorithms, for example eliminating duplicates from a list. Performance: $O(n)$.
filter = dict()
exList = [
"red",
"blue",
"green",
"green",
"yellow",
"black",
"white",
"yellow",
"yellow",
]
listIdx = [i for i in range(len(exList))]
# It can be used a dictionary comprehension
filter = {k: v for (k, v) in zip(exList, listIdx)}
result = set(filter.keys()) # Result is saved in a set data structure
print(result)
# ## Value counting with hash tables
counter = dict()
for item in exList: # we are using the list from the past chunk of code
if item in counter.keys():
counter[item] += 1
else:
counter[item] = 1
print(counter)
# ## Max value recursively
def findMax(arr):
# Breaking condition
if len(arr) == 1:
return arr[0]
# Recursion
n1 = arr[0]
    n2 = findMax(arr[1:])  # The array shrinks until only 1 element remains
    # Then the following code runs and starts to compare values
if n1 > n2:
return n1
else:
return n2
arr = [2, 100, 45, 3, 7, 67, 8]
print(findMax(arr))
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/443/129443976.ipynb
| null | null |
[{"Id": 129443976, "ScriptId": 38488482, "ParentScriptVersionId": NaN, "ScriptLanguageId": 12, "AuthorUserId": 6441390, "CreationDate": "05/13/2023 21:42:22", "VersionNumber": 2.0, "Title": "Python Data Structures and Algorithms", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 505.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 504.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# (These notes were taken from LinkedIn courses: *Programming Foundations: Algorithms* and *Programming Foundations: Data Structures*)
# # **Overview: Data Structures and Algorithms** 👀
# ## Data Structures
# * **Array**: Identified by index or key. \
# Calculate index item $O(1)$, insert or delete item at any location $O(n)$.
# * **Linked List**: Each element, called a node, points to the next one. \
# There are also doubly linked lists. \
# Item lookup is $O(n)$.
# * **Queue**: First In First Out (FIFO). The first element to be inserted will be the first one taken out. Adding an item is called *enqueue* and removing one is called *dequeue*.
# 
# * **Stack**: Last In First Out (LIFO). The last element to be inserted will be the first one taken out. It works through the methods push (in) and pop (out). Both operations are $O(1)$ in time.
# 
# * **Hash**: Hashing is a way of converting raw data into a small piece of data associated with a unique hash value, produced by a hash function that receives a value of some type and outputs an integer. Two different inputs can produce the same integer, which is called a *collision*. The same input always produces the same output, but the process cannot be reversed. Hashing is implemented through a **hash table**, a form of associative array that maps a unique key to a value. It doesn't keep the items in any specific order.
# * **Set**: It is a collection of elements that cannot be repeated. As in hash tables there is a key and a value, but in this case the key is the value itself.
# * **Tree**: It is like an extension of a linked list, but each node can be connected to several nodes. There is a root node, from which other nodes are connected, and so on for the following levels. The node preceding a given node is called its parent, and the following ones are its children. The structure might be better understood by looking at the image.
# 
# * **Binary Search Tree:** Each node can have at most 2 children. Besides, it must keep an order: the left child must be less than its parent, whilst the right child must be greater.
# * **Heap:** It is implemented as a binary tree. Its order depends on how we want to access the data; for instance, if we would like to have the greatest values first, they are placed on the first nodes.
# ## Algorithms
# **Complexity**
# * Space -> Memory
# * Time -> Time to complete
# **Classification**
# * Serial / Parallel
# * Exact / Approximate
# * Deterministic / Not Deterministic
# **Types of**
# * Searching
# * Sorting
# * Computational
# * Collection
# **Performance** \
# To measure how an algorithm responds to dataset size, Big-O notation is used (order of operation: how the time to perform an operation scales). It relates the size of the input data to how the running time grows. Examples:
# * $O(1)$: Constant
# * $O(\log n)$: Logarithmic
# * $O(n)$: Linear
# * $O(n\log n)$: Log-linear
# * $O(n^2)$: Quadratic
# * ...
# **Recursion** \
# When a function calls itself from its own code. It needs a break condition to avoid infinite loops, and it must store the pending calls and their values on the *call stack*.
# **Sorting Data** \
# In general, languages already have built-in sorting methods.
# * Bubble sort: Compares two adjacent elements and, depending on whether they match a condition, swaps them or keeps the same order, then moves on to the next pair. When it reaches the end, the process starts again without the last element, because it is already where it should be. Performance: $O(n^2)$, so it is not the best.
# * Merge sort: Divide-and-conquer algorithm. It splits the dataset into different pieces using recursion and then merges them back while comparing elements. It performs well on large datasets. Performance: $O(n \log n)$.
# * Quick sort: This is also a divide-and-conquer algorithm and uses recursion, with performance $O(n\log n)$. The difference from merge sort, and the reason why it usually performs better, is that it operates in place on the data. A case where it won't perform well is when the array is almost sorted. \
# To implement it we select a pivot element (pivot point). A lower index advances from the start of the list while its elements are less than or equal to the pivot, and an upper index moves back from the end while its elements are greater than or equal to the pivot; when both stop, the two elements they point to are exchanged and the scan continues. The process repeats until the two indexes cross. At that point the pivot is exchanged with the element at the upper index (the greatest value lesser than it), so that one side of the array contains all the numbers lesser than the pivot and the other side the greater ones. The array is then split at that point and the process starts again on each part.
# * ...
# **Searching data** \
# * Unordered list search: Compares each item in the list, so it is $O(n)$.
# * Ordered list search: Establish a midpoint and compare the target value with the element there; since the list is ordered, this tells us in which half the value must be located. A new midpoint is then calculated within that half, and the process repeats recursively until the desired item is found.
# # **Python Data Structures and Algorithms** 🐍
# # **Data structures**
# ## Linked List
#
# ----- Linked List -----
# Node class
class Node(object):
def __init__(self, val):
self.val = val
self.next = None
def get_data(self):
return self.val
def set_data(self, val):
self.val = val
def get_next(self):
return self.next
def set_next(self, next):
self.next = next
# Linked List class
class LinkedList(object):
def __init__(self, head=None):
self.head = head
self.count = 0
def get_count(self):
return self.count
def insert(self, data):
# TODO: insert a new node
new_node = Node(data)
new_node.set_next(self.head)
self.head = new_node
self.count += 1
def find(self, val):
# TODO: The first item with a given value
item = self.head
while item != None:
if item.get_data() == val:
return True
else:
item = item.get_next()
return False
def deleteAt(self, idx):
# TODO: Delete an item with a given index
if idx > self.count - 1:
return
if idx == 0:
self.head = self.head.get_next()
else:
tempIdx = 0
node = self.head
while tempIdx < idx - 1:
node = node.get_next()
tempIdx += 1
node.set_next(node.get_next().get_next())
self.count -= 1
def dump_list(self):
tempnode = self.head
while tempnode != None:
print("Node: ", tempnode.get_data())
tempnode = tempnode.get_next()
# Creating a Linked List and insert items
itemList = LinkedList()
itemList.insert(2)
itemList.insert(3)
itemList.insert(5)
itemList.insert(7)
itemList.dump_list()
# -------------------------------------------
print("Item count: ", itemList.get_count())
print("Finding 2: ", itemList.find(2))
print("Finding 10: ", itemList.find(10))
# Deleting an item at some position
itemList.deleteAt(2)
itemList.dump_list()
print("Item count: ", itemList.get_count())
# ## Stacks and queues
# In Python one can use the regular list as a **stack**.
# ----- Stack -----
# Empty stack
stack = []
# Push
stack.append("Element 1")
stack.append("Element 2")
stack.append("Element 3")
print(stack)
# Pop
stack.pop()
print(stack)
# To make a **queue** one could again use a regular list, but it is not recommended because removing elements from the front requires $O(n)$ time. Instead, Python's collections module provides deque, which can be used to implement a queue.
# ----- Queue -----
from collections import deque
# Empty deque object
queue = deque()
# Add elements
queue.append("Element 1")
queue.append("Element 2")
queue.append("Element 3")
queue.append("Element 4")
print(queue)
# Remove elements
queue.popleft()
print(queue)
# ## Hash table
# The most common way is using dictionaries.
# ----- Hash table -----
# Creating a dictionary
hash1 = {"key1": 1, "key2": 2, "key3": 3}
print(hash1)
# Empty hash
hash2 = {}
# Add items
hash2["key1"] = 1
hash2["key2"] = 2
hash2["key3"] = 3
print(hash2)
# Replace an item
hash2["key2"] = "Replaced value"
print(hash2)
# Remove an item
hash2.pop("key2")
print(hash2)
# Obtain a list of keys, values and items tuples
listKeys = hash2.keys()
listValues = hash2.values()
listItems = hash2.items()
print(listKeys)
print(listValues)
print(listItems)
# One can iterate from this structure too
for key, val in hash2.items():
print(f"{key}: {val}")
# ## Sets
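# A small illustrative sketch: Python's built-in set stores unique elements and offers
# average $O(1)$ membership tests.
# ----- Set -----
colors = {"red", "blue", "green"}
# Add elements (duplicates are silently ignored)
colors.add("yellow")
colors.add("red")
print(colors)
# Membership test
print("blue" in colors)
# Remove an element
colors.remove("green")
print(colors)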
# # **Algorithms**
# ## Recursion
#
# A simple example would be a countdown function
def countdown(x):
if x == 0:
print("Done!")
return
else:
print(x, "...")
countdown(x - 1)
countdown(5)
# Another example could be a factorial function
def factorial(x):
if x == 1:
return 1
else:
return x * factorial(x - 1)
print(factorial(13))
# # Sorting Data
# ## Bubble sort
# ----- Bubble sort -----
def bubbleSort(dataset):
for i in range(len(dataset) - 1, 0, -1):
for j in range(i):
if dataset[j] > dataset[j + 1]:
temp = dataset[j]
dataset[j] = dataset[j + 1]
dataset[j + 1] = temp
return dataset
exList = [3, 4, 1, 2, 5, 6]
print(bubbleSort(exList))
# ## Merge sort
# ----- Merge sort -----
def mergeSort(dataset):
if len(dataset) > 1:
mid = len(dataset) // 2
larr = dataset[:mid]
rarr = dataset[mid:]
# Recursion
mergeSort(larr)
mergeSort(rarr)
i = 0 # index into larr
j = 0 # index into rarr
k = 0 # index into merge array
# Comparing both arrays
while i < len(larr) and j < len(rarr):
if larr[i] < rarr[j]:
dataset[k] = larr[i]
i += 1
else:
dataset[k] = rarr[j]
j += 1
k += 1
# For the values that remains in the larr
while i < len(larr):
dataset[k] = larr[i]
i += 1
k += 1
# For the values that remains in the rarr
while j < len(rarr):
dataset[k] = rarr[j]
j += 1
k += 1
return dataset
# Implementing the algorithm
exList = [4, 6, 2, 5, 3, 1]
print(mergeSort(exList))
# ## Quick sort
# ----- Quick sort -----
def quickSort(data, first, last):
if first < last:
# Calculate the pivot point
pivotIdx = partition(data, first, last)
# Sort the two partitions
quickSort(data, first, pivotIdx - 1)
quickSort(data, pivotIdx + 1, last)
def partition(data, first, last):
# Select the first element as pivot
pivot = data[first]
# Establish the lower and upper indexes
lower = first + 1
upper = last
# Searching for the crossing point
done = False
while not done:
while lower <= upper and data[lower] <= pivot:
lower += 1
while upper >= lower and data[upper] >= pivot:
upper -= 1
if lower > upper:
done = True
else:
temp = data[lower]
data[lower] = data[upper]
data[upper] = temp
# When the split point is found, exchange the pivot value
temp = data[first]
data[first] = data[upper]
data[upper] = temp
# Return the split point index
return upper
# See it implemented
exList = [4, 6, 2, 8, 1, 9, 3, 5, 7]
print(exList)
quickSort(exList, 0, len(exList) - 1)
print(exList)
# # Searching data
# ## Unordered list search
# ----- Unordered list search ------
def find_unorder(arr, item):
for i in range(0, len(arr)):
if item == arr[i]:
return i # Return index where it is located
return None
arr = [1, 5, 2, 50, 11, 7]
print(find_unorder(arr, 5))
# ## Ordered list search
# Here the technique of binary search is used.
# ----- Ordered list search -----> Binary search
def binary_search(arr, item):
    lowerIdx = 0
    upperIdx = len(arr) - 1
    # keep searching while the interval [lowerIdx, upperIdx] is non-empty
    while lowerIdx <= upperIdx:
        mid = (lowerIdx + upperIdx) // 2
        if arr[mid] == item:
            return mid
        if item > arr[mid]:
            lowerIdx = mid + 1
        else:
            upperIdx = mid - 1
    # the item is not in the list
    return None
arr = [1, 2, 3, 4, 5, 6, 7]
print(binary_search(arr, 5))
# It could be useful to have an algorithm that determines whether the list is already sorted, so...
def isSorted(arr):
for i in range(0, len(arr) - 1):
if arr[i] > arr[i + 1]:
return False
return True
arr1 = [2, 3, 4, 8, 3]
print(isSorted(arr1))
arr2 = [1, 2, 3, 4, 5]
print(isSorted(arr2))
# Another way is using list comprehensions
def isSorted2(arr):
return all(arr[i] <= arr[i + 1] for i in range(len(arr) - 1))
print(isSorted2(arr1))
print(isSorted2(arr2))
# # Other algorithms...
# ## Unique filtering with hash tables
# Hash tables, in this case Python dictionaries, let us apply their uniqueness property to build different algorithms, for example eliminating duplicates from a list. Performance: $O(n)$.
filter = dict()
exList = [
"red",
"blue",
"green",
"green",
"yellow",
"black",
"white",
"yellow",
"yellow",
]
listIdx = [i for i in range(len(exList))]
# It can be used a dictionary comprehension
filter = {k: v for (k, v) in zip(exList, listIdx)}
result = set(filter.keys()) # Result is saved in a set data structure
print(result)
# ## Value counting with hash tables
counter = dict()
for item in exList: # we are using the list from the past chunk of code
if item in counter.keys():
counter[item] += 1
else:
counter[item] = 1
print(counter)
# ## Max value recursively
def findMax(arr):
# Breaking condition
if len(arr) == 1:
return arr[0]
# Recursion
n1 = arr[0]
    n2 = findMax(arr[1:])  # The array shrinks until only 1 element remains
    # Then the following code runs and starts to compare values
if n1 > n2:
return n1
else:
return n2
arr = [2, 100, 45, 3, 7, 67, 8]
print(findMax(arr))
| false | 0 | 35,288 | 0 | 35,288 | 35,288 |
||
129443258
|
<jupyter_start><jupyter_text>Sample
Kaggle dataset identifier: sample
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# import spark session
from pyspark.sql import SparkSession
# Create Spark Object
spark = SparkSession.builder.appName("Data_PreProcessing").getOrCreate()
df = spark.read.csv(
"/kaggle/input/sample/chapter_2_Data_Processing/sample_data.csv",
header=True,
inferSchema=True,
)
# Columns of Dataframe
df.columns
# Check number of Columns
len(df.columns)
# Number of records in dataframe
df.count()
# Shape of dataset
print(df.count(), len(df.columns))
# printSchema
df.printSchema()
# First few rows of dataframe
df.show(5)
# Select 2 Columns
df.select("age", "experience", "mobile").show(5)
# Info about dataframe
df.describe().show()
from pyspark.sql.types import StringType, DoubleType, IntegerType
df.withColumn("age_after_10Years", (df.age + 10)).show(10, False)
# cast as double (age)
df.withColumn("age_doubleFormat", (df.age.cast("double"))).show(10, False)
# filter the records
df.filter(df["mobile"] == "Vivo").show()
df.filter(df["mobile"] == "Apple").select("ratings", "age", "experience").show(5, False)
df.filter(df["age"] == 32).show()
# Filter with multiple conditions
df.filter(df["age"] == 27).filter(df["mobile"] == "Oppo").show()
df.filter(df["mobile"] == "Apple").filter(df["experience"] > 10.0).show()
# Select Distinct mobile
df.select("mobile").distinct().show()
# Count distinct mobile
df.select("mobile").distinct().count()
# # **GroupBy**
df.groupBy("age").count().orderBy("count", ascending=False).show(5, False)
df.groupBy("mobile").count().orderBy("count", ascending=False).show()
df.groupBy("mobile").mean().show()
df.groupBy("mobile").sum().show()
df.groupBy("mobile").max().show()
df.groupBy("mobile").min().show()
# # Aggregation
df.groupBy("Mobile").agg({"ratings": "sum"}).show(5, False)
# # UDF
from pyspark.sql import udf  ## careful: this imports the udf *module*, not a callable - see the note below
# import pyspark.sql.functions as F
# udf_fun = F.udf (lambda..., Type())
#
# ## When you see F used in PySpark code, it's typically an alias for pyspark.sql.functions.
# - So, instead of typing out the full module name every time you want to use a function from it, you can just use F as a shorthand.
# ## create udf using python function
# brand_udf=udf(price_range,StringType())
#
# ### This might yield an error -> Pyspark UDF - TypeError: 'module' object is not callable
# ### So to rectify this use : pyspark.sql.functions as F
#
# brand_udf= F.udf(price_range,StringType())
# ## from pyspark.sql.functions import concat
# df = df.withColumn('full_name', concat(df['first_name'], df['last_name']))
# ## But if you use the F alias, you can shorten it like this:
#
# ## import pyspark.sql.functions as F
# df = df.withColumn('full_name', F.concat(df['first_name'], df['last_name']))
#
# Create a normal python function
# create a price_range column
def price_range(brand):
if brand in ["Apple", "Samsung"]:
return "High Range"
elif brand == "MI":
return "Mid Range"
else:
return "Low Range"
import pyspark.sql.functions as F
# Create udf using 'price_range' function
brand_udf = F.udf(price_range, StringType())
# apply to spark dataframe
df.withColumn("Price_Range", brand_udf(df["Mobile"])).show(10, False)
# apply age group using Lambda Function
age_udf = F.udf(lambda age: "Young" if age <= 30 else "Older", StringType())
# apply age udf on df
df.withColumn("Age Group", age_udf(df["age"])).show(10, False)
# # pandas udf
#
from pyspark.sql.functions import pandas_udf, PandasUDFType
# Let's assume the maximum user age to be 100
# calculate the remaining age of the user
def remaining_years(age):
years_left = 100 - age
return years_left
# create udf using python function
age_remaining = F.udf(remaining_years, IntegerType())
# apply to spark df
df.withColumn("Years Remaining", age_remaining(df["age"])).show(10, False)
# UDF using 2 columns
def product_review(rating, experience):
x = rating * experience
return x
# create udf using python function
prod_udf = F.udf(product_review, DoubleType())
# apply to DataFrame
df.withColumn("Product_Review", prod_udf(df["ratings"], df["experience"])).show(
10, False
)
df.count()
# Drop duplicates
new_df = df.dropDuplicates()
print(new_df.count())
# Drop the Product_Review and experience columns (Product_Review was never actually added to df, so only experience is removed)
new_df = new_df.drop("Product_Review", "experience")
new_df.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/443/129443258.ipynb
|
sample
|
aayushsin7a
|
[{"Id": 129443258, "ScriptId": 38409168, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3114800, "CreationDate": "05/13/2023 21:28:22", "VersionNumber": 2.0, "Title": "PySpark Data_Pre_Processing", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 217.0, "LinesInsertedFromPrevious": 121.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 96.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
|
[{"Id": 185497015, "KernelVersionId": 129443258, "SourceDatasetVersionId": 5665014}]
|
[{"Id": 5665014, "DatasetId": 3256305, "DatasourceVersionId": 5740483, "CreatorUserId": 3114800, "LicenseName": "Unknown", "CreationDate": "05/11/2023 18:30:50", "VersionNumber": 1.0, "Title": "Sample", "Slug": "sample", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3256305, "CreatorUserId": 3114800, "OwnerUserId": 3114800.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5665014.0, "CurrentDatasourceVersionId": 5740483.0, "ForumId": 3321792, "Type": 2, "CreationDate": "05/11/2023 18:30:50", "LastActivityDate": "05/11/2023", "TotalViews": 55, "TotalDownloads": 5, "TotalVotes": 0, "TotalKernels": 1}]
|
[{"Id": 3114800, "UserName": "aayushsin7a", "DisplayName": "Aayush Sinha", "RegisterDate": "04/20/2019", "PerformanceTier": 0}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# import spark session
from pyspark.sql import SparkSession
# Create Spark Object
spark = SparkSession.builder.appName("Data_PreProcessing").getOrCreate()
df = spark.read.csv(
"/kaggle/input/sample/chapter_2_Data_Processing/sample_data.csv",
header=True,
inferSchema=True,
)
# Columns of Dataframe
df.columns
# Check number of Columns
len(df.columns)
# Number of records in dataframe
df.count()
# Shape of dataset
print(df.count(), len(df.columns))
# printSchema
df.printSchema()
# First few rows of dataframe
df.show(5)
# Select 2 Columns
df.select("age", "experience", "mobile").show(5)
# Info about dataframe
df.describe().show()
from pyspark.sql.types import StringType, DoubleType, IntegerType
df.withColumn("age_after_10Years", (df.age + 10)).show(10, False)
# cast as double (age)
df.withColumn("age_doubleFormat", (df.age.cast("double"))).show(10, False)
# filter the records
df.filter(df["mobile"] == "Vivo").show()
df.filter(df["mobile"] == "Apple").select("ratings", "age", "experience").show(5, False)
df.filter(df["age"] == 32).show()
# Filter with multiple conditions
df.filter(df["age"] == 27).filter(df["mobile"] == "Oppo").show()
df.filter(df["mobile"] == "Apple").filter(df["experience"] > 10.0).show()
# Select Distinct mobile
df.select("mobile").distinct().show()
# Count distinct mobile
df.select("mobile").distinct().count()
# # **GroupBy**
df.groupBy("age").count().orderBy("count", ascending=False).show(5, False)
df.groupBy("mobile").count().orderBy("count", ascending=False).show()
df.groupBy("mobile").mean().show()
df.groupBy("mobile").sum().show()
df.groupBy("mobile").max().show()
df.groupBy("mobile").min().show()
# # Aggregation
df.groupBy("Mobile").agg({"ratings": "sum"}).show(5, False)
# # UDF
from pyspark.sql import udf  ## careful: this imports the udf *module*, not a callable - see the note below
# import pyspark.sql.functions as F
# udf_fun = F.udf (lambda..., Type())
#
# ## When you see F used in PySpark code, it's typically an alias for pyspark.sql.functions.
# - So, instead of typing out the full module name every time you want to use a function from it, you can just use F as a shorthand.
# ## create udf using python function
# brand_udf=udf(price_range,StringType())
#
# ### This might yield an error -> Pyspark UDF - TypeError: 'module' object is not callable
# ### So to rectify this use : pyspark.sql.functions as F
#
# brand_udf= F.udf(price_range,StringType())
# ## from pyspark.sql.functions import concat
# df = df.withColumn('full_name', concat(df['first_name'], df['last_name']))
# ## But if you use the F alias, you can shorten it like this:
#
# ## import pyspark.sql.functions as F
# df = df.withColumn('full_name', F.concat(df['first_name'], df['last_name']))
#
# Create a normal python function
# create a price_range column
def price_range(brand):
if brand in ["Apple", "Samsung"]:
return "High Range"
elif brand == "MI":
return "Mid Range"
else:
return "Low Range"
import pyspark.sql.functions as F
# Create udf using 'price_range' function
brand_udf = F.udf(price_range, StringType())
# apply to spark dataframe
df.withColumn("Price_Range", brand_udf(df["Mobile"])).show(10, False)
# apply age group using Lambda Function
age_udf = F.udf(lambda age: "Young" if age <= 30 else "Older", StringType())
# apply age udf on df
df.withColumn("Age Group", age_udf(df["age"])).show(10, False)
# # pandas udf
#
from pyspark.sql.functions import pandas_udf, PandasUDFType
# Let's assume the maximum user age to be 100
# calculate the remaining age of the user
def remaining_years(age):
years_left = 100 - age
return years_left
# create udf using python function
age_remaining = F.udf(remaining_years, IntegerType())
# apply to spark df
df.withColumn("Years Remaining", age_remaining(df["age"])).show(10, False)
# UDF using 2 columns
def product_review(rating, experience):
x = rating * experience
return x
# create udf using python function
prod_udf = F.udf(product_review, DoubleType())
# apply to DataFrame
df.withColumn("Product_Review", prod_udf(df["ratings"], df["experience"])).show(
10, False
)
df.count()
# Drop duplicates
new_df = df.dropDuplicates()
print(new_df.count())
# Drop prodreview and exper. column
new_df = new_df.drop("Product_Review", "experience")
new_df.show()
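# A closing sketch (my addition; the output path is just an assumed example): persist the
# de-duplicated dataframe so later steps could reuse it without redoing the cleaning.
new_df.write.mode("overwrite").parquet("/kaggle/working/cleaned_sample_data_parquet")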
| false | 0 | 1,522 | 1 | 1,538 | 1,522 |
||
129443073
|
<jupyter_start><jupyter_text>Visual_Question_Answering
Thie is VQA v2 from the main website.
Kaggle dataset identifier: visual-question-answering
<jupyter_script># #### all imports
import tensorflow as tf
import os
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.utils import load_img, img_to_array
from tqdm import tqdm
import re
import pickle
# ### initialize a generator for the test images (test2015)
# #### Note: point image_dir at train2014 (or the validation split) to extract features for those images instead
# Define paths to dataset and output files
data_dir = "/kaggle/input/visual-question-answering/"
output_dir = "/kaggle/working/"
image_dir = os.path.join(data_dir, "test2015")
output_file = os.path.join(output_dir, "test_features.npy")
# Define a data generator to preprocess the images
target_size = (299, 299)
datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
generator = datagen.flow_from_directory(
image_dir, target_size=target_size, batch_size=32, class_mode=None, shuffle=False
)
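# Note: flow_from_directory expects the images to live inside (at least one) subfolder of
# image_dir, and class_mode=None means only images are yielded, without labels.
# shuffle=False is important here: it keeps the batch order aligned with generator.filenames,
# which is what lets us pair the extracted features with image IDs further down.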
# #### load inception v3 and extract images features
# Create an Inception V3 model to extract image features
base_model = InceptionV3(weights="imagenet")
model = Model(inputs=base_model.input, outputs=base_model.get_layer("avg_pool").output)
# Extract image features for each image yielded by the generator (here the test2015 split)
train_features = []
for i in tqdm(range(len(generator))):
batch = generator.next()
features = model.predict_on_batch(batch)
train_features.append(features)
# Concatenate and reshape the extracted features into a numpy array
train_features = np.concatenate(train_features)
train_features = train_features.reshape((len(generator.filenames), -1))
# Save the extracted features to a numpy file
np.save(output_file, train_features)
# #### save features with IDs in a dictionary in a pkl file
# add ids to features
img_ids = np.array(
[
int(re.search("[0-9][0-9][0-9][0-9][0-9]+", gen).group())
for gen in generator.filenames
]
)
image_features = {}
for i in range(len(img_ids)):
image_features[img_ids[i]] = train_features[i]
# save dictionary to test_image_features.pkl file
with open("test_image_features.pkl", "wb") as fp:
pickle.dump(image_features, fp)
print("dictionary saved successfully to file")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/443/129443073.ipynb
|
visual-question-answering
|
hazemabbas
|
[{"Id": 129443073, "ScriptId": 38320234, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9186881, "CreationDate": "05/13/2023 21:25:09", "VersionNumber": 2.0, "Title": "VQA_Image_Inception", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 67.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 67.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
|
[{"Id": 185496666, "KernelVersionId": 129443073, "SourceDatasetVersionId": 5611364}]
|
[{"Id": 5611364, "DatasetId": 3225723, "DatasourceVersionId": 5686507, "CreatorUserId": 9186881, "LicenseName": "Unknown", "CreationDate": "05/05/2023 16:40:50", "VersionNumber": 2.0, "Title": "Visual_Question_Answering", "Slug": "visual-question-answering", "Subtitle": NaN, "Description": "Thie is VQA v2 from the main website.", "VersionNotes": "Init version", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3225723, "CreatorUserId": 9186881, "OwnerUserId": 9186881.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5611364.0, "CurrentDatasourceVersionId": 5686507.0, "ForumId": 3290774, "Type": 2, "CreationDate": "05/05/2023 09:43:26", "LastActivityDate": "05/05/2023", "TotalViews": 108, "TotalDownloads": 6, "TotalVotes": 1, "TotalKernels": 7}]
|
[{"Id": 9186881, "UserName": "hazemabbas", "DisplayName": "hazem abbas", "RegisterDate": "12/16/2021", "PerformanceTier": 0}]
|
# #### all imports
import tensorflow as tf
import os
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model
from keras.applications.inception_v3 import InceptionV3, preprocess_input
from keras.utils import load_img, img_to_array
from tqdm import tqdm
import re
import pickle
# ### initialize a generator for the test images (test2015)
# #### Note: point image_dir at train2014 (or the validation split) to extract features for those images instead
# Define paths to dataset and output files
data_dir = "/kaggle/input/visual-question-answering/"
output_dir = "/kaggle/working/"
image_dir = os.path.join(data_dir, "test2015")
output_file = os.path.join(output_dir, "test_features.npy")
# Define a data generator to preprocess the images
target_size = (299, 299)
datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
generator = datagen.flow_from_directory(
image_dir, target_size=target_size, batch_size=32, class_mode=None, shuffle=False
)
# #### load inception v3 and extract images features
# Create an Inception V3 model to extract image features
base_model = InceptionV3(weights="imagenet")
model = Model(inputs=base_model.input, outputs=base_model.get_layer("avg_pool").output)
# Extract image features for each image yielded by the generator (here the test2015 split)
train_features = []
for i in tqdm(range(len(generator))):
batch = generator.next()
features = model.predict_on_batch(batch)
train_features.append(features)
# Concatenate and reshape the extracted features into a numpy array
train_features = np.concatenate(train_features)
train_features = train_features.reshape((len(generator.filenames), -1))
# Save the extracted features to a numpy file
np.save(output_file, train_features)
# #### save features with IDs in a dictionary in a pkl file
# add ids to features
img_ids = np.array(
[
int(re.search("[0-9][0-9][0-9][0-9][0-9]+", gen).group())
for gen in generator.filenames
]
)
image_features = {}
for i in range(len(img_ids)):
image_features[img_ids[i]] = train_features[i]
# save dictionary to test_image_features.pkl file
with open("test_image_features.pkl", "wb") as fp:
pickle.dump(image_features, fp)
print("dictionary saved successfully to file")
| false | 0 | 635 | 2 | 673 | 635 |
||
129819538
|
# ### Importing Libraries
from sklearn.datasets import make_classification
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
# ### Dataset Generation
X, y = make_classification(
n_samples=100,
n_features=2,
n_informative=2,
n_redundant=0,
n_classes=2,
n_clusters_per_class=1,
random_state=9,
hypercube=False,
class_sep=10,
)
X.shape
# ### Data Representation
plt.figure(figsize=(8, 5))
plt.scatter(X[:, 0], X[:, 1], c=y, s=100)
plt.show()
# ### Logistic Regression from Scratch
def sigmoid(z):
return 1 / (1 + np.exp(-z))
epochs = 2500
lr = 0.5
no_samples = X.shape[0]
def gradient_descent(X, y):
X = np.insert(X, 0, 1, axis=1)
weights = np.ones(X.shape[1])
for i in range(epochs):
y_pred = sigmoid(np.dot(X, weights)) # y_pred will be a matrix
weights = (
weights + lr * np.dot((y - y_pred), X) / no_samples
) # here we are using dot cause all are matrices
return weights[0], weights[1:]
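# Note on the update above: for the sigmoid output y_pred, the gradient of the average
# binary cross-entropy w.r.t. the weights is X.T @ (y_pred - y) / n, so
#   weights = weights + lr * np.dot((y - y_pred), X) / no_samples
# is plain batch gradient descent on the log loss with the minus sign folded in.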
intercept1, coefs1 = gradient_descent(X, y)
print(intercept1, " ", coefs1)
m1 = -(coefs1[0] / coefs1[1])
b1 = -(intercept1 / coefs1[1])
x1_input = np.linspace(-3, 3, 100)
y1_input = m1 * x1_input + b1
# ### Logistic Regression from Sklearn
classifier = LogisticRegression()
classifier.fit(X, y)
intercept2 = classifier.intercept_
coefs2 = classifier.coef_
print(intercept2, " ", coefs2)
m2 = -(coefs2[0][0] / coefs2[0][1])
b2 = -(intercept2 / coefs2[0][1])
x2_input = np.linspace(-3, 3, 100)
y2_input = m2 * x2_input + b2  # fixed: use the sklearn intercept b2 (was b1)
plt.figure(figsize=(8, 5))
plt.plot(x1_input, y1_input, c="r") # logistic regression from scratch
plt.plot(x2_input, y2_input, c="g") # logistic regression from sklearn
plt.scatter(X[:, 0], X[:, 1], c=y, s=100)
plt.show()
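# A quick check (my addition, a sketch using only names defined above): compare training
# accuracy of the scratch solution against sklearn's LogisticRegression.
X_with_bias = np.insert(X, 0, 1, axis=1)
scratch_pred = (sigmoid(X_with_bias @ np.insert(coefs1, 0, intercept1)) >= 0.5).astype(int)
print("scratch accuracy:", (scratch_pred == y).mean())
print("sklearn accuracy:", (classifier.predict(X) == y).mean())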
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/819/129819538.ipynb
| null | null |
[{"Id": 129819538, "ScriptId": 38607979, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6819371, "CreationDate": "05/16/2023 17:38:51", "VersionNumber": 1.0, "Title": "logistic-regression-with-gradient-descent", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 71.0, "LinesInsertedFromPrevious": 71.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
| null | null | null | null |
# ### Importing Libraries
from sklearn.datasets import make_classification
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
# ### Dataset Generation
X, y = make_classification(
n_samples=100,
n_features=2,
n_informative=2,
n_redundant=0,
n_classes=2,
n_clusters_per_class=1,
random_state=9,
hypercube=False,
class_sep=10,
)
X.shape
# ### Data Representation
plt.figure(figsize=(8, 5))
plt.scatter(X[:, 0], X[:, 1], c=y, s=100)
plt.show()
# ### Logistic Regression from Scratch
def sigmoid(z):
return 1 / (1 + np.exp(-z))
epochs = 2500
lr = 0.5
no_samples = X.shape[0]
def gradient_descent(X, y):
X = np.insert(X, 0, 1, axis=1)
weights = np.ones(X.shape[1])
for i in range(epochs):
y_pred = sigmoid(np.dot(X, weights)) # y_pred will be a matrix
weights = (
weights + lr * np.dot((y - y_pred), X) / no_samples
) # here we are using dot cause all are matrices
return weights[0], weights[1:]
intercept1, coefs1 = gradient_descent(X, y)
print(intercept1, " ", coefs1)
m1 = -(coefs1[0] / coefs1[1])
b1 = -(intercept1 / coefs1[1])
x1_input = np.linspace(-3, 3, 100)
y1_input = m1 * x1_input + b1
# ### Logistic Regression from Sklearn
classifier = LogisticRegression()
classifier.fit(X, y)
intercept2 = classifier.intercept_
coefs2 = classifier.coef_
print(intercept2, " ", coefs2)
m2 = -(coefs2[0][0] / coefs2[0][1])
b2 = -(intercept2 / coefs2[0][1])
x2_input = np.linspace(-3, 3, 100)
y2_input = m2 * x2_input + b2  # fixed: use the sklearn intercept b2 (was b1)
plt.figure(figsize=(8, 5))
plt.plot(x1_input, y1_input, c="r") # logistic regression from scratch
plt.plot(x2_input, y2_input, c="g") # logistic regression from sklearn
plt.scatter(X[:, 0], X[:, 1], c=y, s=100)
plt.show()
| false | 0 | 682 | 3 | 682 | 682 |
||
129819769
|
<jupyter_start><jupyter_text>Marijuana Arrests in Toronto: Racial Disparities
```
Data on police treatment of individuals arrested in Toronto for simple possession of small quantities of marijuana. The data are part of a larger data set featured in a series of articles in the Toronto Star newspaper. A data frame with 5226 observations on the following 8 variables.
```
| Column | Description |
| --- | --- |
| released | Whether or not the arrestee was released with a summons; a factor with levels: No; Yes.
|
| colour | The arrestee's race; a factor with levels: Black; White. |
| year | 1997 through 2002; a numeric vector. |
| age | in years; a numeric vector. |
| sex | a factor with levels: Female; Male. |
| employed | a factor with levels: No; Yes. |
| citizen | a factor with levels: No; Yes. |
| checks | Number of police data bases (of previous arrests, previous convictions, parole status, etc. – 6 in all) on which the arrestee's name appeared; a numeric vector |
# Source
Personal communication from Michael Friendly, York University.
Kaggle dataset identifier: arrests-for-marijuana-possession
<jupyter_script>import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
from sklearn.linear_model import LinearRegression
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Dataset 1: University Students Complaints & Reports
# Technology is making life much easier, and its fast, unstoppable development has opened up nearly unlimited opportunities and ideas, yet new kinds of problems keep coming to the surface. Students' problems are one example. Twenty years ago internet access was limited to a few students; now everyone can reach a huge amount of information online. Still, today's students face difficulties with their research, alongside other difficulties in their studying journey. In the following cells, we will present some of these problems and visualize them to draw facts out of the visualizations.
df = pd.read_csv(
"/kaggle/input/university-students-complaints-and-reports/Datasetprojpowerbi.csv"
)
df
# ### Academic Support and Resources
# In the following paragraph, we will try to figure out if academic research difficulties are related to researching experience by representing the number of students who face this problem alongside the academic year they are currently in.
research_problem = df[df.Genre == "Academic Support and Resources"]
by_year = research_problem.groupby("Year", as_index=False)
research_by_year = by_year.count()
barplot = sns.barplot(x="Year", y="Reports", data=research_by_year)
research_by_year["Year"].unique()
# ### Conclusion: Academic Support and Resources:
# After examining the graph, we can observe that the majority of reports originate from students who are still in their first year. The number of reports significantly decreases for second-year students, and there are no reports from students who have progressed beyond their second year.
# ### Athletics and sports
# In the following paragraph, we will attempt to determine the causes behind the high volume of reports concerning the university's athletic facilities.
sport_problem = df[df.Genre == "Athletics and sports"]
by_gender = sport_problem.groupby("Gender", as_index=False)
research_by_gender = by_gender.count()
barplot = sns.barplot(x="Gender", y="Reports", data=research_by_gender)
# From the first graph, we can observe that the number of reports coming from females is higher than the number of reports coming from males. This suggests that females may be experiencing mistreatment based on their gender, as indicated by some of the reports.
by_age = sport_problem.groupby("Age", as_index=False)
research_by_age = by_age.count()
barplot = sns.barplot(x="Age", y="Reports", data=research_by_age)
#
# From the second graph, we can observe that there is no discernible pattern between the number of reports and students' age. Based on this, we can conclude that there is no need to be concerned about making additional efforts in the sports facilities specifically targeting young or old students.
# ### Conclusion: Athletics and sports:
# After examining the graphs and reading some reports, we can observe that the main problem students face in the sports facility is related to how they are treated and the lack of general support. Additionally, there are some opinions regarding the equipment in the facility.
# ## Dataset 2: Arrests for Marijuana Possession
# As marijuana consumption becomes legal in more countries, the number of people who consume it is rising worldwide. Some claim it is harmless, while others argue the opposite. In the following graphs, we will attempt to understand the relationship between marijuana possession arrests and factors such as colour, gender, age, and more.
df2 = pd.read_csv("/kaggle/input/arrests-for-marijuana-possession/Arrests.csv")
df2
# ### Arrests over the years
# In the following graph, we will represent the number of arrests that have occurred over the years in an attempt to understand whether the number of marijuana consumers is truly increasing or not.
arrests_by_year = df2.groupby("year").size()
plt.figure(figsize=(10, 6))
plt.plot(arrests_by_year.index, arrests_by_year.values, marker="o")
plt.xlabel("Year")
plt.ylabel("Number of Arrests")
plt.title("Number of Arrests per Year")
plt.show()
# ### Conclusion: Arrests over the years
# From the graph above, we can see that the number of arrests increased from 1997 to 2001 and then dropped sharply in 2002. Arrest counts alone therefore do not establish that the number of marijuana consumers is increasing.
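# To make the overall direction concrete, here is a small optional sketch (my addition) that
# puts the already-imported LinearRegression to use: fit a simple linear trend to the yearly
# arrest counts.
years = arrests_by_year.index.values.reshape(-1, 1)
counts = arrests_by_year.values
trend = LinearRegression().fit(years, counts)
print("estimated average change in arrests per year:", trend.coef_[0])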
# ### Age, Gender, and Colour: How Influential Are These Factors?
# In the following graphs, we will study how much these factors influence an individual's likelihood of being arrested for marijuana possession.
arrests_by_age = df2.groupby("age").size()
plt.figure(figsize=(10, 6))
plt.plot(arrests_by_age.index, arrests_by_age.values, marker="o")
plt.xlabel("Age")
plt.ylabel("Number of Arrests")
plt.title("Number of Arrests per Age")
plt.show()
arrests_by_gender = df2.groupby("sex").size()
plt.figure(figsize=(6, 6))
plt.bar(arrests_by_gender.index, arrests_by_gender.values)
plt.xlabel("Gender")
plt.ylabel("Number of Arrests")
plt.title("Number of Arrests by Gender")
plt.show()
arrests_by_color = df2.groupby("colour").size()
plt.figure(figsize=(10, 6))
plt.bar(arrests_by_color.index, arrests_by_color.values)
plt.xlabel("Color")
plt.ylabel("Number of Arrests")
plt.title("Number of Arrests by Color")
plt.show()
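# One more cut of the data (my addition, relying on the 'released' Yes/No column described in
# the dataset documentation): the share of arrestees released with a summons, by colour.
release_rate = (
    df2.assign(released_flag=(df2["released"] == "Yes").astype(int))
    .groupby("colour")["released_flag"]
    .mean()
)
print(release_rate)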
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/819/129819769.ipynb
|
arrests-for-marijuana-possession
|
utkarshx27
|
[{"Id": 129819769, "ScriptId": 38602719, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15000594, "CreationDate": "05/16/2023 17:41:21", "VersionNumber": 1.0, "Title": "problems of today's students", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 112.0, "LinesInsertedFromPrevious": 112.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
|
[{"Id": 186196227, "KernelVersionId": 129819769, "SourceDatasetVersionId": 5631796}, {"Id": 186196228, "KernelVersionId": 129819769, "SourceDatasetVersionId": 5672268}]
|
[{"Id": 5631796, "DatasetId": 3238325, "DatasourceVersionId": 5707058, "CreatorUserId": 13364933, "LicenseName": "CC0: Public Domain", "CreationDate": "05/08/2023 10:17:21", "VersionNumber": 1.0, "Title": "Marijuana Arrests in Toronto: Racial Disparities", "Slug": "arrests-for-marijuana-possession", "Subtitle": "Marijuana Arrests in Toronto: Race, Release, and Policing (1997-2002)", "Description": "``` \nData on police treatment of individuals arrested in Toronto for simple possession of small quantities of marijuana. The data are part of a larger data set featured in a series of articles in the Toronto Star newspaper. A data frame with 5226 observations on the following 8 variables.\n```\n| Column | Description |\n| --- | --- |\n| released | Whether or not the arrestee was released with a summons; a factor with levels: No; Yes.\n |\n| colour | The arrestee's race; a factor with levels: Black; White. |\n| year | 1997 through 2002; a numeric vector. |\n| age | in years; a numeric vector. |\n| sex | a factor with levels: Female; Male. |\n| employed | a factor with levels: No; Yes. |\n| citizen | a factor with levels: No; Yes. |\n| checks | Number of police data bases (of previous arrests, previous convictions, parole status, etc. \u2013 6 in all) on which the arrestee's name appeared; a numeric vector |\n\n# Source\nPersonal communication from Michael Friendly, York University.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3238325, "CreatorUserId": 13364933, "OwnerUserId": 13364933.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5631796.0, "CurrentDatasourceVersionId": 5707058.0, "ForumId": 3303517, "Type": 2, "CreationDate": "05/08/2023 10:17:21", "LastActivityDate": "05/08/2023", "TotalViews": 8788, "TotalDownloads": 1614, "TotalVotes": 49, "TotalKernels": 14}]
|
[{"Id": 13364933, "UserName": "utkarshx27", "DisplayName": "Utkarsh Singh", "RegisterDate": "01/21/2023", "PerformanceTier": 2}]
|
import matplotlib.pyplot as plt
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
from sklearn.linear_model import LinearRegression
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Dataset 1: University Students Complaints & Reports
# Technology is making life much easier, and its fast, unstoppable development has opened up nearly unlimited opportunities and ideas, yet new kinds of problems keep coming to the surface. Students' problems are one example. Twenty years ago internet access was limited to a few students; now everyone can reach a huge amount of information online. Still, today's students face difficulties with their research, alongside other difficulties in their studying journey. In the following cells, we will present some of these problems and visualize them to draw facts out of the visualizations.
df = pd.read_csv(
"/kaggle/input/university-students-complaints-and-reports/Datasetprojpowerbi.csv"
)
df
# ### Academic Support and Resources
# In the following paragraph, we will try to figure out if academic research difficulties are related to researching experience by representing the number of students who face this problem alongside the academic year they are currently in.
research_problem = df[df.Genre == "Academic Support and Resources"]
by_year = research_problem.groupby("Year", as_index=False)
research_by_year = by_year.count()
barplot = sns.barplot(x="Year", y="Reports", data=research_by_year)
research_by_year["Year"].unique()
# ### Conclusion: Academic Support and Resources:
# After examining the graph, we can observe that the majority of reports originate from students who are still in their first year. The number of reports significantly decreases for second-year students, and there are no reports from students who have progressed beyond their second year.
# ### Athletics and sports
# In the following paragraph, we will attempt to determine the causes behind the high volume of reports concerning the university's athletic facilities.
sport_problem = df[df.Genre == "Athletics and sports"]
by_gender = sport_problem.groupby("Gender", as_index=False)
research_by_gender = by_gender.count()
barplot = sns.barplot(x="Gender", y="Reports", data=research_by_gender)
# From the first graph, we can observe that the number of reports coming from females is higher than the number of reports coming from males. This suggests that females may be experiencing mistreatment based on their gender, as indicated by some of the reports.
by_age = sport_problem.groupby("Age", as_index=False)
research_by_age = by_age.count()
barplot = sns.barplot(x="Age", y="Reports", data=research_by_age)
#
# From the second graph, we can observe that there is no discernible pattern between the number of reports and students' age. Based on this, we can conclude that there is no need to be concerned about making additional efforts in the sports facilities specifically targeting young or old students.
# ### Conclusion: Athletics and sports:
# After examining the graphs and reading some reports, we can observe that the main problem students face in the sports facility is related to how they are treated and the lack of general support. Additionally, there are some opinions regarding the equipment in the facility.
# ## Dataset 2: Arrests for Marijuana Possession
# As marijuana consumption becomes legal in more countries, the number of people who consume it is rising worldwide. Some claim it is harmless, while others argue the opposite. In the following graphs, we will attempt to understand the relationship between marijuana possession arrests and factors such as colour, gender, age, and more.
df2 = pd.read_csv("/kaggle/input/arrests-for-marijuana-possession/Arrests.csv")
df2
# ### Arrests over the years
# In the following graph, we will represent the number of arrests that have occurred over the years in an attempt to understand whether the number of marijuana consumers is truly increasing or not.
arrests_by_year = df2.groupby("year").size()
plt.figure(figsize=(10, 6))
plt.plot(arrests_by_year.index, arrests_by_year.values, marker="o")
plt.xlabel("Year")
plt.ylabel("Number of Arrests")
plt.title("Number of Arrests per Year")
plt.show()
# ### Conclusion: Arrests over the years
# From the graph above, we can see that the number of arrests increased from 1997 to 2001 and then dropped sharply in 2002. Arrest counts alone therefore do not establish that the number of marijuana consumers is increasing.
# ### Age, Gender, and Colour: How Influential Are These Factors?
# In the following graphs, we will study how much these factors influence an individual's likelihood of being arrested for marijuana possession.
arrests_by_age = df2.groupby("age").size()
plt.figure(figsize=(10, 6))
plt.plot(arrests_by_age.index, arrests_by_age.values, marker="o")
plt.xlabel("Age")
plt.ylabel("Number of Arrests")
plt.title("Number of Arrests per Age")
plt.show()
arrests_by_gender = df2.groupby("sex").size()
plt.figure(figsize=(6, 6))
plt.bar(arrests_by_gender.index, arrests_by_gender.values)
plt.xlabel("Gender")
plt.ylabel("Number of Arrests")
plt.title("Number of Arrests by Gender")
plt.show()
arrests_by_color = df2.groupby("colour").size()
plt.figure(figsize=(10, 6))
plt.bar(arrests_by_color.index, arrests_by_color.values)
plt.xlabel("Color")
plt.ylabel("Number of Arrests")
plt.title("Number of Arrests by Color")
plt.show()
| false | 2 | 1,665 | 1 | 1,989 | 1,665 |
||
129819111
|
<jupyter_start><jupyter_text>BanFakeNews
## BanFakeNews: A Dataset for Detecting Fake News in Bangla
This work is accepted at LREC 2020. Paper is available at https://arxiv.org/pdf/2004.08789.pdf
### Abstract
Observing the damages that can be done by the rapid propagation of fake news in various sectors like politics and finance, automatic identification of fake news using linguistic analysis has drawn the attention of the research community. However, such methods are largely being developed for English where low resource languages remain out of the focus. But the risks spawned by fake and manipulative news are not confined by languages. In this work, we propose an annotated dataset of ~50K news that can be used for building automated fake news detection systems for a low resource language like Bangla. Additionally, we provide an analysis of the dataset and develop a benchmark system with state of the art NLP techniques to identify Bangla fake news. To create this system, we explore traditional linguistic features and neural network based methods. We expect this dataset will be a valuable resource for building technologies to prevent the spreading of fake news and contribute in research with low resource languages.
#### List of files
* Authentic-48K.csv
* Fake-1K.csv
* LabeledAuthentic-7K.csv
* LabeledFake-1K.csv
**File Format**
Authentic-48K.csv and Fake-1K.csv
| Column Title | Description |
| ------------- |------------- |
| articleID | ID of the news |
| domain | News publisher's site name |
| date | Category of the news|
| category | Category of the news|
| headline | Headline of the news|
| content | Article or body of the news|
| label | 1 or 0 . '1' for authentic '0' for fake|
LabeledAuthentic-7K.csv, LabeledFake-1K.csv
|Column Title |Description |
|------------- |------------- |
| articleID | ID of the news |
| domain | News publisher's site name |
| date | Published Date |
| category | Category of the news |
| source | Source of the news. (One who can verify the claim of the news) |
| relation | Related or Unrelated. Related if headline matches with content's claim otherwise it is labeled as Unrelated |
| headline | Headline of the news |
| content | Article or body of the news |
| label | 1 or 0 . '1' for authentic '0' for fake |
| F-type | Type of fake news (Clickbait, Satire, Fake(Misleading or False Context))
**F-type** is only present in LabeledFake-1K.csv
### Bibtex for citation
```
@InProceedings{Hossain20.1084,
author = {Md Zobaer Hossain, Md Ashraful Rahman, Md Saiful Islam, Sudipta Kar},
title = "{BanFakeNews: A Dataset for Detecting Fake News in Bangla}",
booktitle = {Proceedings of the Twelfth International Conference on Language Resources and Evaluation (LREC 2020)},
year = {2020},
publisher = {European Language Resources Association (ELRA)},
language = {english}
}
```
Kaggle dataset identifier: banfakenews
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Imports
import pandas as pd
import matplotlib.pyplot as plt
import cufflinks as cf
import plotly
import plotly.express as px
import seaborn as sns
import re
import string
from nltk.stem.porter import PorterStemmer
from bnlp.corpus import stopwords
from bnlp.corpus.util import remove_stopwords
from wordcloud import WordCloud
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.ensemble import (
RandomForestClassifier,
AdaBoostClassifier,
GradientBoostingClassifier,
)
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
from sklearn.linear_model import PassiveAggressiveClassifier
from datetime import datetime
from sklearn.feature_extraction.text import CountVectorizer
from pandas import DataFrame
from collections import OrderedDict
from colorama import Fore, Back, Style
y_ = Fore.YELLOW
r_ = Fore.RED
g_ = Fore.GREEN
b_ = Fore.BLUE
m_ = Fore.MAGENTA
sr_ = Style.RESET_ALL
# # Reading the csv file
true = pd.read_csv("../input/banfakenews/LabeledAuthentic-7K.csv")
false = pd.read_csv("../input/banfakenews/LabeledFake-1K.csv")
true.head()
true["F-type"] = "unknown"
true.head()
false.head()
df = pd.concat([true, false])
df.isna().sum()
df.headline.count()
df.category.value_counts()
df["content"] = (
df["domain"]
+ df["headline"]
+ df["category"]
+ df["date"]
+ df["source"]
+ df["relation"]
+ df["F-type"]
)
del df["domain"]
del df["headline"]
del df["date"]
del df["source"]
del df["category"]
del df["relation"]
del df["F-type"]
df.head()
# # Optimization
from bnlp.corpus import stopwords, punctuations, letters, digits
stemmer = PorterStemmer()
def stem_text(text):
final_text = []
for i in text.split():
if i.strip().lower() not in stopwords:
word = stemmer.stem(i.strip())
final_text.append(word)
return " ".join(final_text)
df.content = df.content.apply(stem_text)
df.head()
def unique_text(text):
    # keep only the first occurrence of each word within a single text
    seen = set()
    final_text = []
    for i in text.split():
        word = i.strip()
        if word.lower() not in seen:
            seen.add(word.lower())
            final_text.append(word)
    return " ".join(final_text)
df["unique_content"] = df.content.apply(unique_text)
df.head()
from bnlp import BasicTokenizer
def tokenizer(txt):
tokens = BasicTokenizer().tokenize(txt)
return tokens
df["content_tokenized"] = df["content"].apply(lambda x: tokenizer(x))
df.head()
from bnlp.corpus import stopwords, punctuations, letters, digits
print(stopwords)
print(punctuations)
print(letters)
print(digits)
from bnlp.corpus.util import remove_stopwords
def remove_stopword(txt):
clean_words = remove_stopwords(txt, stopwords)
return clean_words
df["content_without_stopwords"] = df["unique_content"].apply(
lambda x: remove_stopword(x)
)
df.head()
from bnlp.corpus.util import remove_stopwords
def remove_punctuation(txt):
clean_words = remove_stopwords(txt, punctuations)
return clean_words
df["content_without_stopwords"] = df["unique_content"].apply(
lambda x: remove_punctuation(x)
)
df.head()
# # WordCloud of articles
plt.figure(figsize=(20, 20))
wc = WordCloud(max_words=3000, width=1600, height=800, stopwords=stopwords).generate(
" ".join(df.content)
)
plt.imshow(wc, interpolation="bilinear")
# # Training and Testing
y = df.label.values
x = df.content.values
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.30)
tfidf_vect = TfidfVectorizer()
tfidf_train = tfidf_vect.fit_transform(x_train)
tfidf_test = tfidf_vect.transform(x_test)
tfidf_df = pd.DataFrame(tfidf_train.A, columns=tfidf_vect.get_feature_names())
Adab = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=10), n_estimators=5, random_state=1
)
Adab.fit(tfidf_train, y_train)
y_pred3 = Adab.predict(tfidf_test)
ABscore = metrics.accuracy_score(y_test, y_pred3)
print("accuracy: %0.3f" % ABscore)
cm = metrics.confusion_matrix(y_test, y_pred3)
print(cm)
Rando = RandomForestClassifier(n_estimators=100, random_state=0)
Rando.fit(tfidf_train, y_train)
y_pred1 = Rando.predict(tfidf_test)
RFscore = metrics.accuracy_score(y_test, y_pred1)
print("accuracy: %0.3f" % RFscore)
cm = metrics.confusion_matrix(y_test, y_pred1)
print(cm)
clf = MultinomialNB()
clf.fit(tfidf_train, y_train)
pred = clf.predict(tfidf_test)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
cm = metrics.confusion_matrix(y_test, pred)
print(cm)
linear_clf = PassiveAggressiveClassifier()
linear_clf.fit(tfidf_train, y_train)
pred = linear_clf.predict(tfidf_test)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
cm = metrics.confusion_matrix(y_test, pred)
print(cm)
dt = DecisionTreeClassifier()
dt.fit(tfidf_train, y_train)
pred = dt.predict(tfidf_test)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
cm = metrics.confusion_matrix(y_test, pred)
print(cm)
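# A small wrap-up sketch (my addition): collect the two scores that kept their own variable
# names into a DataFrame for a side-by-side view (the Naive Bayes, PassiveAggressive and
# Decision Tree runs all reused `score`, so only the named values are listed here).
summary = DataFrame({"model": ["AdaBoost", "RandomForest"], "accuracy": [ABscore, RFscore]})
print(summary)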
# # Model Testing
import ktrain
from ktrain import text
MODEL_NAME = "sagorsarker/bangla-bert-base"
t = text.Transformer(MODEL_NAME, maxlen=500)
list_content = df["content"].tolist()
list_label = df["label"].astype(int).tolist()
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
list_content, list_label, test_size=0.20, random_state=42, shuffle=True
)
trn = t.preprocess_train(X_train, y_train)
val = t.preprocess_test(X_test, y_test)
model = t.get_classifier()
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
learner = ktrain.get_learner(model, train_data=trn, val_data=val, batch_size=6)
learner.lr_find(show_plot=True, max_epochs=1)
learner.fit_onecycle(8e-5, 5)
learner.validate()
learner.validate(class_names=t.get_classes())
learner.model.summary()
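# A hedged follow-up sketch (not part of the original run): wrap the fine-tuned model in a
# ktrain Predictor and classify one held-out article; the class labels follow the `label`
# column above (1 = authentic, 0 = fake).
predictor = ktrain.get_predictor(learner.model, preproc=t)
print(predictor.predict(X_test[0]))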
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/819/129819111.ipynb
|
banfakenews
|
cryptexcode
|
[{"Id": 129819111, "ScriptId": 16087561, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6683493, "CreationDate": "05/16/2023 17:34:39", "VersionNumber": 1.0, "Title": "Bangla Fake News Detection uSING ML classifiers", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 260.0, "LinesInsertedFromPrevious": 191.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 69.0, "LinesInsertedFromFork": 191.0, "LinesDeletedFromFork": 234.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 69.0, "TotalVotes": 0}]
|
[{"Id": 186195075, "KernelVersionId": 129819111, "SourceDatasetVersionId": 2009678}]
|
[{"Id": 2009678, "DatasetId": 618408, "DatasourceVersionId": 2049185, "CreatorUserId": 2324832, "LicenseName": "CC0: Public Domain", "CreationDate": "03/10/2021 05:41:02", "VersionNumber": 2.0, "Title": "BanFakeNews", "Slug": "banfakenews", "Subtitle": "A Dataset for Detecting Fake News in Bangla", "Description": "## BanFakeNews: A Dataset for Detecting Fake News in Bangla \n\nThis work is accepted at LREC 2020. Paper is available at https://arxiv.org/pdf/2004.08789.pdf\n\n\n### Abstract\nObserving the damages that can be done by the rapid propagation of fake news in various sectors like politics and finance, automatic identification of fake news using linguistic analysis has drawn the attention of the research community. However, such methods are largely being developed for English where low resource languages remain out of the focus. But the risks spawned by fake and manipulative news are not confined by languages. In this work, we propose an annotated dataset of ~50K news that can be used for building automated fake news detection systems for a low resource language like Bangla. Additionally, we provide an analysis of the dataset and develop a benchmark system with state of the art NLP techniques to identify Bangla fake news. To create this system, we explore traditional linguistic features and neural network based methods. We expect this dataset will be a valuable resource for building technologies to prevent the spreading of fake news and contribute in research with low resource languages.\n\n\n#### List of files\n* Authentic-48K.csv\n* Fake-1K.csv\n* LabeledAuthentic-7K.csv\n* LabeledFake-1K.csv\n\n**File Format**\nAuthentic-48K.csv and Fake-1K.csv\n\n| Column Title | Description |\n| ------------- |------------- |\n| articleID | ID of the news |\n| domain | News publisher's site name |\n| date | Category of the news|\n| category | Category of the news|\n| headline | Headline of the news|\n| content | Article or body of the news|\n| label | 1 or 0 . '1' for authentic '0' for fake|\n\nLabeledAuthentic-7K.csv, LabeledFake-1K.csv\n\n|Column Title |Description |\n|------------- |------------- |\n| articleID | ID of the news |\n| domain | News publisher's site name |\n| date | Published Date |\n| category | Category of the news |\n| source | Source of the news. (One who can verify the claim of the news) |\n| relation | Related or Unrelated. Related if headline matches with content's claim otherwise it is labeled as Unrelated |\n| headline | Headline of the news |\n| content | Article or body of the news |\n| label | 1 or 0 . '1' for authentic '0' for fake |\n| F-type | Type of fake news (Clickbait, Satire, Fake(Misleading or False Context))\n\n**F-type** is only present in LabeledFake-1K.csv\n\n### Bibtex for citation\n```\n@InProceedings{Hossain20.1084,\n author = {Md Zobaer Hossain, Md Ashraful Rahman, Md Saiful Islam, Sudipta Kar},\n title = \"{BanFakeNews: A Dataset for Detecting Fake News in Bangla}\",\n booktitle = {Proceedings of the Twelfth International Conference on Language Resources and Evaluation (LREC 2020)},\n year = {2020},\n publisher = {European Language Resources Association (ELRA)},\nlanguage = {english}\n}\n```", "VersionNotes": "Bug fix on LabeledAuthentic-7K.csv file", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 618408, "CreatorUserId": 192294, "OwnerUserId": 192294.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2009678.0, "CurrentDatasourceVersionId": 2049185.0, "ForumId": 632527, "Type": 2, "CreationDate": "04/24/2020 04:54:35", "LastActivityDate": "04/24/2020", "TotalViews": 18690, "TotalDownloads": 2365, "TotalVotes": 52, "TotalKernels": 3}]
|
[{"Id": 192294, "UserName": "cryptexcode", "DisplayName": "Sudipta Kar", "RegisterDate": "04/30/2014", "PerformanceTier": 2}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Imports
import pandas as pd
import matplotlib.pyplot as plt
import cufflinks as cf
import plotly
import plotly.express as px
import seaborn as sns
import re
import string
from nltk.stem.porter import PorterStemmer
from bnlp.corpus import stopwords
from bnlp.corpus.util import remove_stopwords
from wordcloud import WordCloud
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.ensemble import (
RandomForestClassifier,
AdaBoostClassifier,
GradientBoostingClassifier,
)
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
from sklearn.linear_model import PassiveAggressiveClassifier
from datetime import datetime
from sklearn.feature_extraction.text import CountVectorizer
from pandas import DataFrame
from collections import OrderedDict
from colorama import Fore, Back, Style
y_ = Fore.YELLOW
r_ = Fore.RED
g_ = Fore.GREEN
b_ = Fore.BLUE
m_ = Fore.MAGENTA
sr_ = Style.RESET_ALL
# # Reading the csv file
true = pd.read_csv("../input/banfakenews/LabeledAuthentic-7K.csv")
false = pd.read_csv("../input/banfakenews/LabeledFake-1K.csv")
true.head()
true["F-type"] = "unknown"
true.head()
false.head()
df = pd.concat([true, false])
df.isna().sum()
df.headline.count()
df.category.value_counts()
df["content"] = (
df["domain"]
+ df["headline"]
+ df["category"]
+ df["date"]
+ df["source"]
+ df["relation"]
+ df["F-type"]
)
del df["domain"]
del df["headline"]
del df["date"]
del df["source"]
del df["category"]
del df["relation"]
del df["F-type"]
df.head()
# # Optimization
from bnlp.corpus import stopwords, punctuations, letters, digits
stemmer = PorterStemmer()
def stem_text(text):
final_text = []
for i in text.split():
if i.strip().lower() not in stopwords:
word = stemmer.stem(i.strip())
final_text.append(word)
return " ".join(final_text)
df.content = df.content.apply(stem_text)
df.head()
def unique_text(text):
    # keep only the first occurrence of each word within a single text
    seen = set()
    final_text = []
    for i in text.split():
        word = i.strip()
        if word.lower() not in seen:
            seen.add(word.lower())
            final_text.append(word)
    return " ".join(final_text)
df["unique_content"] = df.content.apply(unique_text)
df.head()
from bnlp import BasicTokenizer
def tokenizer(txt):
tokens = BasicTokenizer().tokenize(txt)
return tokens
df["content_tokenized"] = df["content"].apply(lambda x: tokenizer(x))
df.head()
from bnlp.corpus import stopwords, punctuations, letters, digits
print(stopwords)
print(punctuations)
print(letters)
print(digits)
from bnlp.corpus.util import remove_stopwords
def remove_stopword(txt):
clean_words = remove_stopwords(txt, stopwords)
return clean_words
df["content_without_stopwords"] = df["unique_content"].apply(
lambda x: remove_stopword(x)
)
df.head()
from bnlp.corpus.util import remove_stopwords
def remove_punctuation(txt):
clean_words = remove_stopwords(txt, punctuations)
return clean_words
df["content_without_stopwords"] = df["unique_content"].apply(
lambda x: remove_punctuation(x)
)
df.head()
# # WordCloud of articles
plt.figure(figsize=(20, 20))
wc = WordCloud(max_words=3000, width=1600, height=800, stopwords=stopwords).generate(
" ".join(df.content)
)
plt.imshow(wc, interpolation="bilinear")
# # Training and Testing
y = df.label.values
x = df.content.values
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.30)
tfidf_vect = TfidfVectorizer()
tfidf_train = tfidf_vect.fit_transform(x_train)
tfidf_test = tfidf_vect.transform(x_test)
tfidf_df = pd.DataFrame(tfidf_train.A, columns=tfidf_vect.get_feature_names())
Adab = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=10), n_estimators=5, random_state=1
)
Adab.fit(tfidf_train, y_train)
y_pred3 = Adab.predict(tfidf_test)
ABscore = metrics.accuracy_score(y_test, y_pred3)
print("accuracy: %0.3f" % ABscore)
cm = metrics.confusion_matrix(y_test, y_pred3)
print(cm)
Rando = RandomForestClassifier(n_estimators=100, random_state=0)
Rando.fit(tfidf_train, y_train)
y_pred1 = Rando.predict(tfidf_test)
RFscore = metrics.accuracy_score(y_test, y_pred1)
print("accuracy: %0.3f" % RFscore)
cm = metrics.confusion_matrix(y_test, y_pred1)
print(cm)
clf = MultinomialNB()
clf.fit(tfidf_train, y_train)
pred = clf.predict(tfidf_test)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
cm = metrics.confusion_matrix(y_test, pred)
print(cm)
linear_clf = PassiveAggressiveClassifier()
linear_clf.fit(tfidf_train, y_train)
pred = linear_clf.predict(tfidf_test)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
cm = metrics.confusion_matrix(y_test, pred)
print(cm)
dt = DecisionTreeClassifier()
dt.fit(tfidf_train, y_train)
pred = dt.predict(tfidf_test)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
cm = metrics.confusion_matrix(y_test, pred)
print(cm)
# # Model Testing
import ktrain
from ktrain import text
MODEL_NAME = "sagorsarker/bangla-bert-base"
t = text.Transformer(MODEL_NAME, maxlen=500)
list_content = df["content"].tolist()
list_label = df["label"].astype(int).tolist()
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
list_content, list_label, test_size=0.20, random_state=42, shuffle=True
)
trn = t.preprocess_train(X_train, y_train)
val = t.preprocess_test(X_test, y_test)
model = t.get_classifier()
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
learner = ktrain.get_learner(model, train_data=trn, val_data=val, batch_size=6)
learner.lr_find(show_plot=True, max_epochs=1)
learner.fit_onecycle(8e-5, 5)
learner.validate()
learner.validate(class_names=t.get_classes())
learner.model.summary()
| false | 2 | 2,171 | 0 | 2,961 | 2,171 |
||
129819678
|
# ### Importing Libraries
from sklearn.datasets import make_classification # generate classification datasets
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
# ### Dataset Generation
X, y = make_classification(
n_samples=100,
n_features=2,
n_informative=2,
n_redundant=0,
n_classes=2,
n_clusters_per_class=1,
random_state=41,
hypercube=False,
class_sep=10,
)
# hypercube - if True then the clusters are kept at the vertices of a hypercube(geometrical shape), if False the clusters are put on the vertices of a random polytope.
# class_sep - greater values spread out the clusters
X.shape
# ### Data Representation
plt.figure(figsize=(8, 5))
plt.scatter(X[:, 0], X[:, 1], c=y, s=100) # c - colours, s - marker size
plt.show()
# ### Step function
epochs = 1000
lr = 0.1 # learning rate
def step(z):
if z > 0:
return 1
return 0
def perceptron_step(X, y):
X = np.insert(X, 0, 1, axis=1) # Adding a col of 1s to X
weights = np.ones(X.shape[1])
for i in range(epochs):
j = np.random.randint(0, 100)
y_pred = step(np.dot(X[j], weights))
weights = weights + (lr * (y[j] - y_pred)) * X[j]
return weights[0], weights[1:]
intercept1, coefs1 = perceptron_step(X, y)
print(intercept1, " ", coefs1)
# using line equations ax + by + c = 0, m = -a/b b = -c/b
# line using step function
m1 = -(coefs1[0] / coefs1[1])
b1 = -(intercept1 / coefs1[1])
# (x, y) values for plotting the line
x1_input = np.linspace(-3, 3, 100)
y1_input = (m1 * x1_input) + b1
# ### Sigmoid function
def sigmoid(z):
return 1 / (1 + np.exp(-z))
def perceptron_sigmoid(X, y):
X = np.insert(X, 0, 1, axis=1)
weights = np.ones(X.shape[1])
for i in range(epochs):
j = np.random.randint(0, 100)
y_pred = sigmoid(np.dot(X[j], weights))
weights = weights + (lr * (y[j] - y_pred)) * X[j]
return weights[0], weights[1:]
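# Note: with the sigmoid activation this per-sample update is stochastic gradient descent on
# the log loss, which is why the resulting boundary ends up very close to the one sklearn's
# LogisticRegression finds (hence the near-overlap in the plots below).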
intercept2, coefs2 = perceptron_sigmoid(X, y)
print(intercept2, " ", coefs2)
# line using sigmoid function
m2 = -(coefs2[0] / coefs2[1])
b2 = -(intercept2 / coefs2[1])
x2_input = np.linspace(-3, 3, 100)
y2_input = (m2 * x2_input) + b2
# ### Logistic Regression model
classifier = LogisticRegression()
classifier.fit(X, y)
intercept3 = classifier.intercept_
coefs3 = classifier.coef_
print(intercept3, " ", coefs3)
# line using logistic regression
m3 = -(coefs3[0][0] / coefs3[0][1])
b3 = -(intercept3 / coefs3[0][1])
x3_input = np.linspace(-3, 3, 100)
y3_input = (m3 * x3_input) + b3  # fixed: use the logistic-regression slope m3 and intercept b3 (was m2, b2)
# ### Plotting lines
# The sigmoid-perceptron line and the Logistic Regression line nearly overlap
# blue line - step function
# red line - sigmoid function
plt.figure(figsize=(8, 5))
plt.plot(x1_input, y1_input)
plt.plot(x2_input, y2_input, c="r")
plt.scatter(X[:, 0], X[:, 1], c=y, s=100)
plt.show()
# blue line - step function
# green line - logistic regression
plt.figure(figsize=(8, 5))
plt.plot(x1_input, y1_input)
plt.plot(x3_input, y3_input, c="g")
plt.scatter(X[:, 0], X[:, 1], c=y, s=100)
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/819/129819678.ipynb
| null | null |
[{"Id": 129819678, "ScriptId": 38508762, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6819371, "CreationDate": "05/16/2023 17:40:18", "VersionNumber": 4.0, "Title": "step-v/s-sigmoid-function-for-classification", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 117.0, "LinesInsertedFromPrevious": 2.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 115.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# ### Importing Libraries
from sklearn.datasets import make_classification # generate classification datasets
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
# ### Dataset Generation
X, y = make_classification(
n_samples=100,
n_features=2,
n_informative=2,
n_redundant=0,
n_classes=2,
n_clusters_per_class=1,
random_state=41,
hypercube=False,
class_sep=10,
)
# hypercube - if True then the clusters are kept at the vertices of a hypercube(geometrical shape), if False the clusters are put on the vertices of a random polytope.
# class_sep - greater values spread out the clusters
X.shape
# ### Data Representation
plt.figure(figsize=(8, 5))
plt.scatter(X[:, 0], X[:, 1], c=y, s=100) # c - colours, s - marker size
plt.show()
# ### Step function
epochs = 1000
lr = 0.1 # learning rate
def step(z):
if z > 0:
return 1
return 0
def perceptron_step(X, y):
X = np.insert(X, 0, 1, axis=1) # Adding a col of 1s to X
weights = np.ones(X.shape[1])
for i in range(epochs):
j = np.random.randint(0, 100)
y_pred = step(np.dot(X[j], weights))
weights = weights + (lr * (y[j] - y_pred)) * X[j]
return weights[0], weights[1:]
intercept1, coefs1 = perceptron_step(X, y)
print(intercept1, " ", coefs1)
# using line equations ax + by + c = 0, m = -a/b b = -c/b
# line using step function
m1 = -(coefs1[0] / coefs1[1])
b1 = -(intercept1 / coefs1[1])
# (x, y) values for plotting the line
x1_input = np.linspace(-3, 3, 100)
y1_input = (m1 * x1_input) + b1
# ### Sigmoid function
def sigmoid(z):
return 1 / (1 + np.exp(-z))
def perceptron_sigmoid(X, y):
X = np.insert(X, 0, 1, axis=1)
weights = np.ones(X.shape[1])
for i in range(epochs):
j = np.random.randint(0, 100)
y_pred = sigmoid(np.dot(X[j], weights))
weights = weights + (lr * (y[j] - y_pred)) * X[j]
return weights[0], weights[1:]
intercept2, coefs2 = perceptron_sigmoid(X, y)
print(intercept2, " ", coefs2)
# line using sigmoid function
m2 = -(coefs2[0] / coefs2[1])
b2 = -(intercept2 / coefs2[1])
x2_input = np.linspace(-3, 3, 100)
y2_input = (m2 * x2_input) + b2
# ### Logistic Regression model
classifier = LogisticRegression()
classifier.fit(X, y)
intercept3 = classifier.intercept_
coefs3 = classifier.coef_
print(intercept3, " ", coefs3)
# line using logistic regression
m3 = -(coefs3[0][0] / coefs3[0][1])
b3 = -(intercept3 / coefs3[0][1])
x3_input = np.linspace(-3, 3, 100)
y3_input = (m3 * x3_input) + b3  # fixed: use the logistic-regression slope m3 and intercept b3 (was m2, b2)
# ### Plotting lines
# The sigmoid-perceptron line and the Logistic Regression line nearly overlap
# blue line - step function
# red line - sigmoid function
plt.figure(figsize=(8, 5))
plt.plot(x1_input, y1_input)
plt.plot(x2_input, y2_input, c="r")
plt.scatter(X[:, 0], X[:, 1], c=y, s=100)
plt.show()
# blue line - step function
# green line - logistic regression
plt.figure(figsize=(8, 5))
plt.plot(x1_input, y1_input)
plt.plot(x3_input, y3_input, c="g")
plt.scatter(X[:, 0], X[:, 1], c=y, s=100)
plt.show()
| false | 0 | 1,168 | 0 | 1,168 | 1,168 |
||
129434003
|
<jupyter_start><jupyter_text>Water Quality
# Context
`Access to safe drinking-water is essential to health, a basic human right and a component of effective policy for health protection. This is important as a health and development issue at a national, regional and local level. In some regions, it has been shown that investments in water supply and sanitation can yield a net economic benefit, since the reductions in adverse health effects and health care costs outweigh the costs of undertaking the interventions.`
# Content
The water_potability.csv file contains water quality metrics for 3276 different water bodies.
### 1. pH value:
```PH is an important parameter in evaluating the acid–base balance of water. It is also the indicator of acidic or alkaline condition of water status. WHO has recommended maximum permissible limit of pH from 6.5 to 8.5. The current investigation ranges were 6.52–6.83 which are in the range of WHO standards. ```
### 2. Hardness:
```Hardness is mainly caused by calcium and magnesium salts. These salts are dissolved from geologic deposits through which water travels. The length of time water is in contact with hardness producing material helps determine how much hardness there is in raw water.
Hardness was originally defined as the capacity of water to precipitate soap caused by Calcium and Magnesium.```
### 3. Solids (Total dissolved solids - TDS):
```Water has the ability to dissolve a wide range of inorganic and some organic minerals or salts such as potassium, calcium, sodium, bicarbonates, chlorides, magnesium, sulfates etc. These minerals produced un-wanted taste and diluted color in appearance of water. This is the important parameter for the use of water. The water with high TDS value indicates that water is highly mineralized. Desirable limit for TDS is 500 mg/l and maximum limit is 1000 mg/l which prescribed for drinking purpose. ```
### 4. Chloramines:
```Chlorine and chloramine are the major disinfectants used in public water systems. Chloramines are most commonly formed when ammonia is added to chlorine to treat drinking water. Chlorine levels up to 4 milligrams per liter (mg/L or 4 parts per million (ppm)) are considered safe in drinking water.```
### 5. Sulfate:
```Sulfates are naturally occurring substances that are found in minerals, soil, and rocks. They are present in ambient air, groundwater, plants, and food. The principal commercial use of sulfate is in the chemical industry. Sulfate concentration in seawater is about 2,700 milligrams per liter (mg/L). It ranges from 3 to 30 mg/L in most freshwater supplies, although much higher concentrations (1000 mg/L) are found in some geographic locations. ```
### 6. Conductivity:
```Pure water is not a good conductor of electric current rather’s a good insulator. Increase in ions concentration enhances the electrical conductivity of water. Generally, the amount of dissolved solids in water determines the electrical conductivity. Electrical conductivity (EC) actually measures the ionic process of a solution that enables it to transmit current. According to WHO standards, EC value should not exceeded 400 μS/cm. ```
### 7. Organic_carbon:
```Total Organic Carbon (TOC) in source waters comes from decaying natural organic matter (NOM) as well as synthetic sources. TOC is a measure of the total amount of carbon in organic compounds in pure water. According to US EPA < 2 mg/L as TOC in treated / drinking water, and < 4 mg/Lit in source water which is use for treatment.```
### 8. Trihalomethanes:
```THMs are chemicals which may be found in water treated with chlorine. The concentration of THMs in drinking water varies according to the level of organic material in the water, the amount of chlorine required to treat the water, and the temperature of the water that is being treated. THM levels up to 80 ppm is considered safe in drinking water.```
### 9. Turbidity:
```The turbidity of water depends on the quantity of solid matter present in the suspended state. It is a measure of light emitting properties of water and the test is used to indicate the quality of waste discharge with respect to colloidal matter. The mean turbidity value obtained for Wondo Genet Campus (0.98 NTU) is lower than the WHO recommended value of 5.00 NTU.```
### 10. Potability:
```Indicates if water is safe for human consumption where 1 means Potable and 0 means Not potable.```
Kaggle dataset identifier: water-potability
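The guideline figures quoted above translate directly into simple range checks. A minimal illustrative sketch (column names follow the CSV; the ceilings are the WHO/EPA numbers cited in the description, and this is a data-quality screen, not a potability model):

```python
import pandas as pd

# Guideline ceilings quoted in the description above (illustrative only)
GUIDELINE_CEILINGS = {
    "Chloramines": 4.0,       # ppm
    "Conductivity": 400.0,    # μS/cm
    "Trihalomethanes": 80.0,  # ppm
    "Turbidity": 5.0,         # NTU
}

def flag_guideline_exceedances(df: pd.DataFrame) -> pd.DataFrame:
    """Mark samples whose measurements fall outside the quoted guideline ranges."""
    flags = pd.DataFrame(index=df.index)
    for col, ceiling in GUIDELINE_CEILINGS.items():
        flags[f"{col}_over_limit"] = df[col] > ceiling
    flags["ph_out_of_range"] = (df["ph"] < 6.5) | (df["ph"] > 8.5)
    return flags
```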
<jupyter_code>import pandas as pd
df = pd.read_csv('water-potability/water_potability.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 3276 entries, 0 to 3275
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 ph 2785 non-null float64
1 Hardness 3276 non-null float64
2 Solids 3276 non-null float64
3 Chloramines 3276 non-null float64
4 Sulfate 2495 non-null float64
5 Conductivity 3276 non-null float64
6 Organic_carbon 3276 non-null float64
7 Trihalomethanes 3114 non-null float64
8 Turbidity 3276 non-null float64
9 Potability 3276 non-null int64
dtypes: float64(9), int64(1)
memory usage: 256.1 KB
<jupyter_text>Examples:
{
"ph": NaN,
"Hardness": 204.8904554713,
"Solids": 20791.318980747,
"Chloramines": 7.3002118732,
"Sulfate": 368.5164413498,
"Conductivity": 564.3086541722,
"Organic_carbon": 10.379783078100001,
"Trihalomethanes": 86.9909704615,
"Turbidity": 2.9631353806,
"Potability": 0.0
}
{
"ph": 3.7160800754,
"Hardness": 129.4229205149,
"Solids": 18630.0578579703,
"Chloramines": 6.6352458839,
"Sulfate": NaN,
"Conductivity": 592.8853591349,
"Organic_carbon": 15.1800131164,
"Trihalomethanes": 56.3290762845,
"Turbidity": 4.5006562749,
"Potability": 0.0
}
{
"ph": 8.0991241893,
"Hardness": 224.2362593936,
"Solids": 19909.5417322924,
"Chloramines": 9.2758836027,
"Sulfate": NaN,
"Conductivity": 418.6062130645,
"Organic_carbon": 16.8686369296,
"Trihalomethanes": 66.4200925118,
"Turbidity": 3.0559337497,
"Potability": 0.0
}
{
"ph": 8.3167658842,
"Hardness": 214.3733940856,
"Solids": 22018.4174407753,
"Chloramines": 8.0593323774,
"Sulfate": 356.8861356431,
"Conductivity": 363.2665161642,
"Organic_carbon": 18.4365244955,
"Trihalomethanes": 100.3416743651,
"Turbidity": 4.6287705368,
"Potability": 0.0
}
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import sklearn
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
import warnings
warnings.filterwarnings("ignore")
df = pd.read_csv("/kaggle/input/water-potability/water_potability.csv")
df.sample(6)
df.info() # No object data type
# Missing values in 'ph', 'Sulfate', 'Trihalomethanes'
df.drop_duplicates(keep="first", inplace=True)
df.describe().T
plt.figure(figsize=(16, 14))
for i, col in enumerate(df.columns):
plt.subplot(4, 3, i + 1)
sns.kdeplot(data=df[col])
plt.tight_layout()
# Data follows normal distribution
plt.figure(figsize=(16, 14))
for i, col in enumerate(df.columns):
plt.subplot(4, 3, i + 1)
sns.boxplot(data=df[col])
plt.tight_layout()
# every column except target has outliers
# Since there are outliers in data, we impute by median
df["ph"].fillna(df["ph"].median(), inplace=True)
df["Sulfate"].fillna(df["Sulfate"].median(), inplace=True)
df["Trihalomethanes"].fillna(df["Trihalomethanes"].median(), inplace=True)
df["Potability"].value_counts() # Balanced Target Class
plt.pie(
df["Potability"].value_counts(),
labels=["Contaminated", "Potable"],
colors=["Pink", "Blue"],
autopct="%1.2f%%",
pctdistance=1.4,
startangle=75,
labeldistance=1.15,
radius=1,
counterclock=False,
center=(0, 0),
normalize=True,
data=df,
)
plt.show()
fig = plt.figure(figsize=(10, 10))
sns.heatmap(df.corr(), annot=True, cmap="viridis")
# No multicollinearity
from sklearn.model_selection import train_test_split
# Define features and target before splitting (x and y were not defined earlier in the notebook)
x = df.drop("Potability", axis=1)
y = df["Potability"]
X_train, X_test, y_train, y_test = train_test_split(
    x, y, test_size=0.33, random_state=76
)
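# A minimal baseline sketch to exercise the split above; a random forest is an
# arbitrary illustrative choice here, not a tuned or recommended model.
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score

baseline = RandomForestClassifier(n_estimators=200, random_state=76)
baseline.fit(X_train, y_train)
print("Baseline test accuracy:", accuracy_score(y_test, baseline.predict(X_test)))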
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/434/129434003.ipynb
|
water-potability
|
adityakadiwal
|
[{"Id": 129434003, "ScriptId": 37891343, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9849323, "CreationDate": "05/13/2023 19:05:41", "VersionNumber": 1.0, "Title": "notebook897f87c4a6", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 86.0, "LinesInsertedFromPrevious": 86.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185480277, "KernelVersionId": 129434003, "SourceDatasetVersionId": 2157486}]
|
[{"Id": 2157486, "DatasetId": 1292407, "DatasourceVersionId": 2198621, "CreatorUserId": 5454565, "LicenseName": "CC0: Public Domain", "CreationDate": "04/25/2021 10:27:44", "VersionNumber": 3.0, "Title": "Water Quality", "Slug": "water-potability", "Subtitle": "Drinking water potability", "Description": "# Context\n\n`Access to safe drinking-water is essential to health, a basic human right and a component of effective policy for health protection. This is important as a health and development issue at a national, regional and local level. In some regions, it has been shown that investments in water supply and sanitation can yield a net economic benefit, since the reductions in adverse health effects and health care costs outweigh the costs of undertaking the interventions.`\n\n\n# Content\n\n\nThe water_potability.csv file contains water quality metrics for 3276 different water bodies. \n### 1. pH value:\n```PH is an important parameter in evaluating the acid\u2013base balance of water. It is also the indicator of acidic or alkaline condition of water status. WHO has recommended maximum permissible limit of pH from 6.5 to 8.5. The current investigation ranges were 6.52\u20136.83 which are in the range of WHO standards. ```\n\n### 2. Hardness:\n```Hardness is mainly caused by calcium and magnesium salts. These salts are dissolved from geologic deposits through which water travels. The length of time water is in contact with hardness producing material helps determine how much hardness there is in raw water.\nHardness was originally defined as the capacity of water to precipitate soap caused by Calcium and Magnesium.```\n\n### 3. Solids (Total dissolved solids - TDS): \n```Water has the ability to dissolve a wide range of inorganic and some organic minerals or salts such as potassium, calcium, sodium, bicarbonates, chlorides, magnesium, sulfates etc. These minerals produced un-wanted taste and diluted color in appearance of water. This is the important parameter for the use of water. The water with high TDS value indicates that water is highly mineralized. Desirable limit for TDS is 500 mg/l and maximum limit is 1000 mg/l which prescribed for drinking purpose. ```\n\n### 4. Chloramines: \n```Chlorine and chloramine are the major disinfectants used in public water systems. Chloramines are most commonly formed when ammonia is added to chlorine to treat drinking water. Chlorine levels up to 4 milligrams per liter (mg/L or 4 parts per million (ppm)) are considered safe in drinking water.```\n\n### 5. Sulfate: \n```Sulfates are naturally occurring substances that are found in minerals, soil, and rocks. They are present in ambient air, groundwater, plants, and food. The principal commercial use of sulfate is in the chemical industry. Sulfate concentration in seawater is about 2,700 milligrams per liter (mg/L). It ranges from 3 to 30 mg/L in most freshwater supplies, although much higher concentrations (1000 mg/L) are found in some geographic locations. ```\n\n### 6. Conductivity: \n```Pure water is not a good conductor of electric current rather\u2019s a good insulator. Increase in ions concentration enhances the electrical conductivity of water. Generally, the amount of dissolved solids in water determines the electrical conductivity. Electrical conductivity (EC) actually measures the ionic process of a solution that enables it to transmit current. According to WHO standards, EC value should not exceeded 400 \u03bcS/cm. ```\n\n### 7. 
Organic_carbon: \n ```Total Organic Carbon (TOC) in source waters comes from decaying natural organic matter (NOM) as well as synthetic sources. TOC is a measure of the total amount of carbon in organic compounds in pure water. According to US EPA < 2 mg/L as TOC in treated / drinking water, and < 4 mg/Lit in source water which is use for treatment.```\n\n### 8. Trihalomethanes: \n```THMs are chemicals which may be found in water treated with chlorine. The concentration of THMs in drinking water varies according to the level of organic material in the water, the amount of chlorine required to treat the water, and the temperature of the water that is being treated. THM levels up to 80 ppm is considered safe in drinking water.```\n\n### 9. Turbidity: \n```The turbidity of water depends on the quantity of solid matter present in the suspended state. It is a measure of light emitting properties of water and the test is used to indicate the quality of waste discharge with respect to colloidal matter. The mean turbidity value obtained for Wondo Genet Campus (0.98 NTU) is lower than the WHO recommended value of 5.00 NTU.```\n\n### 10. Potability: \n```Indicates if water is safe for human consumption where 1 means Potable and 0 means Not potable.```", "VersionNotes": "Removed garbage column", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1292407, "CreatorUserId": 5454565, "OwnerUserId": 5454565.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2157486.0, "CurrentDatasourceVersionId": 2198621.0, "ForumId": 1311077, "Type": 2, "CreationDate": "04/24/2021 07:18:57", "LastActivityDate": "04/24/2021", "TotalViews": 422520, "TotalDownloads": 61531, "TotalVotes": 1262, "TotalKernels": 437}]
|
[{"Id": 5454565, "UserName": "adityakadiwal", "DisplayName": "Aditya Kadiwal", "RegisterDate": "07/12/2020", "PerformanceTier": 2}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import sklearn
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
import warnings
warnings.filterwarnings("ignore")
df = pd.read_csv("/kaggle/input/water-potability/water_potability.csv")
df.sample(6)
df.info() # No object data type
# Missing values in 'ph', 'Sulfate', 'Trihalomethanes'
df.drop_duplicates(keep="first", inplace=True)
df.describe().T
plt.figure(figsize=(16, 14))
for i, col in enumerate(df.columns):
plt.subplot(4, 3, i + 1)
sns.kdeplot(data=df[col])
plt.tight_layout()
# Data follows normal distribution
plt.figure(figsize=(16, 14))
for i, col in enumerate(df.columns):
plt.subplot(4, 3, i + 1)
sns.boxplot(data=df[col])
plt.tight_layout()
# every column except target has outliers
# Since there are outliers in data, we impute by median
df["ph"].fillna(df["ph"].median(), inplace=True)
df["Sulfate"].fillna(df["Sulfate"].median(), inplace=True)
df["Trihalomethanes"].fillna(df["Trihalomethanes"].median(), inplace=True)
df["Potability"].value_counts() # Balanced Target Class
plt.pie(
df["Potability"].value_counts(),
labels=["Contaminated", "Potable"],
colors=["Pink", "Blue"],
autopct="%1.2f%%",
pctdistance=1.4,
startangle=75,
labeldistance=1.15,
radius=1,
counterclock=False,
center=(0, 0),
normalize=True,
data=df,
)
plt.show()
fig = plt.figure(figsize=(10, 10))
sns.heatmap(df.corr(), annot=True, cmap="viridis")
# No multicollinearity
from sklearn.model_selection import train_test_split
# Define features and target before splitting (x and y were not defined earlier in the notebook)
x = df.drop("Potability", axis=1)
y = df["Potability"]
X_train, X_test, y_train, y_test = train_test_split(
    x, y, test_size=0.33, random_state=76
)
|
[{"water-potability/water_potability.csv": {"column_names": "[\"ph\", \"Hardness\", \"Solids\", \"Chloramines\", \"Sulfate\", \"Conductivity\", \"Organic_carbon\", \"Trihalomethanes\", \"Turbidity\", \"Potability\"]", "column_data_types": "{\"ph\": \"float64\", \"Hardness\": \"float64\", \"Solids\": \"float64\", \"Chloramines\": \"float64\", \"Sulfate\": \"float64\", \"Conductivity\": \"float64\", \"Organic_carbon\": \"float64\", \"Trihalomethanes\": \"float64\", \"Turbidity\": \"float64\", \"Potability\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 3276 entries, 0 to 3275\nData columns (total 10 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 ph 2785 non-null float64\n 1 Hardness 3276 non-null float64\n 2 Solids 3276 non-null float64\n 3 Chloramines 3276 non-null float64\n 4 Sulfate 2495 non-null float64\n 5 Conductivity 3276 non-null float64\n 6 Organic_carbon 3276 non-null float64\n 7 Trihalomethanes 3114 non-null float64\n 8 Turbidity 3276 non-null float64\n 9 Potability 3276 non-null int64 \ndtypes: float64(9), int64(1)\nmemory usage: 256.1 KB\n", "summary": "{\"ph\": {\"count\": 2785.0, \"mean\": 7.080794504276835, \"std\": 1.5943195187088104, \"min\": 0.0, \"25%\": 6.09309191422186, \"50%\": 7.036752103833548, \"75%\": 8.06206612314847, \"max\": 13.999999999999998}, \"Hardness\": {\"count\": 3276.0, \"mean\": 196.36949601730151, \"std\": 32.879761476294156, \"min\": 47.432, \"25%\": 176.85053787752437, \"50%\": 196.96762686363076, \"75%\": 216.66745621487073, \"max\": 323.124}, \"Solids\": {\"count\": 3276.0, \"mean\": 22014.092526077104, \"std\": 8768.570827785927, \"min\": 320.942611274359, \"25%\": 15666.69029696465, \"50%\": 20927.833606520187, \"75%\": 27332.762127438615, \"max\": 61227.19600771213}, \"Chloramines\": {\"count\": 3276.0, \"mean\": 7.122276793425786, \"std\": 1.5830848890397096, \"min\": 0.3520000000000003, \"25%\": 6.1274207554913, \"50%\": 7.130298973883081, \"75%\": 8.114887032109028, \"max\": 13.127000000000002}, \"Sulfate\": {\"count\": 2495.0, \"mean\": 333.7757766108135, \"std\": 41.416840461672706, \"min\": 129.00000000000003, \"25%\": 307.69949783471964, \"50%\": 333.073545745888, \"75%\": 359.9501703847443, \"max\": 481.0306423059972}, \"Conductivity\": {\"count\": 3276.0, \"mean\": 426.20511068255325, \"std\": 80.8240640511118, \"min\": 181.483753985146, \"25%\": 365.7344141184627, \"50%\": 421.8849682800544, \"75%\": 481.7923044877282, \"max\": 753.3426195583046}, \"Organic_carbon\": {\"count\": 3276.0, \"mean\": 14.284970247677318, \"std\": 3.308161999126874, \"min\": 2.1999999999999886, \"25%\": 12.065801333613067, \"50%\": 14.218337937208588, \"75%\": 16.557651543843434, \"max\": 28.30000000000001}, \"Trihalomethanes\": {\"count\": 3114.0, \"mean\": 66.39629294676803, \"std\": 16.175008422218657, \"min\": 0.7379999999999995, \"25%\": 55.844535620979954, \"50%\": 66.62248509808484, \"75%\": 77.33747290873062, \"max\": 124.0}, \"Turbidity\": {\"count\": 3276.0, \"mean\": 3.966786169791058, \"std\": 0.7803824084854124, \"min\": 1.45, \"25%\": 3.439710869612912, \"50%\": 3.955027562993039, \"75%\": 4.50031978728511, \"max\": 6.739}, \"Potability\": {\"count\": 3276.0, \"mean\": 0.3901098901098901, \"std\": 0.48784916967025516, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 1.0}}", "examples": 
"{\"ph\":{\"0\":null,\"1\":3.7160800754,\"2\":8.0991241893,\"3\":8.3167658842},\"Hardness\":{\"0\":204.8904554713,\"1\":129.4229205149,\"2\":224.2362593936,\"3\":214.3733940856},\"Solids\":{\"0\":20791.318980747,\"1\":18630.0578579703,\"2\":19909.5417322924,\"3\":22018.4174407753},\"Chloramines\":{\"0\":7.3002118732,\"1\":6.6352458839,\"2\":9.2758836027,\"3\":8.0593323774},\"Sulfate\":{\"0\":368.5164413498,\"1\":null,\"2\":null,\"3\":356.8861356431},\"Conductivity\":{\"0\":564.3086541722,\"1\":592.8853591349,\"2\":418.6062130645,\"3\":363.2665161642},\"Organic_carbon\":{\"0\":10.3797830781,\"1\":15.1800131164,\"2\":16.8686369296,\"3\":18.4365244955},\"Trihalomethanes\":{\"0\":86.9909704615,\"1\":56.3290762845,\"2\":66.4200925118,\"3\":100.3416743651},\"Turbidity\":{\"0\":2.9631353806,\"1\":4.5006562749,\"2\":3.0559337497,\"3\":4.6287705368},\"Potability\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0}}"}}]
| true | 1 |
<start_data_description><data_path>water-potability/water_potability.csv:
<column_names>
['ph', 'Hardness', 'Solids', 'Chloramines', 'Sulfate', 'Conductivity', 'Organic_carbon', 'Trihalomethanes', 'Turbidity', 'Potability']
<column_types>
{'ph': 'float64', 'Hardness': 'float64', 'Solids': 'float64', 'Chloramines': 'float64', 'Sulfate': 'float64', 'Conductivity': 'float64', 'Organic_carbon': 'float64', 'Trihalomethanes': 'float64', 'Turbidity': 'float64', 'Potability': 'int64'}
<dataframe_Summary>
{'ph': {'count': 2785.0, 'mean': 7.080794504276835, 'std': 1.5943195187088104, 'min': 0.0, '25%': 6.09309191422186, '50%': 7.036752103833548, '75%': 8.06206612314847, 'max': 13.999999999999998}, 'Hardness': {'count': 3276.0, 'mean': 196.36949601730151, 'std': 32.879761476294156, 'min': 47.432, '25%': 176.85053787752437, '50%': 196.96762686363076, '75%': 216.66745621487073, 'max': 323.124}, 'Solids': {'count': 3276.0, 'mean': 22014.092526077104, 'std': 8768.570827785927, 'min': 320.942611274359, '25%': 15666.69029696465, '50%': 20927.833606520187, '75%': 27332.762127438615, 'max': 61227.19600771213}, 'Chloramines': {'count': 3276.0, 'mean': 7.122276793425786, 'std': 1.5830848890397096, 'min': 0.3520000000000003, '25%': 6.1274207554913, '50%': 7.130298973883081, '75%': 8.114887032109028, 'max': 13.127000000000002}, 'Sulfate': {'count': 2495.0, 'mean': 333.7757766108135, 'std': 41.416840461672706, 'min': 129.00000000000003, '25%': 307.69949783471964, '50%': 333.073545745888, '75%': 359.9501703847443, 'max': 481.0306423059972}, 'Conductivity': {'count': 3276.0, 'mean': 426.20511068255325, 'std': 80.8240640511118, 'min': 181.483753985146, '25%': 365.7344141184627, '50%': 421.8849682800544, '75%': 481.7923044877282, 'max': 753.3426195583046}, 'Organic_carbon': {'count': 3276.0, 'mean': 14.284970247677318, 'std': 3.308161999126874, 'min': 2.1999999999999886, '25%': 12.065801333613067, '50%': 14.218337937208588, '75%': 16.557651543843434, 'max': 28.30000000000001}, 'Trihalomethanes': {'count': 3114.0, 'mean': 66.39629294676803, 'std': 16.175008422218657, 'min': 0.7379999999999995, '25%': 55.844535620979954, '50%': 66.62248509808484, '75%': 77.33747290873062, 'max': 124.0}, 'Turbidity': {'count': 3276.0, 'mean': 3.966786169791058, 'std': 0.7803824084854124, 'min': 1.45, '25%': 3.439710869612912, '50%': 3.955027562993039, '75%': 4.50031978728511, 'max': 6.739}, 'Potability': {'count': 3276.0, 'mean': 0.3901098901098901, 'std': 0.48784916967025516, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 1.0}}
<dataframe_info>
RangeIndex: 3276 entries, 0 to 3275
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 ph 2785 non-null float64
1 Hardness 3276 non-null float64
2 Solids 3276 non-null float64
3 Chloramines 3276 non-null float64
4 Sulfate 2495 non-null float64
5 Conductivity 3276 non-null float64
6 Organic_carbon 3276 non-null float64
7 Trihalomethanes 3114 non-null float64
8 Turbidity 3276 non-null float64
9 Potability 3276 non-null int64
dtypes: float64(9), int64(1)
memory usage: 256.1 KB
<some_examples>
{'ph': {'0': None, '1': 3.7160800754, '2': 8.0991241893, '3': 8.3167658842}, 'Hardness': {'0': 204.8904554713, '1': 129.4229205149, '2': 224.2362593936, '3': 214.3733940856}, 'Solids': {'0': 20791.318980747, '1': 18630.0578579703, '2': 19909.5417322924, '3': 22018.4174407753}, 'Chloramines': {'0': 7.3002118732, '1': 6.6352458839, '2': 9.2758836027, '3': 8.0593323774}, 'Sulfate': {'0': 368.5164413498, '1': None, '2': None, '3': 356.8861356431}, 'Conductivity': {'0': 564.3086541722, '1': 592.8853591349, '2': 418.6062130645, '3': 363.2665161642}, 'Organic_carbon': {'0': 10.3797830781, '1': 15.1800131164, '2': 16.8686369296, '3': 18.4365244955}, 'Trihalomethanes': {'0': 86.9909704615, '1': 56.3290762845, '2': 66.4200925118, '3': 100.3416743651}, 'Turbidity': {'0': 2.9631353806, '1': 4.5006562749, '2': 3.0559337497, '3': 4.6287705368}, 'Potability': {'0': 0, '1': 0, '2': 0, '3': 0}}
<end_description>
| 739 | 0 | 3,016 | 739 |
129434957
|
# # Using Prophet for predicting Demand for Shelter
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from prophet import Prophet
# # Basic EDA
dhs_df = pd.read_csv("/kaggle/input/dhs-weekly-report/DHS_weekly.csv")
print(dhs_df.shape)
dhs_df.head()
dhs_df.describe()
dhs_df.info()
dhs_df["Total Individuals in Shelter"].plot(figsize=(10, 5))
plt.ylabel("Total Number of Individuals in Shelter")
plt.xlabel("Index sorted by increasing, consecutive weeks")
print(dhs_df["Date"][0])
dhs_df["Date"][-1:]
# # Data Preparation
# #### Let's transform the Date into an appropriate format
dhs_df.Date = pd.to_datetime(dhs_df.Date, format="%m/%d/%Y")
dhs_df.Date.head()
# Date to ds; time-series to y. Needed to prepare for being used by Prophet
dhs_df = dhs_df.rename(columns={"Date": "ds", "Total Individuals in Shelter": "y"})
dhs_df.head(1)
# # Handling Holidays
# Easter
easter_dates = dhs_df[dhs_df.Easter == 1].ds
easter = pd.DataFrame(
{"holiday": "easter", "ds": easter_dates, "lower_window": -5, "upper_window": 2}
)
# Thanksgiving
thxg_dates = dhs_df[dhs_df.Thanksgiving == 1].ds
thxg = pd.DataFrame(
{"holiday": "thanksgiving", "ds": thxg_dates, "lower_window": -3, "upper_window": 2}
)
holidays = pd.concat([easter, thxg])
holidays
# Remove the Easter and Thanksgiving indicator columns from dhs_df; they are handled via the holidays dataframe
dhs_df.drop(columns=["Easter", "Thanksgiving"], inplace=True)
dhs_df.head(0)
# Christmas will be used as a regressor
# # Prophet Model
# Training and Test Split
test_weeks = 4
training = dhs_df.iloc[:-test_weeks, :]
test = dhs_df.iloc[-test_weeks:, :]
print(training.tail(1))
print(test.head(1))
# Prophet Model
m = Prophet(
growth="linear",
yearly_seasonality=True,
weekly_seasonality=False,
holidays=holidays,
seasonality_mode="multiplicative",
seasonality_prior_scale=10,
holidays_prior_scale=10,
changepoint_prior_scale=0.05,
)
m.add_regressor("Christmas")
m.add_regressor("Temperature")
m.fit(training)
# Regressor Coefficients
from prophet.utilities import regressor_coefficients
regressor_coefficients(m)
# # Forecasting
# future dataframe
future = m.make_future_dataframe(periods=test_weeks, freq="W")
future = pd.concat([future, dhs_df.iloc[:, 2:]], axis=1)
future.head()
forecast = m.predict(future)
forecast.head()
predictions = forecast.yhat[-test_weeks:]
# Accuracy assessment
from sklearn.metrics import mean_squared_error, mean_absolute_error
print("MAE:", mean_absolute_error(test.y, predictions))
print("RMSE:", np.sqrt(mean_squared_error(test.y, predictions)))
# # Visualization
m.plot(forecast)
# Structural time-series decomposition
m.plot_components(forecast)
# # Cross-Validation
# CV
from prophet.diagnostics import cross_validation
dhs_cv = cross_validation(
m, horizon="28 days", initial="1400 days", parallel="processes"
)
dhs_cv.head()
# Errors
print("RMSE:", np.sqrt(mean_squared_error(dhs_cv.y, dhs_cv.yhat)))
print("MAE:", mean_absolute_error(dhs_cv.y, dhs_cv.yhat))
# CV Visualization
from prophet.plot import plot_cross_validation_metric
plot_cross_validation_metric(dhs_cv, metric="rmse")
# # Parameter tuning
# Parameter Grid
from sklearn.model_selection import ParameterGrid
param_grid = {
"seasonality_mode": ["additive", "multiplicative"],
"seasonality_prior_scale": [5, 10, 20],
"holidays_prior_scale": [5, 10, 20],
"changepoint_prior_scale": [0.01, 0.05, 0.07, 0.1],
}
grid = ParameterGrid(param_grid)
len(list(grid))
# Empty list to store results
rmse = []
# Start the loop
for params in grid:
# Build the model
m = Prophet(
growth="linear",
yearly_seasonality=True,
weekly_seasonality=True,
daily_seasonality=False,
holidays=holidays,
seasonality_mode=params["seasonality_mode"],
seasonality_prior_scale=params["seasonality_prior_scale"],
holidays_prior_scale=params["holidays_prior_scale"],
changepoint_prior_scale=params["changepoint_prior_scale"],
)
m.add_regressor("Temperature", mode="multiplicative")
m.fit(training)
# Cross-Validation
dhs_cv = cross_validation(
m, horizon="28 days", initial="1400 days", parallel="processes"
)
# Measure and store results
error = np.sqrt(mean_squared_error(dhs_cv["y"], dhs_cv["yhat"]))
rmse.append(error)
# Get the best parameters
tuning_results = pd.DataFrame(grid)
tuning_results["rsme"] = rmse
tuning_results
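# A minimal follow-up sketch: pull the lowest-RMSE row out of the tuning table to
# read off the best parameter combination found by the grid search above.
best_params = tuning_results.loc[tuning_results["rmse"].idxmin()]
print("Best parameters found:\n", best_params)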
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/434/129434957.ipynb
| null | null |
[{"Id": 129434957, "ScriptId": 38485759, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8880242, "CreationDate": "05/13/2023 19:18:10", "VersionNumber": 1.0, "Title": "Prophet Time Series Forecasting", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 166.0, "LinesInsertedFromPrevious": 166.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
| null | null | null | null |
# # Using Prophet for predicting Demand for Shelter
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from prophet import Prophet
# # Basic EDA
dhs_df = pd.read_csv("/kaggle/input/dhs-weekly-report/DHS_weekly.csv")
print(dhs_df.shape)
dhs_df.head()
dhs_df.describe()
dhs_df.info()
dhs_df["Total Individuals in Shelter"].plot(figsize=(10, 5))
plt.ylabel("Total Number of Individuals in Shelter")
plt.xlabel("Index sorted by increasing, consecutive weeks")
print(dhs_df["Date"][0])
dhs_df["Date"][-1:]
# # Data Preparation
# #### Let's transform the Date into an appropriate format
dhs_df.Date = pd.to_datetime(dhs_df.Date, format="%m/%d/%Y")
dhs_df.Date.head()
# Date to ds; time-series to y. Needed to prepare for being used by Prophet
dhs_df = dhs_df.rename(columns={"Date": "ds", "Total Individuals in Shelter": "y"})
dhs_df.head(1)
# # Handling Holidays
# Easter
easter_dates = dhs_df[dhs_df.Easter == 1].ds
easter = pd.DataFrame(
{"holiday": "easter", "ds": easter_dates, "lower_window": -5, "upper_window": 2}
)
# Thanksgiving
thxg_dates = dhs_df[dhs_df.Thanksgiving == 1].ds
thxg = pd.DataFrame(
{"holiday": "thanksgiving", "ds": thxg_dates, "lower_window": -3, "upper_window": 2}
)
holidays = pd.concat([easter, thxg])
holidays
# Remove the Easter and Thanksgiving indicator columns from dhs_df; they are handled via the holidays dataframe
dhs_df.drop(columns=["Easter", "Thanksgiving"], inplace=True)
dhs_df.head(0)
# Christmas will be used as a regressor
# # Prophet Model
# Training and Test Split
test_weeks = 4
training = dhs_df.iloc[:-test_weeks, :]
test = dhs_df.iloc[-test_weeks:, :]
print(training.tail(1))
print(test.head(1))
# Prophet Model
m = Prophet(
growth="linear",
yearly_seasonality=True,
weekly_seasonality=False,
holidays=holidays,
seasonality_mode="multiplicative",
seasonality_prior_scale=10,
holidays_prior_scale=10,
changepoint_prior_scale=0.05,
)
m.add_regressor("Christmas")
m.add_regressor("Temperature")
m.fit(training)
# Regressor Coefficients
from prophet.utilities import regressor_coefficients
regressor_coefficients(m)
# # Forecasting
# future dataframe
future = m.make_future_dataframe(periods=test_weeks, freq="W")
future = pd.concat([future, dhs_df.iloc[:, 2:]], axis=1)
future.head()
forecast = m.predict(future)
forecast.head()
predictions = forecast.yhat[-test_weeks:]
# Accuracy assessment
from sklearn.metrics import mean_squared_error, mean_absolute_error
print("MAE:", mean_absolute_error(test.y, predictions))
print("RMSE:", np.sqrt(mean_squared_error(test.y, predictions)))
# # Visualization
m.plot(forecast)
# Structural time-series decomposition
m.plot_components(forecast)
# # Cross-Validation
# CV
from prophet.diagnostics import cross_validation
dhs_cv = cross_validation(
m, horizon="28 days", initial="1400 days", parallel="processes"
)
dhs_cv.head()
# Errors
print("RMSE:", np.sqrt(mean_squared_error(dhs_cv.y, dhs_cv.yhat)))
print("MAE:", mean_absolute_error(dhs_cv.y, dhs_cv.yhat))
# CV Visualization
from prophet.plot import plot_cross_validation_metric
plot_cross_validation_metric(dhs_cv, metric="rmse")
# # Parameter tuning
# Parameter Grid
from sklearn.model_selection import ParameterGrid
param_grid = {
"seasonality_mode": ["additive", "multiplicative"],
"seasonality_prior_scale": [5, 10, 20],
"holidays_prior_scale": [5, 10, 20],
"changepoint_prior_scale": [0.01, 0.05, 0.07, 0.1],
}
grid = ParameterGrid(param_grid)
len(list(grid))
# Empty list to store results
rmse = []
# Start the loop
for params in grid:
# Build the model
m = Prophet(
growth="linear",
yearly_seasonality=True,
weekly_seasonality=True,
daily_seasonality=False,
holidays=holidays,
seasonality_mode=params["seasonality_mode"],
seasonality_prior_scale=params["seasonality_prior_scale"],
holidays_prior_scale=params["holidays_prior_scale"],
changepoint_prior_scale=params["changepoint_prior_scale"],
)
m.add_regressor("Temperature", mode="multiplicative")
m.fit(training)
# Cross-Validation
dhs_cv = cross_validation(
m, horizon="28 days", initial="1400 days", parallel="processes"
)
# Measure and store results
error = np.sqrt(mean_squared_error(dhs_cv["y"], dhs_cv["yhat"]))
rmse.append(error)
# Get the best parameters
tuning_results = pd.DataFrame(grid)
tuning_results["rsme"] = rmse
tuning_results
| false | 0 | 1,493 | 2 | 1,493 | 1,493 |
||
129434964
|
<jupyter_start><jupyter_text>Sentiment140 dataset with 1.6 million tweets
### Context
This is the sentiment140 dataset. It contains 1,600,000 tweets extracted using the twitter api . The tweets have been annotated (0 = negative, 4 = positive) and they can be used to detect sentiment .
### Content
It contains the following 6 fields:
1. **target**: the polarity of the tweet (*0* = negative, *2* = neutral, *4* = positive)
2. **ids**: The id of the tweet ( *2087*)
3. **date**: the date of the tweet (*Sat May 16 23:58:44 UTC 2009*)
4. **flag**: The query (*lyx*). If there is no query, then this value is NO_QUERY.
5. **user**: the user that tweeted (*robotickilldozr*)
6. **text**: the text of the tweet (*Lyx is cool*)
Kaggle dataset identifier: sentiment140
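Because the polarity is stored as bare integer codes, a small mapping helper keeps downstream code readable. An illustrative sketch (the codes are the ones listed above):

```python
# Illustrative mapping of the integer polarity codes to readable labels
POLARITY_LABELS = {0: "negative", 2: "neutral", 4: "positive"}

def label_polarity(target: int) -> str:
    return POLARITY_LABELS.get(target, "unknown")
```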
<jupyter_script>import pandas as pd
import numpy as np
import re
import string
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout
from keras.callbacks import EarlyStopping
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
data = pd.read_csv(
"/kaggle/input/sentiment140/training.1600000.processed.noemoticon.csv",
header=None,
encoding="ISO-8859-1",
names=["target", "id", "date", "flag", "user", "text"],
)
data
nltk.download("wordnet")
nltk.download("stopwords")
stop_words = set(stopwords.words("english"))
lemmatizer = WordNetLemmatizer()
def preprocess_tweet(text):
text = re.sub(r"http\S+", "", text) # Remove URLs
text = re.sub(r"@[A-Za-z0-9]+", "", text) # Remove mentions
text = re.sub(r"#", "", text) # Remove hashtags
text = text.translate(
str.maketrans("", "", string.punctuation)
) # Remove punctuation
text = " ".join(
[
lemmatizer.lemmatize(word.lower())
for word in text.split()
if word.lower() not in stop_words
]
) # Tokenize, lemmatize, and remove stop words
return text
data["text"] = data["text"].apply(preprocess_tweet)
data["text"]
train_size = int(len(data) * 0.8)
train_data = data[:train_size]
test_data = data[train_size:]
vectorizer = TfidfVectorizer(max_features=5000)
train_features = vectorizer.fit_transform(train_data["text"])
test_features = vectorizer.transform(test_data["text"])
# Train a classifier
clf = MultinomialNB()
clf.fit(train_features, train_data["target"])
# Text generation
def generate_tweet(seed_sentence, n=10):
current_sentence = seed_sentence
perplexity = 0
for i in range(n):
vectorized_sentence = vectorizer.transform([current_sentence])
prediction = clf.predict(vectorized_sentence)[0]
if prediction == 0:
next_word = np.random.choice(train_data[train_data["target"] == 0]["text"])
else:
next_word = np.random.choice(train_data[train_data["target"] == 4]["text"])
current_sentence += " " + next_word
        # Calculate perplexity; predict_proba columns follow clf.classes_ ([0, 4]),
        # so index by the class position rather than by the raw label value
        prob = clf.predict_proba(vectorized_sentence)
        class_index = list(clf.classes_).index(prediction)
        perplexity += math.log(prob[0][class_index])
# Calculate average perplexity
avg_perplexity = math.exp(-perplexity / n)
return current_sentence, avg_perplexity
import math
# Example usage
seed_sentence = "I am feeling"
generated_tweet, perplexity = generate_tweet(seed_sentence)
print("Generated tweet: ", generated_tweet)
print("Perplexity score: ", perplexity)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/434/129434964.ipynb
|
sentiment140
|
kazanova
|
[{"Id": 129434964, "ScriptId": 38467229, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6116128, "CreationDate": "05/13/2023 19:18:12", "VersionNumber": 1.0, "Title": "Task Work", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 98.0, "LinesInsertedFromPrevious": 98.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 5}]
|
[{"Id": 185481915, "KernelVersionId": 129434964, "SourceDatasetVersionId": 4140}]
|
[{"Id": 4140, "DatasetId": 2477, "DatasourceVersionId": 4140, "CreatorUserId": 111640, "LicenseName": "Other (specified in description)", "CreationDate": "09/13/2017 22:43:19", "VersionNumber": 2.0, "Title": "Sentiment140 dataset with 1.6 million tweets", "Slug": "sentiment140", "Subtitle": "Sentiment analysis with tweets", "Description": "### Context\n\nThis is the sentiment140 dataset. It contains 1,600,000 tweets extracted using the twitter api . The tweets have been annotated (0 = negative, 4 = positive) and they can be used to detect sentiment .\n\n### Content\n\nIt contains the following 6 fields:\n\n1. **target**: the polarity of the tweet (*0* = negative, *2* = neutral, *4* = positive)\n\n2. **ids**: The id of the tweet ( *2087*)\n\n3. **date**: the date of the tweet (*Sat May 16 23:58:44 UTC 2009*)\n\n4. **flag**: The query (*lyx*). If there is no query, then this value is NO_QUERY.\n\n5. **user**: the user that tweeted (*robotickilldozr*)\n\n6. **text**: the text of the tweet (*Lyx is cool*)\n\n\n### Acknowledgements\n\nThe official link regarding the dataset with resources about how it was generated is [here][1]\nThe official paper detailing the approach is [here][2]\n\nCitation: Go, A., Bhayani, R. and Huang, L., 2009. Twitter sentiment classification using distant supervision. *CS224N Project Report, Stanford, 1(2009), p.12*.\n\n\n### Inspiration\n\nTo detect severity from tweets. You [may have a look at this][3].\n\n[1]: http://%20http://help.sentiment140.com/for-students/\n[2]: http://bhttp://cs.stanford.edu/people/alecmgo/papers/TwitterDistantSupervision09.pdf\n[3]: https://www.linkedin.com/pulse/social-machine-learning-h2o-twitter-python-marios-michailidis", "VersionNotes": "updated description", "TotalCompressedBytes": 238803811.0, "TotalUncompressedBytes": 238803811.0}]
|
[{"Id": 2477, "CreatorUserId": 111640, "OwnerUserId": 111640.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4140.0, "CurrentDatasourceVersionId": 4140.0, "ForumId": 6554, "Type": 2, "CreationDate": "09/13/2017 22:07:02", "LastActivityDate": "02/06/2018", "TotalViews": 758824, "TotalDownloads": 116621, "TotalVotes": 1628, "TotalKernels": 457}]
|
[{"Id": 111640, "UserName": "kazanova", "DisplayName": "\u039c\u03b1\u03c1\u03b9\u03bf\u03c2 \u039c\u03b9\u03c7\u03b1\u03b7\u03bb\u03b9\u03b4\u03b7\u03c2 KazAnova", "RegisterDate": "06/24/2013", "PerformanceTier": 4}]
|
import pandas as pd
import numpy as np
import re
import string
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from keras.models import Sequential
from keras.layers import Dense, LSTM, Dropout
from keras.callbacks import EarlyStopping
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
data = pd.read_csv(
"/kaggle/input/sentiment140/training.1600000.processed.noemoticon.csv",
header=None,
encoding="ISO-8859-1",
names=["target", "id", "date", "flag", "user", "text"],
)
data
nltk.download("wordnet")
nltk.download("stopwords")
stop_words = set(stopwords.words("english"))
lemmatizer = WordNetLemmatizer()
def preprocess_tweet(text):
text = re.sub(r"http\S+", "", text) # Remove URLs
text = re.sub(r"@[A-Za-z0-9]+", "", text) # Remove mentions
text = re.sub(r"#", "", text) # Remove hashtags
text = text.translate(
str.maketrans("", "", string.punctuation)
) # Remove punctuation
text = " ".join(
[
lemmatizer.lemmatize(word.lower())
for word in text.split()
if word.lower() not in stop_words
]
) # Tokenize, lemmatize, and remove stop words
return text
data["text"] = data["text"].apply(preprocess_tweet)
data["text"]
train_size = int(len(data) * 0.8)
train_data = data[:train_size]
test_data = data[train_size:]
vectorizer = TfidfVectorizer(max_features=5000)
train_features = vectorizer.fit_transform(train_data["text"])
test_features = vectorizer.transform(test_data["text"])
# Train a classifier
clf = MultinomialNB()
clf.fit(train_features, train_data["target"])
# Text generation
def generate_tweet(seed_sentence, n=10):
current_sentence = seed_sentence
perplexity = 0
for i in range(n):
vectorized_sentence = vectorizer.transform([current_sentence])
prediction = clf.predict(vectorized_sentence)[0]
if prediction == 0:
next_word = np.random.choice(train_data[train_data["target"] == 0]["text"])
else:
next_word = np.random.choice(train_data[train_data["target"] == 4]["text"])
current_sentence += " " + next_word
        # Calculate perplexity; predict_proba columns follow clf.classes_ ([0, 4]),
        # so index by the class position rather than by the raw label value
        prob = clf.predict_proba(vectorized_sentence)
        class_index = list(clf.classes_).index(prediction)
        perplexity += math.log(prob[0][class_index])
# Calculate average perplexity
avg_perplexity = math.exp(-perplexity / n)
return current_sentence, avg_perplexity
import math
# Example usage
seed_sentence = "I am feeling"
generated_tweet, perplexity = generate_tweet(seed_sentence)
print("Generated tweet: ", generated_tweet)
print("Perplexity score: ", perplexity)
| false | 1 | 948 | 5 | 1,194 | 948 |
||
129434808
|
<jupyter_start><jupyter_text>Top Ranked 2000 Universities of the World
This is the list of top 2000 universities of the World. The dataset contains complete data of universities by ranking of education, research, employment, faculty and score. Harvard University of USA ranked number 1.
Kaggle dataset identifier: top-2000-universities-of-the-world
<jupyter_script>import numpy as np
import pandas as pd
import seaborn as sb
import matplotlib.pyplot as plt
df = pd.read_csv(
"/kaggle/input/top-2000-universities-of-the-world/Top 2000 Universities of the World - Sheet1.csv"
)
df
df.info()
df.describe()
df["Research Performance Rank"].replace("-", "0", inplace=True)
df["National Rank"].replace("-", "0", inplace=True)
df["Quality of Education Rank"].replace("-", "0", inplace=True)
df["Alumni Employment Rank"].replace("-", "0", inplace=True)
df["Quality of Faculty Rank"].replace("-", "0", inplace=True)
df
df["Research Performance Rank"] = df["Research Performance Rank"].astype("int")
df["National Rank"] = df["National Rank"].astype("int")
df["Quality of Education Rank"] = df["Quality of Education Rank"].astype("int")
df["Alumni Employment Rank"] = df["Alumni Employment Rank"].astype("int")
df["Quality of Faculty Rank"] = df["Quality of Faculty Rank"].astype("int")
df.info()
df["Institution"].duplicated()
country_count = df["Country"].value_counts()
country_count_1 = country_count == 1
uni_contry = df["Country"].unique()
type(uni_contry)
print(uni_contry)
df_f = df.sort_values("National Rank")
df_f_10 = df_f.loc[df["National Rank"] == 1]
df_f_10
plt.figure(figsize=(35, 30))
plt.plot(
df_f_10["Alumni Employment Rank"], df_f_10["Country"], label="NR"
)  # Plot the Alumni Employment Rank of each country's top-ranked institution (matplotlib line plot)
# plt.xticks(rotation='vertical')
# plt.title('Alumni Employment Rank & National Rank',fontsize=40) #Note labelling the data
# plt.ylabel('National Rank',fontsize=25) #Note labelling the y-label
# plt.xlabel('Alumni Employment Rank',fontsize=25) #Note labelling the x-label
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/434/129434808.ipynb
|
top-2000-universities-of-the-world
|
batrosjamali
|
[{"Id": 129434808, "ScriptId": 33926213, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6678378, "CreationDate": "05/13/2023 19:16:07", "VersionNumber": 10.0, "Title": "analysis_p_university", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 52.0, "LinesInsertedFromPrevious": 10.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 42.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185481658, "KernelVersionId": 129434808, "SourceDatasetVersionId": 4258063}]
|
[{"Id": 4258063, "DatasetId": 2466811, "DatasourceVersionId": 4315540, "CreatorUserId": 10689222, "LicenseName": "Data files \u00a9 Original Authors", "CreationDate": "09/28/2022 02:49:15", "VersionNumber": 3.0, "Title": "Top Ranked 2000 Universities of the World", "Slug": "top-2000-universities-of-the-world", "Subtitle": "The dataset contains data of Worlds top 2000 Universities ranking 2021", "Description": "This is the list of top 2000 universities of the World. The dataset contains complete data of universities by ranking of education, research, employment, faculty and score. Harvard University of USA ranked number 1.", "VersionNotes": "Data Update 2022/09/28", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 2466811, "CreatorUserId": 10689222, "OwnerUserId": 10689222.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4258063.0, "CurrentDatasourceVersionId": 4315540.0, "ForumId": 2494616, "Type": 2, "CreationDate": "09/09/2022 09:51:05", "LastActivityDate": "09/09/2022", "TotalViews": 8241, "TotalDownloads": 1845, "TotalVotes": 40, "TotalKernels": 2}]
|
[{"Id": 10689222, "UserName": "batrosjamali", "DisplayName": "Batros Jamali", "RegisterDate": "05/30/2022", "PerformanceTier": 2}]
|
import numpy as np
import pandas as pd
import seaborn as sb
import matplotlib.pyplot as plt
df = pd.read_csv(
"/kaggle/input/top-2000-universities-of-the-world/Top 2000 Universities of the World - Sheet1.csv"
)
df
df.info()
df.describe()
df["Research Performance Rank"].replace("-", "0", inplace=True)
df["National Rank"].replace("-", "0", inplace=True)
df["Quality of Education Rank"].replace("-", "0", inplace=True)
df["Alumni Employment Rank"].replace("-", "0", inplace=True)
df["Quality of Faculty Rank"].replace("-", "0", inplace=True)
df
df["Research Performance Rank"] = df["Research Performance Rank"].astype("int")
df["National Rank"] = df["National Rank"].astype("int")
df["Quality of Education Rank"] = df["Quality of Education Rank"].astype("int")
df["Alumni Employment Rank"] = df["Alumni Employment Rank"].astype("int")
df["Quality of Faculty Rank"] = df["Quality of Faculty Rank"].astype("int")
df.info()
df["Institution"].duplicated()
country_count = df["Country"].value_counts()
country_count_1 = country_count == 1
uni_contry = df["Country"].unique()
type(uni_contry)
print(uni_contry)
df_f = df.sort_values("National Rank")
df_f_10 = df_f.loc[df["National Rank"] == 1]
df_f_10
plt.figure(figsize=(35, 30))
plt.plot(
df_f_10["Alumni Employment Rank"], df_f_10["Country"], label="NR"
)  # Plot the Alumni Employment Rank of each country's top-ranked institution (matplotlib line plot)
# plt.xticks(rotation='vertical')
# plt.title('Alumni Employment Rank & National Rank',fontsize=40) #Note labelling the data
# plt.ylabel('National Rank',fontsize=25) #Note labelling the y-label
# plt.xlabel('Alumni Employment Rank',fontsize=25) #Note labelling the x-label
plt.show()
| false | 1 | 566 | 0 | 663 | 566 |
||
129109319
|
<jupyter_start><jupyter_text>The shortest path data
This is a small project that uses optimization algorithms to find the five shortest paths. However, every spot has a limit; every spot ball couldn't exceed 100.
Kaggle dataset identifier: ga-optimization
<jupyter_script>import pandas as pd
import gurobipy as gp
from gurobipy import *
import math as m
import random as rand
data = pd.read_excel(r"/kaggle/input/ga-optimization/AI term project.xlsx")
data
def distance(x1, x2, y1, y2):
dis = m.pow(m.pow((x1 - x2), 2) + m.pow((y1 - y2), 2), 0.5)
return round(dis, 4)
distance(3, 4, 1, 2)
all_nodes_connect = []
for node_x in range(0, len(data["X"])):
for node_y in range(0, len(data["Y"])):
all_nodes_connect.append((node_x, node_y))
for_trans_matrix = list()
for nodes in all_nodes_connect:
# print(nodes)
# print(data['X'].iloc[nodes[0]],data['X'].iloc[nodes[1]],data['Y'].iloc[nodes[0]],data['Y'].iloc[nodes[1]])
dis = distance(
data["X"].iloc[nodes[0]],
data["X"].iloc[nodes[1]],
data["Y"].iloc[nodes[0]],
data["Y"].iloc[nodes[1]],
)
# print(dis)
for_trans_matrix.append([nodes, dis])
# ### Set the parameters
# * cost matrix
# * N : the number of nodes
# * K : the vehicle types
# * C : the capacity of each vehicle type
# * M : the number of available vehicles of each type
# (a sketch of how these could feed the Gurobi model follows the model creation below)
cost_matrix = tupledict(for_trans_matrix)
# N =list(range(0,len(data['寶可夢座標點'])))
N = list(range(0, len(data["寶可夢座標點"])))
K = [1]
C = {1: 100}
M = {1: 5}
model = Model(name="VRP")
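# A minimal sketch (not the author's final formulation) of how N, K, C, M and the
# cost matrix could feed the model just created: binary arc-selection variables per
# vehicle type and a distance-minimizing objective. Real VRP constraints (flow
# conservation, the capacity limits C[k], subtour elimination) still need to be added
# before optimizing.
arcs = [(i, j, k) for (i, j) in all_nodes_connect if i != j for k in K]
x = model.addVars(arcs, vtype=GRB.BINARY, name="x")
model.setObjective(
    quicksum(cost_matrix[i, j] * x[i, j, k] for (i, j, k) in arcs),
    GRB.MINIMIZE,
)
# model.optimize()  # meaningful only once the routing constraints are in place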
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/109/129109319.ipynb
|
ga-optimization
|
yinn94
|
[{"Id": 129109319, "ScriptId": 38338780, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8215644, "CreationDate": "05/11/2023 04:43:04", "VersionNumber": 2.0, "Title": "Gurobipy way find the shortest", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 48.0, "LinesInsertedFromPrevious": 38.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 10.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 184874101, "KernelVersionId": 129109319, "SourceDatasetVersionId": 5292295}]
|
[{"Id": 5292295, "DatasetId": 3077588, "DatasourceVersionId": 5365381, "CreatorUserId": 8215644, "LicenseName": "Unknown", "CreationDate": "04/02/2023 06:54:31", "VersionNumber": 1.0, "Title": "The shortest path data", "Slug": "ga-optimization", "Subtitle": "Using optimization to find the shortest path.", "Description": "This is a small project that uses optimization algorithms to find the five shortest paths. However, every spot has a limit; every spot ball couldn't exceed 100.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3077588, "CreatorUserId": 8215644, "OwnerUserId": 8215644.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5292295.0, "CurrentDatasourceVersionId": 5365381.0, "ForumId": 3140511, "Type": 2, "CreationDate": "04/02/2023 06:54:31", "LastActivityDate": "04/02/2023", "TotalViews": 58, "TotalDownloads": 8, "TotalVotes": 0, "TotalKernels": 3}]
|
[{"Id": 8215644, "UserName": "yinn94", "DisplayName": "yinn94", "RegisterDate": "08/26/2021", "PerformanceTier": 1}]
|
import pandas as pd
import gurobipy as gp
from gurobipy import *
import math as m
import random as rand
data = pd.read_excel(r"/kaggle/input/ga-optimization/AI term project.xlsx")
data
def distance(x1, x2, y1, y2):
dis = m.pow(m.pow((x1 - x2), 2) + m.pow((y1 - y2), 2), 0.5)
return round(dis, 4)
distance(3, 4, 1, 2)
all_nodes_connect = []
for node_x in range(0, len(data["X"])):
for node_y in range(0, len(data["Y"])):
all_nodes_connect.append((node_x, node_y))
for_trans_matrix = list()
for nodes in all_nodes_connect:
# print(nodes)
# print(data['X'].iloc[nodes[0]],data['X'].iloc[nodes[1]],data['Y'].iloc[nodes[0]],data['Y'].iloc[nodes[1]])
dis = distance(
data["X"].iloc[nodes[0]],
data["X"].iloc[nodes[1]],
data["Y"].iloc[nodes[0]],
data["Y"].iloc[nodes[1]],
)
# print(dis)
for_trans_matrix.append([nodes, dis])
# ### Set the parameters
# * cost matrix
# * N : the number of nodes
# * K : the vehicle types
# * C : the capacity of each vehicle type
# * M : the number of available vehicles of each type
cost_matrix = tupledict(for_trans_matrix)
# N =list(range(0,len(data['寶可夢座標點'])))
N = list(range(0, len(data["寶可夢座標點"])))
K = [1]
C = {1: 100}
M = {1: 5}
model = Model(name="VRP")
| false | 0 | 489 | 0 | 547 | 489 |