import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import xgboost as xgb
from sklearn.metrics import mean_squared_error
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv(
"/kaggle/input/bitcoin-and-ethereum-prices-from-start-to-2023/Bitcoin prices.csv"
)
# # Exploring Data
df.head()
df.tail()
df.info()
df.describe()
df = df.set_index("Date")
df.index = pd.to_datetime(df.index)
# # EDA
df["Close"].plot(style=".", figsize=(15, 5), title="BTC Price Over The Time")
plt.show()
df["Close"].plot(kind="hist", bins=500)
# **The distribution of the BTC price does not look like a normal distribution, so we apply a log transform below**
df["Close"] = np.log1p(df["Close"])
df["Close"].plot(kind="hist", bins=500)
# # Creating Features
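# Forward-looking targets: BTC+k holds the (log-transformed) closing price k days ahead, looked up via the date-to-close dictionary target_map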
target_map = df["Close"].to_dict()
df["BTC+1"] = (df.index + pd.Timedelta("1 days")).map(target_map)
df["BTC+2"] = (df.index + pd.Timedelta("2 days")).map(target_map)
df["BTC+3"] = (df.index + pd.Timedelta("3 days")).map(target_map)
df
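# Technical indicators: 20-day simple moving average, plus MACD (12-day EMA minus 26-day EMA) with a 9-day signal line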
df["moving_avg"] = df["Close"].rolling(window=20).mean()
df["ema_12"] = df["Close"].ewm(span=12, adjust=False).mean()
df["ema_26"] = df["Close"].ewm(span=26, adjust=False).mean()
df["macd"] = df["ema_12"] - df["ema_26"]
df["macd_signal"] = df["macd"].ewm(span=9, adjust=False).mean()
delta = df["Close"].diff()
gain = delta.where(delta > 0, 0)
loss = -delta.where(delta < 0, 0)
avg_gain = gain.rolling(window=14).mean()
avg_loss = loss.rolling(window=14).mean()
rs = avg_gain / avg_loss
df["rsi"] = 100 - (100 / (1 + rs))
aroon_up = 100 * ((25 - (df["Close"].rolling(25).apply(lambda x: x.argmax() + 1))) / 25)
aroon_down = 100 * (
(25 - (df["Close"].rolling(25).apply(lambda x: x.argmin() + 1))) / 25
)
df["aroon_oscillator"] = aroon_up - aroon_down
df.tail()
df.drop(["2023-04-06", "2023-04-07", "2023-04-08"], inplace=True)
df.drop(["Open", "High", "Low", "Adj Close", "Volume"], axis=1, inplace=True)
# # Train/Test Split
size = int(len(df) * 0.8)
df_train = df.iloc[:size]
df_test = df.iloc[size:]
fig, ax = plt.subplots(figsize=(15, 5))
df_train["Close"].plot(ax=ax, label="Training Set", title="Data Train/Test Split")
df_test["Close"].plot(ax=ax, label="Test Set")
ax.axvline("2021-07-31", color="black", ls="--")
ax.legend(["Training Set", "Test Set"])
plt.show()
y_train = df_train["BTC+1"]
y_test = df_test["BTC+1"]
x_train = df_train.drop(["Close", "BTC+1", "BTC+2", "BTC+3"], axis=1)
x_test = df_test.drop(["Close", "BTC+1", "BTC+2", "BTC+3"], axis=1)
# # Model Training
reg = xgb.XGBRegressor(
base_score=0.5,
booster="gbtree",
n_estimators=1000,
early_stopping_rounds=50,
objective="reg:linear",
max_depth=3,
learning_rate=0.01,
)
reg.fit(x_train, y_train, eval_set=[(x_train, y_train), (x_test, y_test)], verbose=100)
# # Results
y_pred = reg.predict(x_test)
y_test = np.expm1(y_test)
y_pred = np.expm1(y_pred)
score = np.sqrt(mean_squared_error(y_test, y_pred))
score
from sklearn.metrics import r2_score
r2_score(y_test, y_pred)
pred_vs_actual = pd.DataFrame(data=[y_pred, y_test.to_numpy()]).T
pred_vs_actual.columns = ["Pred", "Actual"]
pred_vs_actual["Date"] = x_test.index + pd.Timedelta("1 days")
pred_vs_actual = pred_vs_actual.set_index("Date")
pred_vs_actual
plt.figure(figsize=(10, 7))
sns.lineplot(data=pred_vs_actual)
plt.show()
|
# This project aims to use Deep Learning for Iris classification.
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# Importing all the necessary packages.
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from keras.models import Sequential
from keras.layers import Dense
# Importing the Iris dataset.
df_iris = pd.read_csv("../input/iris/Iris.csv")
df_iris.head()
# Pre-processing the data and splitting it into training and test sets.
y = df_iris["Species"]
x = df_iris.values
x = x[:, 1:-1]
min_max_scaler = preprocessing.MinMaxScaler()
x = min_max_scaler.fit_transform(x)
print(x[:5, :])
x_train, x_test, y_train, y_test = train_test_split(x, y.values, test_size=0.2)
# Converting target labels into numerical categories using Ordinal Encoder
y_train = y_train.reshape(len(y_train), 1)
y_test = y_test.reshape(len(y_test), 1)
le = preprocessing.OrdinalEncoder()
le.fit(y_train)
y_train_num = le.transform(y_train)
y_test_num = le.transform(y_test)
y_train_num[:10]
# Creating a neural network with three hidden ReLU layers and a softmax output layer.
# Since the dataset is very small, the epochs have been set to 100.
test_model = Sequential()
test_model.add(Dense(8, input_shape=(4,), activation="relu"))
test_model.add(Dense(12, activation="relu"))
test_model.add(Dense(10, activation="relu"))
test_model.add(Dense(3, activation="softmax"))
test_model.compile(
optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
test_model.fit(x_train, y_train_num, batch_size=20, epochs=100)
# Roughly 97% accuracy is achieved.
test_model.evaluate(x_test, y_test_num)
# Checking the prediction for a chosen value.
a = np.argmax(test_model.predict(x_test[2:3]))
print("Predicted value: %s, Actual VAlue: %s" % (a, y_test_num[2]))
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
df = pd.read_csv("../input/covid-world-vaccination-progress/country_vaccinations.csv")
df.info()
df1 = df.drop(["iso_code", "date", "source_website"], axis=1)
df1
country = list(df1["country"])
vaccine = list(df1["vaccines"])
iso = list(df["iso_code"])
data = {"country": country, "vaccine": vaccine, "iso": iso}
df2 = pd.DataFrame(data)
df2
# **What vaccines are used and in which countries?**
result = df2.drop_duplicates()
result
result.loc[result.iso.isnull(), "country"]
df.iloc[3739]
result.fillna("GBR")
import plotly.express as px
fig = px.choropleth(
result,
locations="iso",
color="vaccine",
color_continuous_scale=px.colors.sequential.Plasma,
)
fig.show()
# **Which country has fully vaccinated the most people?**
people_fully = df["people_fully_vaccinated"]
data1 = {"iso": iso, "country": country, "fully_vaccinated": people_fully}
people = pd.DataFrame(data1)
new = people.dropna()
new.head(30)
df4 = pd.DataFrame(new.groupby("country")["fully_vaccinated"].sum())
df4.head()
# #### **Calculation in terms of Fully Vaccinated people**
df4.sort_values(by=["fully_vaccinated"], inplace=True, ascending=False)
df4
# ***The United States had the largest number of fully vaccinated people according to the given dataset***
# ### **Which country has vaccinated a larger percentage of its population?**
per = df1.dropna()
per.head()
per["population_percentage"] = per.groupby("country")[
"people_fully_vaccinated_per_hundred"
].transform("sum")
per.head()
import matplotlib.pyplot as plt
X = list(per["country"])
Y = list(per["population_percentage"])
fig = plt.figure(figsize=(15, 6))
plt.xlabel("countries")
plt.ylabel("population fully vaccinated")
plt.title("population percentage fully vaccinated")
plt.xticks(rotation=90)
plt.bar(X, Y)
plt.show()
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("whitegrid")
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn import tree
from sklearn.metrics import classification_report, confusion_matrix
from sklearn import preprocessing
import warnings
warnings.filterwarnings("ignore")
fraud = pd.read_csv("/kaggle/input/fraud-check/Fraud_check.csv")
fraud
f_c1 = fraud.drop(["City.Population"], axis=1)
f_c1
f_c1.info()
# ##### We need to add a target column indicating risky or not, according to the given problem statement
y1 = np.empty(len(f_c1), dtype=object)
i = 0
for value in f_c1["Taxable.Income"]:
if value <= 30000:
y1[i] = "Risky"
else:
y1[i] = "Good"
i = i + 1
y1
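# The same labels can also be built in one vectorized step; this is an equivalent
# sketch of the loop above (y1_vec is a new name and is not used below).
y1_vec = np.where(f_c1["Taxable.Income"] <= 30000, "Risky", "Good")
(y1_vec == y1).all()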
t1 = pd.DataFrame(y1, columns=["Target"])
t1
f_c = pd.concat([f_c1, t1], axis=1)
f_c.head()
f_c.isna().sum()
f_c.info()
f_c.corr()
f_c.groupby(["Undergrad", "Marital.Status", "Urban"]).count()
# ##### Label Encoding
label_encoder = preprocessing.LabelEncoder()
f_c["Undergrad"] = label_encoder.fit_transform(f_c["Undergrad"])
f_c["Marital.Status"] = label_encoder.fit_transform(f_c["Marital.Status"])
f_c["Urban"] = label_encoder.fit_transform(f_c["Urban"])
f_c["Target"] = label_encoder.fit_transform(f_c["Target"])
f_c.head()
f_c.Target.value_counts()
colnames = list(f_c.columns)
colnames
# #### Visualization
sns.pairplot(f_c)
plt.show()
sns.distplot(f_c["Taxable.Income"])
plt.show()
sns.distplot(f_c["Work.Experience"])
plt.show()
plt.figure(figsize=(8, 6))
sns.heatmap(f_c.corr(), cmap="magma", annot=True, fmt=".2f")
plt.show()
sns.scatterplot(x="Taxable.Income", y="Work.Experience", data=f_c)
plt.show()
x = f_c.iloc[:, 0:5]
y = f_c[["Target"]]
x.head()
y.tail()
# ### Building Decision Tree Classifier using Entropy Criteria
# Splitting data into training and testing data set
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.3, random_state=32
)
model2 = DecisionTreeClassifier(criterion="entropy", max_depth=3)
model2.fit(x_train, y_train)
tree.plot_tree(model2)
# Plot the decision tree
fn = ["Undergrad", "Marital.Status", "Taxable.Income", "Work.Experience", "Urban"]
fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(3, 2), dpi=300)
tree.plot_tree(
model2,
feature_names=fn,
# class_names=cn,
filled=True,
)
preds2 = model2.predict(x_test)
pd.Series(preds2).value_counts()
preds2
y_test
y_test2 = y_test.to_numpy()
y_test2 = np.reshape(y_test2, 180)
y_test2
pd.crosstab(
y_test2, preds2
) # getting the 2 way table to understand the correct and wrong predictions
# ### Building Decision Tree Classifier (CART) using Gini Criteria
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.22, random_state=18
)
from sklearn.tree import DecisionTreeClassifier
model2_gini = DecisionTreeClassifier(criterion="gini", max_depth=3)
model2_gini.fit(x_train, y_train)
y_test
y_test3 = y_test.to_numpy()
y_test3 = np.reshape(y_test3, 132)
y_test3
# Prediction and computing the accuracy of the Gini model
pred = model2_gini.predict(x_test)
np.mean(pred == y_test3)
# #### Decision Tree Regression Example
# Decision Tree Regression
from sklearn.tree import DecisionTreeRegressor
array = f_c.values
X = array[:, 0:5]
y = array[:, -1]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=21
)
model = DecisionTreeRegressor()
model.fit(X_train, y_train)
# R^2 score of the regressor on the test set
model.score(X_test, y_test)
|
# A few years ago I wrote a [kernel exploring word vectors](https://www.kaggle.com/gabrielaltay/word-vectors-from-pmi-matrix) calculated from Pointwise Mutual Information. This is a reboot of that kernel using the Kensho Derived Wikimedia Dataset. This new version includes a dynamic context window, context distribution smoothing, and eigenvalue weighting.
# # Kensho Derived Wikimedia Dataset - Word Vectors from Decomposing a Word-Word Pointwise Mutual Information Matrix
# Let's create some simple [word vectors](https://en.wikipedia.org/wiki/Word_embedding) by applying a [singular value decomposition](https://en.wikipedia.org/wiki/Singular-value_decomposition) to a [pointwise mutual information](https://en.wikipedia.org/wiki/Pointwise_mutual_information) word-word matrix. There are many other ways to create word vectors, but matrix decomposition is one of the most straightforward. A well-cited description of the technique used in this notebook can be found in Chris Moody's blog post [Stop Using word2vec](https://multithreaded.stitchfix.com/blog/2017/10/18/stop-using-word2vec/). If you are interested in reading further about the history of word embeddings and a discussion of modern approaches, check out the following blog post by Sebastian Ruder, [An overview of word embeddings and their connection to distributional semantic models](http://blog.aylien.com/overview-word-embeddings-history-word2vec-cbow-glove/). Especially interesting to me is the work by Omer Levy, Yoav Goldberg, and Ido Dagan, which shows that tuning hyperparameters is as important as (if not more important than) the algorithm chosen to build word vectors: [Improving Distributional Similarity with Lessons Learned from Word Embeddings](https://transacl.org/ojs/index.php/tacl/article/view/570) (LGD15).
# We will be using the [Kensho Derived Wikimedia Dataset](https://www.kaggle.com/kenshoresearch/kensho-derived-wikimedia-data) which contains the text of English Wikipedia from 2019-12-01. In this notebook tutorial we will implement as much as we can without using libraries that obfuscate the algorithm. We're not going to write our own linear algebra or sparse matrix routines, but we will calculate unigram frequency, skipgram frequency, and the pointwise mutual information matrix "by hand". We will also use the notation from LGD15 so you can follow along using that paper. Hopefully this will make the method easier to understand!
from collections import Counter
import json
import os
import random
import re
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from tqdm import tqdm
from scipy import sparse
from scipy.sparse import linalg
from sklearn.preprocessing import normalize
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import euclidean_distances
sns.set()
sns.set_context("talk")
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
NUM_KLAT_LINES = 5_343_564
MIN_UNIGRAM_COUNT = 100
WINDOW = 5
MAX_PAGES = 1_000_000
INTROS_ONLY = True
kdwd_path = os.path.join("/kaggle", "input", "kensho-derived-wikimedia-data")
# # Read Data and Preview
# Let's start with a class to read the Wikipedia text. We'll give ourselves the option to only use *Introduction* sections and to limit the number of pages.
def tokenizer(text):
return text.strip().lower().split()
class KdwdLinkAnnotatedText:
def __init__(self, file_path, intros_only=False, max_pages=1_000_000_000):
self._file_path = file_path
self._intros_only = intros_only
self._max_pages = max_pages
self._num_lines = NUM_KLAT_LINES
self.pages_to_parse = min(self._num_lines, self._max_pages)
def __iter__(self):
with open(self._file_path) as fp:
for ii, line in enumerate(fp):
page = json.loads(line)
for section in page["sections"]:
yield section["text"]
if self._intros_only:
break
if ii + 1 >= self.pages_to_parse:
break
file_path = os.path.join(kdwd_path, "link_annotated_text.jsonl")
klat_intros_2 = KdwdLinkAnnotatedText(file_path, intros_only=True, max_pages=2)
klat_intros = KdwdLinkAnnotatedText(
file_path, intros_only=INTROS_ONLY, max_pages=MAX_PAGES
)
two_intros = [intro for intro in klat_intros_2]
two_intros
# # Unigrams
# Now let's calculate a unigram vocabulary. The following code assigns a unique ID to each token, stores that mapping in two dictionaries (`tok2indx` and `indx2tok`), and counts how often each token appears in the corpus.
def filter_unigrams(unigrams, min_unigram_count):
tokens_to_drop = [
token for token, count in unigrams.items() if count < min_unigram_count
]
for token in tokens_to_drop:
del unigrams[token]
return unigrams
def get_unigrams(klat):
unigrams = Counter()
for text in tqdm(klat, total=klat.pages_to_parse, desc="calculating unigrams"):
tokens = tokenizer(text)
unigrams.update(tokens)
return unigrams
unigrams = get_unigrams(klat_intros)
print("token count: {}".format(sum(unigrams.values())))
print("vocabulary size: {}".format(len(unigrams)))
unigrams = filter_unigrams(unigrams, MIN_UNIGRAM_COUNT)
print("token count: {}".format(sum(unigrams.values())))
print("vocabulary size: {}".format(len(unigrams)))
tok2indx = {tok: indx for indx, tok in enumerate(unigrams.keys())}
indx2tok = {indx: tok for tok, indx in tok2indx.items()}
# # Skipgrams
# Now let's calculate word-context pairs (i.e., skipgrams). We will loop through each token in a section (the "word") and then use a `word2vec`-style dynamic window to sample a context token to form skipgrams.
def get_skipgrams(klat, window, tok2indx, seed=938476):
rnd = random.Random()
rnd.seed(a=seed)
skipgrams = Counter()
for text in tqdm(klat, total=klat.pages_to_parse, desc="calculating skipgrams"):
tokens = tokenizer(text)
vocab_indices = [tok2indx[tok] for tok in tokens if tok in tok2indx]
num_tokens = len(vocab_indices)
if num_tokens == 1:
continue
for ii_word, word in enumerate(vocab_indices):
# sample within window
ii_context = ii_word
while ii_context == ii_word:
sign = rnd.getrandbits(1) * 2 - 1
offset = rnd.randint(1, window)
if sign < 0:
ii_context = max(0, ii_word - offset)
else:
ii_context = min(num_tokens - 1, ii_word + offset)
# add skipgram
context = vocab_indices[ii_context]
skipgram = (word, context)
skipgrams[skipgram] += 1
return skipgrams
skipgrams = get_skipgrams(klat_intros, WINDOW, tok2indx)
print("number of unique skipgrams: {}".format(len(skipgrams)))
print("number of skipgrams: {}".format(sum(skipgrams.values())))
most_common = [
(indx2tok[sg[0][0]], indx2tok[sg[0][1]], sg[1]) for sg in skipgrams.most_common(10)
]
print("most common: {}".format(most_common))
# # Sparse Matrices
# We will calculate several matrices that store word-word information. These matrices will be $N \times N$ where $N \approx 100,000$ is the size of our vocabulary. We will need to use a sparse format so that it will fit into memory. A nice implementation is available in [scipy.sparse.csr_matrix](https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html). To create these sparse matrices we create three iterables that store row indices, column indices, and data values.
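# As a tiny illustration (toy values, not part of the pipeline), a 3x3 sparse
# matrix can be built from exactly those three iterables:
toy_rows = [0, 1, 2]
toy_cols = [2, 0, 1]
toy_vals = [1.0, 2.0, 3.0]
print(sparse.csr_matrix((toy_vals, (toy_rows, toy_cols))).toarray())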
# # Word-Word Count Matrix
# Our very first word vectors will come from a word-word count matrix. This matrix is symmetric, so we can (equivalently) take the word vectors to be the rows or columns. However, we will write the code as if the rows are word vectors and the columns are context vectors.
def get_count_matrix(skipgrams, tok2indx):
row_indxs = []
col_indxs = []
dat_values = []
for skipgram in tqdm(
skipgrams.items(),
total=len(skipgrams),
desc="building count matrix row,col,dat",
):
(tok_word_indx, tok_context_indx), sg_count = skipgram
row_indxs.append(tok_word_indx)
col_indxs.append(tok_context_indx)
dat_values.append(sg_count)
print("building sparse count matrix")
return sparse.csr_matrix((dat_values, (row_indxs, col_indxs)))
count_matrix = get_count_matrix(skipgrams, tok2indx)
# normalize rows
count_matrix_l2 = normalize(count_matrix, norm="l2", axis=1)
# demonstrate normalization
irow = 10
row = count_matrix_l2.getrow(irow).toarray().flatten()
print(np.sqrt((row * row).sum()))
row = count_matrix.getrow(irow).toarray().flatten()
print(np.sqrt((row * row).sum()))
# # Word Similarity with Sparse Count Matrices
def ww_sim(word, mat, tok2indx, topn=10):
"""Calculate topn most similar words to word"""
indx = tok2indx[word]
if isinstance(mat, sparse.csr_matrix):
v1 = mat.getrow(indx)
else:
v1 = mat[indx : indx + 1, :]
sims = cosine_similarity(mat, v1).flatten()
# dists = cosine_distances(mat, v1).flatten()
dists = euclidean_distances(mat, v1).flatten()
sindxs = np.argsort(-sims)
sim_word_scores = [(indx2tok[sindx], sims[sindx]) for sindx in sindxs[0:topn]]
return sim_word_scores
word = "strike"
ww_sim(word, count_matrix, tok2indx)
ww_sim(word, count_matrix_l2, tok2indx)
# Note that the normalized vectors will produce the same similarities as the un-normalized vectors as long as we are using cosine similarity.
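# A quick numerical check of this claim for the row inspected above (a sketch,
# reusing `irow` from the normalization demo):
sims_raw = cosine_similarity(count_matrix, count_matrix.getrow(irow)).flatten()
sims_l2 = cosine_similarity(count_matrix_l2, count_matrix_l2.getrow(irow)).flatten()
print(np.allclose(sims_raw, sims_l2))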
# # Pointwise Mutual Information Matrices
# ## Definitions
# $$
# PMI(w, c) =
# \log \frac
# {\hat{P}(w,c)}
# {\hat{P}(w)\hat{P}(c)} =
# \log \frac
# {\#(w,c) \, \cdot \lvert D \rvert}
# {\#(w) \cdot \#(c)}
# $$
# $$
# PPMI(w, c) = {\rm max} \left[ PMI(w, c), 0 \right]
# $$
# $$
# \#(w) = \sum_{c^{\prime}} \#(w, c^{\prime}),
# \quad
# \#(c) = \sum_{w^{\prime}} \#(w^{\prime}, c)
# $$
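# As a small worked example with made-up counts (not from our corpus): if
# #(w,c) = 10, #(w) = 100, #(c) = 50 and |D| = 1000, then
# PMI = log2((10/1000) / ((100/1000) * (50/1000))) = log2(2) = 1, and PPMI = 1.
toy_pmi = np.log2((10 / 1000) / ((100 / 1000) * (50 / 1000)))
print(toy_pmi, max(toy_pmi, 0))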
# ## Context Distribution Smoothing
# $$
# PMI_{\alpha}(w, c) =
# \log \frac
# {\hat{P}(w,c)}
# {\hat{P}(w)\hat{P}_{\alpha}(c)}
# $$
# $$
# \hat{P}_{\alpha}(c) =
# \frac
# {\#(c)^{\alpha}}
# {\sum_c \#(c)^{\alpha}}
# $$
# Let's explain how these equations relate to our variables. LGD15 use $\#(w,c)$ to denote the number of times a word-context pair appears in the corpus. We first calculated these numbers in our `skipgrams` variable and then stored them in `count_matrix`. The rows in `count_matrix` represent words and the columns represent contexts. Given a word token and a context token, we can look up their indices in `tok2indx` and access the count via `skipgrams` or `count_matrix`.
word = "the"
context = "of"
word_indx = tok2indx[word]
context_indx = tok2indx[context]
print(
"pound_wc for ({},{}) from skipgrams: {}".format(
word, context, skipgrams[(word_indx, context_indx)]
)
)
print(
"pound_wc for ({},{}) from count_matrix: {}".format(
word, context, count_matrix[word_indx, context_indx]
)
)
# LGD15 use $\#(w)$ to denote the number of times a word appears anywhere in the corpus and $\#(c)$ to denote the number of times a context appears anywhere in the corpus. We can calculate $\#(w)$ by summing over the columns of `count_matrix` and $\#(c)$ by summing over the rows in `count_matrix`.
sum_over_words = np.array(count_matrix.sum(axis=0)).flatten() # sum over rows
sum_over_contexts = np.array(count_matrix.sum(axis=1)).flatten() # sum over columns
pound_w_check1 = count_matrix.getrow(word_indx).sum()
pound_w_check2 = sum_over_contexts[word_indx]
print('pound_w for "{}" from getrow then sum: {}'.format(word, pound_w_check1))
print('pound_w for "{}" from sum_over_contexts: {}'.format(word, pound_w_check2))
pound_c_check1 = count_matrix.getcol(context_indx).sum()
pound_c_check2 = sum_over_words[context_indx]
print('pound_c for "{}" from getcol then sum: {}'.format(context, pound_c_check1))
print('pound_c for "{}" from sum_over_words: {}'.format(context, pound_c_check2))
def get_ppmi_matrix(skipgrams, count_matrix, tok2indx, alpha=0.75):
# for standard PPMI
DD = sum(skipgrams.values())
sum_over_contexts = np.array(count_matrix.sum(axis=1)).flatten()
sum_over_words = np.array(count_matrix.sum(axis=0)).flatten()
# for context distribution smoothing (cds)
sum_over_words_alpha = sum_over_words**alpha
Pc_alpha_denom = np.sum(sum_over_words_alpha)
row_indxs = []
col_indxs = []
ppmi_dat_values = [] # positive pointwise mutual information
for skipgram in tqdm(
skipgrams.items(), total=len(skipgrams), desc="building ppmi matrix row,col,dat"
):
(tok_word_indx, tok_context_indx), pound_wc = skipgram
pound_w = sum_over_contexts[tok_word_indx]
pound_c = sum_over_words[tok_context_indx]
pound_c_alpha = sum_over_words_alpha[tok_context_indx]
Pwc = pound_wc / DD
Pw = pound_w / DD
Pc = pound_c / DD
Pc_alpha = pound_c_alpha / Pc_alpha_denom
pmi = np.log2(Pwc / (Pw * Pc_alpha))
ppmi = max(pmi, 0)
row_indxs.append(tok_word_indx)
col_indxs.append(tok_context_indx)
ppmi_dat_values.append(ppmi)
print("building ppmi matrix")
return sparse.csr_matrix((ppmi_dat_values, (row_indxs, col_indxs)))
ppmi_matrix = get_ppmi_matrix(skipgrams, count_matrix, tok2indx)
word = "strike"
ww_sim(word, ppmi_matrix, tok2indx)
# # Singular Value Decomposition
# With the PPMI matrix in hand, we can apply a singular value decomposition (SVD) to create dense word vectors from the sparse ones we've been using. SVD factorizes a matrix $M$ into a product of matrices $M = U \cdot \Sigma \cdot V^T$ where $U$ and $V$ are orthonormal and $\Sigma$ is a diagonal matrix of singular values. By keeping the top $d$ elements of $\Sigma$, we obtain $M_d = U_d \cdot \Sigma_d \cdot V_d^T$.
# Word and context vectors are typically represented by:
# $$
# W = U_d \cdot \Sigma_d, \quad C = V_d
# $$
# It has been shown empirically that weighting the singular value matrix can affect performance.
# $$
# W = U_d \cdot \Sigma_d^p
# $$
# LGD15 suggest always using this weighting, but that $p$ should be tuned to the task. They investigate values of $p=0.5$ and $p=0$ (with $p=1$ corresponding to the traditional case). Let's use $p=0.5$.
embedding_size = 50
uu, ss, vv = linalg.svds(ppmi_matrix, embedding_size)
print("vocab size: {}".format(len(unigrams)))
print("ppmi size: {}".format(ppmi_matrix.shape))
print("embedding size: {}".format(embedding_size))
print("uu.shape: {}".format(uu.shape))
print("ss.shape: {}".format(ss.shape))
print("vv.shape: {}".format(vv.shape))
# Let's check that dot-products between rows of $M_d$ are equal to dot-products between rows of $W$, where
# $$
# M_d = U_d \cdot \Sigma_d \cdot V_d^T, \quad W = U_d \cdot \Sigma_d
# $$
# x = (uu.dot(np.diag(ss)).dot(vv))[word_indx, :]
# y = (uu.dot(np.diag(ss)).dot(vv))[context_indx, :]
# print((x * y).sum())
# x = (uu.dot(np.diag(ss)))[word_indx, :]
# y = (uu.dot(np.diag(ss)))[context_indx, :]
# print((x * y).sum())
# Now let's create our final embeddings.
p = 0.5
word_vecs = uu.dot(np.diag(ss**p))
print(word_vecs.shape)
word = "car"
sims = ww_sim(word, word_vecs, tok2indx)
for sim in sims:
print(" ", sim)
word = "king"
sims = ww_sim(word, word_vecs, tok2indx)
for sim in sims:
print(" ", sim)
word = "queen"
sims = ww_sim(word, word_vecs, tok2indx)
for sim in sims:
print(" ", sim)
word = "news"
sims = ww_sim(word, word_vecs, tok2indx)
for sim in sims:
print(" ", sim)
word = "hot"
sims = ww_sim(word, word_vecs, tok2indx)
for sim in sims:
print(" ", sim)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import pandas as pd
doc = pd.DataFrame({"x": [1, 2, 4, 3, 5], "y": [1, 3, 3, 2, 5]})
print(doc)
import numpy as np
s = np.mean(doc["x"])
print(s)
t = np.mean(doc["y"])
print(t)
g = doc["x"] - s
doc['x-x"'] = g
h = doc["y"] - t
doc['y-y"'] = h
print(doc)
doc['(x-x")^2'] = np.square(g)
print(doc)
doc['(x-x")(y-y")'] = np.multiply(g, h)
print(doc)
q = np.sum(doc['(x-x")(y-y")'])
w = np.sum(doc['(x-x")^2'])
e = q / w
print("slope=", e)
c = t - s * e
print(c)
a = (e * doc["x"]) + c
doc['y"'] = a
print(doc)
doc['y-y""'] = a - doc["y"]
doc['(y-y"")^2'] = np.square(doc['y-y""'])
print(doc)
o = np.mean(doc['(y-y"")^2'])
j = np.sqrt(o)
print(j)
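# As a quick cross-check (a sketch using NumPy's least-squares polynomial fit),
# the same slope and intercept can be obtained directly:
slope_check, intercept_check = np.polyfit(doc["x"], doc["y"], 1)
print(slope_check, intercept_check)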
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import matplotlib.pyplot as plt
import pandas_profiling
from pandas.plotting import scatter_matrix
file = "/kaggle/input/youtube-new/USvideos.csv"
df = pd.read_csv(file)
df.head()
df.info()
df.isnull().sum()
# The description column is the least useful, so we drop the columns that contain missing values
df.dropna(axis=1, how="any", inplace=True)
df.head(3)
# Check the percentage of null or empty values
Total = df.isnull().sum().sort_values(ascending=False)
percent = ((df.isnull().sum() / df.isnull().count()) * 100).sort_values(ascending=False)
missing_data = pd.concat([Total, percent], axis=1, keys=["Total", "percent"])
missing_data
df.head()
# Change the date columns to the pandas datetime data type
df["publish_time"] = pd.to_datetime(df["publish_time"]).dt.strftime("%y-%m-%d")
df["trending_date"] = df["trending_date"].str.replace(".", "-")
df["trending_date"] = pd.to_datetime(df["trending_date"], format="%y-%d-%m")
# drop unwanted columns from DataFrame
del df["tags"]
del df["thumbnail_link"]
del df["video_id"]
del df["category_id"]
df.info()
df.head()
# Data Analysis starts from here
# Which title has the most likes?
MaxLikes = df["likes"].max()
mask = df["likes"] == MaxLikes
df[mask]
# Which titles have the fewest dislikes?
MinLikes = df["dislikes"].min()
mask = df["dislikes"] == MinLikes
df[mask]
df[mask].count()
# Which row has the most likes among videos that had an error or were removed?
df.head()
mask1 = df[df["video_error_or_removed"]]
Max_df = mask1["likes"].max()
mask1[mask1["likes"] == Max_df]
# Which row has the fewest likes among videos that had an error or were removed?
mask1 = df[df["video_error_or_removed"]]
Min_df = mask1["likes"].min()
mask1[mask1["likes"] == Min_df]
df.head()
df["publish_time"] = pd.to_datetime(df["publish_time"]).dt.strftime("%d-%m-%Y")
df["publish_year"] = df["publish_time"].str.split("-", expand=True)[2]
df["publish_time"] = pd.to_datetime(df["publish_time"])
# df['publish_year'] = pd.to_datetime(df['publish_year'] )
df.info()
df.head(3)
# Which publish year has the most views?
df.groupby("publish_year")["views"].sum().sort_values(ascending=False)
# Which year had the most likes?
df.groupby("publish_year")["likes"].sum().sort_values(ascending=False)
# Which year had the most dislikes?
df.groupby("publish_year")["dislikes"].sum().sort_values(ascending=False)
# Which titles have the most likes and views in 2017?
mass3 = df["publish_year"] == "2017"
temp = df[mass3]
Max2017L = temp["likes"].max()
Max2017V = temp["views"].max()
mass1 = temp["likes"] == Max2017L
mass2 = temp["views"] == Max2017V
temp[mass1 | mass2]
# Which titles have the most likes and views in 2018?
mass3 = df["publish_year"] == "2018"
temp = df[mass3]
Max2017L = temp["likes"].max()
Max2017V = temp["views"].max()
mass1 = temp["likes"] == Max2017L
mass2 = temp["views"] == Max2017V
temp[mass1 | mass2]
# Visualizing the inference
import seaborn as sns
from matplotlib import pyplot
# Visualize correlations
pyplot.scatter(df["views"], df["likes"])
# Calculate Covariance
# cov(X, Y) = (sum (x - mean(X)) * (y - mean(Y)) ) * 1/(n-1)
vmean = df["views"].mean()
lmean = df["likes"].mean()
vsum = 0
# calculate sum (x - mean(X)
for val in df["views"]:
dif = val - vmean
vsum = vsum + dif
lsum = 0
# calculate sum (x - mean(Y)
for val in df["likes"]:
dif = val - lmean
lsum = lsum + dif
total = lsum * vsum
le = len(df) - 1
Covar = total / le
print(Covar)
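# As a cross-check (a sketch), pandas computes the same sample covariance directly:
print(df[["views", "likes"]].cov())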
df = []
col_list = ["title", "views", "likes"]
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
if filename.endswith(".csv"):
df1 = pd.DataFrame(
pd.read_csv(os.path.join(dirname, filename), header=0, usecols=col_list)
)
df1["country"] = filename[:2]
df.append(df1)
train = pd.concat(df, axis=0, ignore_index=True)
train.shape
train.isna().sum()
train.describe()
train.corr()
train.head(10)
train.cov()
sns.heatmap(train.corr(), annot=True)
scatter_matrix(train)
plt.show()
sns.regplot(x="views", y="likes", data=train)
plt.title("Correlation between views and likes")
pd.DataFrame(train.groupby("country").sum())
country = pd.DataFrame(train.groupby("country").sum())
sns.barplot(x=country.index, y=country["views"])
sns.barplot(x=country.index, y=country["likes"])
titlewise = pd.DataFrame(train.groupby(by=["title"]).sum())
titlewise
titlewise.sort_values(by=["views", "likes"], ascending=False, inplace=True)
titlewise
titlewise[titlewise["views"] == titlewise["views"].max()]
titlewise[titlewise["likes"] == titlewise["likes"].max()]
sns.barplot(x=titlewise.index[:10], y=titlewise.likes[:10])
plt.xticks(rotation=90)
|
# # LoisLab Data Science Starter Kit -- Winter 2020 -- Class Ideas
# ### 1. Load training data
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
train = pd.read_csv("/kaggle/input/titanic/train.csv")
train
# ### 2.Extract titles from name
# this code says: extract the substring of letters that is preceded by a space and followed by a '.'
train["Title"] = train["Name"].str.extract(" ([A-Za-z]+)\.")
print("here are the unique set of titles: ", train["Title"].unique())
# ### 3. Count the number of string tokens in each name
# this function splits a string into a list, then returns the length of the list
# the splitting uses the space character ' ' as a delimiter, so 'A B C' would
# become: ['A','B','C'], and len(['A','B','C']) = 3
def count_number_of_tokens(s):
return len(s.split(" "))
# you can apply a function to a series (in this case, to make a new series)
train["NameCount"] = train["Name"].apply(count_number_of_tokens)
train[["Name", "Title", "NameCount"]]
# ### 4. Organize Ages into Age Groups
train["Age"] = train["Age"].fillna(-1)
bins = [-2, 0, 8, 16, 24, 36, 60, np.inf]
labels = ["Unknown", "Child", "Tween", "YA", "Adult", "Boomer", "Oldster"]
train["AgeGroup"] = pd.cut(train["Age"], bins, labels=labels)
train[["Age", "AgeGroup"]]
# ### 5. Make input series for all the features that we talked about
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
# dependent variable... the thing we are trying to predict
y = train["Survived"]
# independent variables...
features = ["Sex", "Embarked", "AgeGroup", "Title", "NameCount", "Pclass"]
X = pd.get_dummies(train[features])
X
# ### 6. Give it a try...
model = RandomForestClassifier(n_estimators=100)
model.fit(X, y)
predictions = model.predict(X)
print(np.shape(X), X.columns)
print("Accuracy is ", metrics.accuracy_score(predictions, y))
# ### 7. Now, using the test (not training) data set, prepare data set by adding all fields
# get the test data set
test = pd.read_csv("/kaggle/input/titanic/test.csv")
# add our data features...
test["Title"] = test["Name"].str.extract(" ([A-Za-z]+)\.")
test["NameCount"] = test["Name"].apply(count_number_of_tokens)
test["Age"] = test["Age"].fillna(-1)
bins = [-2, 0, 8, 16, 24, 36, 60, np.inf]
labels = ["Unknown", "Child", "Tween", "YA", "Adult", "Boomer", "Oldster"]
test["AgeGroup"] = pd.cut(test["Age"], bins, labels=labels)
test[["Age", "AgeGroup"]]
# independent variables for which our model is already fit...
features = ["Sex", "Embarked", "AgeGroup", "Title", "NameCount", "Pclass"]
X_test = pd.get_dummies(test[features])
# wait! some fields are missing... that will confuse the model.
# which are the missing fields?
missing_fields = set(X.columns) - set(X_test.columns)
print("missing fields", missing_fields)
# populate the missing fields with zeros
for field in missing_fields:
X_test[field] = 0
# make sure the columns are in the same order as the original
# set of independent variables X:
X_test = X_test[X.columns]
# check that alignment is correct
for s, t in zip(X.columns, X_test.columns):
print(s, t, s == t)
# ### 8. And... run the trained model on the test data
Y_test = model.predict(X_test)
lois_lab_result = pd.DataFrame({"PassengerId": test["PassengerId"], "Survived": Y_test})
lois_lab_result.to_csv("lois_lab_submission.csv", index=False)
|
from IPython.display import clear_output
clear_output()
import multiprocessing
import os
import shutil
import cv2
import numpy as np
from tqdm import tqdm
def main(args) -> None:
if os.path.exists(args["output_dir"]):
shutil.rmtree(args["output_dir"])
os.makedirs(args["output_dir"])
# Get all image paths
image_file_names = os.listdir(args["images_dir"])
# Split the images using a pool of worker processes
progress_bar = tqdm(
total=len(image_file_names), unit="image", desc="Prepare split image"
)
workers_pool = multiprocessing.Pool(args["num_workers"])
for image_file_name in image_file_names:
print(image_file_name)
workers_pool.apply_async(
worker,
args=(image_file_name, args),
callback=lambda arg: progress_bar.update(1),
)
workers_pool.close()
workers_pool.join()
progress_bar.close()
def worker(image_file_name, args) -> None:
image = cv2.imread(f"{args['images_dir']}/{image_file_name}", cv2.IMREAD_UNCHANGED)
image_height, image_width = image.shape[0:2]
    print(f"{image_height} {image_width}\n")
# print(args["image_size"])
index = 1
if image_height >= args["image_size"] and image_width >= args["image_size"]:
for pos_y in range(0, image_height - args["image_size"] + 1, args["step"]):
for pos_x in range(0, image_width - args["image_size"] + 1, args["step"]):
# print("HELLO1")
# Crop
crop_image = image[
pos_y : pos_y + args["image_size"],
pos_x : pos_x + args["image_size"],
...,
]
# print("hello2")
crop_image = np.ascontiguousarray(crop_image)
# print("HELLO3")
# Save image
print(
f"{args['output_dir']}/{image_file_name.split('.')[-2]}_{index:04d}.{image_file_name.split('.')[-1]}"
)
cv2.imwrite(
f"{args['output_dir']}/{image_file_name.split('.')[-2]}_{index:04d}.{image_file_name.split('.')[-1]}",
crop_image,
)
index += 1
if __name__ == "__main__":
# --images_dir ../data/ImageNet/original --output_dir ../data/ImageNet/SRGAN/train --image_size 128 --step 64 --num_workers 16"
# parser = argparse.ArgumentParser(description="Prepare database scripts.")
# parser.add_argument("--images_dir", type=str, help="Path to input image directory.")
# parser.add_argument("--output_dir", type=str, help="Path to generator image directory.")
# parser.add_argument("--image_size", type=int, help="Low-resolution image size from raw image.")
# parser.add_argument("--step", type=int, help="Crop image similar to sliding window.")
# parser.add_argument("--num_workers", type=int, help="How many threads to open at the same time.")
# args = parser.parse_args()
args = {
"images_dir": "/kaggle/input/div2k-dataset/DIV2K_train_HR/DIV2K_train_HR",
"output_dir": "/kaggle/working/div2k-dataset/SRGAN/train",
"image_size": 128,
"step": 64,
"num_workers": 16,
}
main(args)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import pandas as pd
ds = pd.read_csv("../input/datasheet.csv")
print(ds)
import numpy as np
a = np.mean(ds["x"])
print(a)
b = np.mean(ds["y"])
print(b)
c = ds["x"] - a
ds["x1"] = c
d = ds["y"] - b
ds["y1"] = d
print(ds)
ds["(x1)^2"] = np.square(ds["x1"])
ds["(x1)^2mean"] = np.mean(ds["(x1)^2"])
print(ds)
ds["(x1)x(y1)"] = np.multiply(ds["x1"], ds["y1"])
print(ds)
e = np.sum(ds["(x1)x(y1)"])
f = np.sum(ds["(x1)^2"])
print("slope=")
g = e / f
print(g)
C = b - g * a
print(C)
ds["C"] = C
ds["y-pri"] = (g * ds["x"]) + C
print(ds)
ds["y-pri - y"] = ds["y-pri"] - ds["y"]
ds["(y-pri - y)^2"] = np.square(ds["y-pri - y"])
print(ds)
h = np.mean(ds["(y-pri - y)^2"])
o = np.sqrt(h)
print(o)
|
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
import seaborn as sns
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
from IPython.core.display import HTML
with open("./CSS.css", "r") as file:
custom_css = file.read()
HTML(custom_css)
ROOT_PATH = "/kaggle/input/house-prices-advanced-regression-techniques"
train = pd.read_csv(ROOT_PATH + "/train.csv")
test = pd.read_csv(ROOT_PATH + "/test.csv")
sample = pd.read_csv(ROOT_PATH + "/sample_submission.csv")
def summary(text, df):
print(f"{text} shape: {df.shape}")
summ = pd.DataFrame(df.dtypes, columns=["dtypes"])
summ["null"] = df.isnull().sum()
summ["unique"] = df.nunique()
summ["min"] = df.min()
summ["median"] = df.median()
summ["max"] = df.max()
summ["mean"] = df.mean()
summ["std"] = df.std()
return summ
summary("train", train)
train["LotFrontage"].fillna((train["LotFrontage"].mean()), inplace=True)
summary("test", test)
test["LotFrontage"].fillna((test["LotFrontage"].mean()), inplace=True)
pd.set_option("display.max_columns", None)
train.head(5)
CAT_COLS = train.select_dtypes(include="object")
CAT_COLS
NUM_COLS = train.select_dtypes(include=["float64", "int64"])
NUM_COLS
|
#
# ##**Module | Python: Final Project**
# Professor [André Perez](https://www.linkedin.com/in/andremarcosperez/)
# ##**Topics**
# Problem description;
# Data exploration;
# Data transformation and cleaning;
# Data visualization;
# Storytelling.
# ##1\. *Brief description of the problem:*
# We will explore the credit data available at this [link](https://raw.githubusercontent.com/andre-marcos-perez/ebac-course-utils/develop/dataset/credito.csv). The data is in CSV format and contains information about clients of a financial institution. In particular, we are interested in explaining the second column, called **default**, which indicates whether a client is non-delinquent (`default = 0`) or delinquent (`default = 1`); in other words, we want to understand why a client stops honoring their debts, based on the behavior of other attributes such as salary, education, and financial activity. A full description of the attributes is below.
# | Column | Description |
# | ------- | --------- |
# | id | Account number |
# | default | Indicates whether the client is non-delinquent (0) or delinquent (1) |
# | idade | --- |
# | sexo | --- |
# | depedentes | --- |
# | escolaridade | --- |
# | estado_civil | --- |
# | salario_anual | Monthly salary bracket multiplied by 12 |
# | tipo_cartao | Card category: blue, silver, gold and platinum |
# | meses_de_relacionamento | Number of months since the account was opened |
# | qtd_produtos | Number of products purchased |
# | iteracoes_12m | Number of interactions with the client in the last year |
# | meses_inatico_12m | Number of months the client was inactive in the last year |
# | limite_credito | Credit card limit |
# | valor_transacoes_12m | Total value of credit card transactions in the last year |
# | qtd_transacoes_12m | Total number of credit card transactions in the last year |
# ##2\. Data exploration
# We start by importing the packages we are going to use and then read the data into a pandas dataframe.
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set_style("whitegrid")
df = pd.read_csv(
"https://raw.githubusercontent.com/andre-marcos-perez/ebac-course-utils/develop/dataset/credito.csv",
na_values="na",
)
df.head(n=10)
# With the data in hand, let's get to know the structure of our dataset a little better.
# ### **2.1. Structure**
#
df.shape
df[df["default"] == 0].shape
df[df["default"] == 1].shape
# Let's calculate the proportion of non-delinquent and delinquent clients.
qtd_total, _ = df.shape
qtd_adimplentes, _ = df[df["default"] == 0].shape
qtd_inadimplentes, _ = df[df["default"] == 1].shape
print(
f"A proporção de clintes adimplentes é de {round(100 * qtd_adimplentes / qtd_total, 2)}%"
)
print(
f"A proporção de clintes inadimplentes é de {round(100 * qtd_inadimplentes / qtd_total, 2)}%"
)
# **Note:** We can see that more than 80% of our base consists of non-delinquent clients.
# ### **2.2. Schema**
df.head(n=5)
df.dtypes  # Here we can see the type of each column.
# - **Note:**
# The **limite_credito** and **valor_transacoes_12m** columns are being recognized as object.
# We will fix this in the cleaning step.
# - Let's separate the columns into **categorical** attributes and **numerical** attributes.
# - **Categorical** attributes.
df.select_dtypes("object").describe().transpose()
# We can confirm that the **limite_credito** and **valor_transacoes_12m** columns are indeed being recognized as object.
# - **Numerical** attributes.
df.drop("id", axis=1).select_dtypes("number").describe().transpose()
# ### **2.3. Missing data**
# Here we check whether our dataset has missing data.
df.head()
df.isna().any()  # Check which columns have missing data.
# - The function below gathers some statistics about the columns with missing data.
def stats_dados_faltantes(df: pd.DataFrame) -> None:
stats_dados_faltantes = []
for col in df.columns:
if df[col].isna().any():
qtd, _ = df[df[col].isna()].shape
total, _ = df.shape
dict_dados_faltantes = {
col: {"quantidade": qtd, "porcentagem": round(100 * qtd / total, 2)}
}
stats_dados_faltantes.append(dict_dados_faltantes)
for stat in stats_dados_faltantes:
print(stat)
stats_dados_faltantes(df=df)
stats_dados_faltantes(df=df[df["default"] == 0])
stats_dados_faltantes(df=df[df["default"] == 1])
# ## 3\. Data transformation and cleaning
# Now that we know the nature of our dataset better, we will carry out an activity known as *data wrangling*, which consists of transforming and cleaning the data so that it can be analyzed more easily. In particular, we will:
# - Fix the *schema* of our columns;
# - Remove the missing data.
# ### **3.1. Schema correction**
# In the exploration step, we noticed that the **limite_credito** and **valor_transacoes_12m** columns were being interpreted as categorical columns (`dtype = object`).
df[["limite_credito", "valor_transacoes_12m"]].dtypes
df[["limite_credito", "valor_transacoes_12m"]].head(n=5)
# Let's create a `lambda` function to clean the data.
df["valor_transacoes_12m"] = df["valor_transacoes_12m"].apply(
lambda valor: float(valor.replace(".", "").replace(",", "."))
)
df["limite_credito"] = df["limite_credito"].apply(
lambda valor: float(valor.replace(".", "").replace(",", "."))
)
df.dtypes
# We can see that the **limite_credito** and **valor_transacoes_12m** columns are now recognized as float.
# - **Categorical** attributes.
df.select_dtypes("object").describe().transpose()
# - **Numerical** attributes.
df.drop("id", axis=1).select_dtypes("number").describe().transpose()
# ### **3.2. Removing missing data**
# Since pandas knows what a missing value is, removing the problematic rows is trivial.
df.dropna(inplace=True)
# Let's analyze the structure of the data again.
df.shape
df[df["default"] == 0].shape
df[df["default"] == 1].shape
qtd_total_novo, _ = df.shape
qtd_adimplentes_novo, _ = df[df["default"] == 0].shape
qtd_inadimplentes_novo, _ = df[df["default"] == 1].shape
print(
f"A proporção de adimplentes ativos é de {round(100 * qtd_adimplentes / qtd_total, 2)}%"
)
print(
f"A nova proporção de clintes adimplentes é de {round(100 * qtd_adimplentes_novo / qtd_total_novo, 2)}%"
)
print("")
print(
f"A proporção de clintes inadimplentes é de {round(100 * qtd_inadimplentes / qtd_total, 2)}%"
)
print(
f"A nova proporção de clintes inadimplentes é de {round(100 * qtd_inadimplentes_novo / qtd_total_novo, 2)}%"
)
# ## 4\. Data visualization
# The data is ready; let's create several visualizations to correlate the explanatory variables with the response variable, trying to understand which factors lead a client to default. To do that, we will always compare the full client base with the non-delinquent and delinquent subsets.
df_adimplente = df[df["default"] == 0]
df_inadimplente = df[df["default"] == 1]
# ### **4.1. Categorical visualizations**
# In this section, we visualize the relationship between the response variable **default** and the categorical attributes.
df.select_dtypes("object").head(n=5)
coluna_escolaridade = "escolaridade"
titulo_escolaridade = [
"Escolaridade dos Clientes",
"Escolaridade dos Clientes Adimplentes",
"Escolaridade dos Clientes Inadiplentes",
]
coluna_salario = "salario_anual"
titulo_salario = [
"Salário Anual dos Clientes",
"Salário Anual dos Clientes Adimplentes",
"Salário Anual dos Clientes Inadimplentes",
]
coluna_estado_civil = "estado_civil"
titulo_estado_civil = [
"Estado_civil dos Clientes",
"Estado_civil dos Clientes Adimplentes",
"Estado_civil dos Clientes Inadimplentes",
]
coluna_tipo_cartao = "tipo_cartao"
titulo_tipo_cartao = [
"Tipo de cartao dos Clientes",
"Tipo de cartao dos Clientes Adimplentes",
"Tipo de cartao dos Clientes Inadimplentes",
]
def plot_tabela_clientes(df, df_adimplente, df_inadimplente, coluna=None, titulos=None):
eixo = 0
max_y = 0
max = df.select_dtypes("object").describe()[coluna]["freq"] * 1.1
figura, eixos = plt.subplots(1, 3, figsize=(20, 5), sharex=True)
for dataframe in [df, df_adimplente, df_inadimplente]:
df_to_plot = dataframe[coluna].value_counts().to_frame()
df_to_plot.rename(columns={coluna: "frequencia_absoluta"}, inplace=True)
df_to_plot[coluna] = df_to_plot.index
df_to_plot.sort_values(by=[coluna], inplace=True)
df_to_plot.sort_values(by=[coluna])
f = sns.barplot(
x=df_to_plot[coluna], y=df_to_plot["frequencia_absoluta"], ax=eixos[eixo]
)
f.set(
title=titulos[eixo],
xlabel=coluna.capitalize(),
ylabel="Frequencia Absoluta",
)
f.set_xticklabels(labels=f.get_xticklabels(), rotation=90)
_, max_y_f = f.get_ylim()
max_y = max_y_f if max_y_f > max_y else max_y
f.set(ylim=(0, max_y))
eixo += 1
plt.close()
return figura
# - Education (`escolaridade`)
plot_tabela_clientes(
df,
df_adimplente,
df_inadimplente,
coluna=coluna_escolaridade,
titulos=titulo_escolaridade,
)
# - Annual salary (`salario_anual`)
plot_tabela_clientes(
df, df_adimplente, df_inadimplente, coluna=coluna_salario, titulos=titulo_salario
)
# - Marital status (`estado_civil`)
plot_tabela_clientes(
df,
df_adimplente,
df_inadimplente,
coluna=coluna_estado_civil,
titulos=titulo_estado_civil,
)
# - Card type (`tipo_cartao`)
plot_tabela_clientes(
df,
df_adimplente,
df_inadimplente,
coluna=coluna_tipo_cartao,
titulos=titulo_tipo_cartao,
)
# **Note:**
# - Looking at the categorical attributes, we notice that the pattern holds across the three groups and there is no significant difference that would explain clients becoming delinquent.
# ### **4.2. Numerical visualizations**
# In this section, we visualize the relationship between the response variable **default** and the numerical attributes.
df.drop(["id", "default"], axis=1).select_dtypes("number").head(n=5)
coluna_qtd_transacoes = "qtd_transacoes_12m"
titulo_qtd_transacoes = [
"Qtd. de Transações no Último Ano",
"Qtd. de Transações no Último Ano de Adimplentes",
"Qtd. de Transações no Último Ano de Inadimplentes",
]
coluna_valor_transacoes = "valor_transacoes_12m"
titulo_valor_transacoes = [
"Valor das Transações no Último Ano",
"Valor das Transações no Último Ano de Adimplentes",
"Valor das Transações no Último Ano de Inadimplentes",
]
def plot_numerico(df, df_adimplente, df_inadimplente, coluna=None, titulos=None):
eixo = 0
max_y = 0
figura, eixos = plt.subplots(1, 3, figsize=(20, 5), sharex=True)
for dataframe in [df, df_adimplente, df_inadimplente]:
f = sns.histplot(x=coluna, data=dataframe, stat="count", ax=eixos[eixo])
f.set(
title=titulos[eixo],
xlabel=coluna.capitalize(),
ylabel="Frequencia Absoluta",
)
_, max_y_f = f.get_ylim()
max_y = max_y_f if max_y_f > max_y else max_y
f.set(ylim=(0, max_y))
eixo += 1
plt.close()
return figura
# - Number of Transactions in the Last 12 Months
plot_numerico(
df,
df_adimplente,
df_inadimplente,
coluna=coluna_qtd_transacoes,
titulos=titulo_qtd_transacoes,
)
# **Insight 1**
# Looking at the numerical attributes, we can see that clients who made between 20 and 60 transactions per year show a higher likelihood of becoming delinquent.
# - Value of Transactions in the Last 12 Months
plot_numerico(
df,
df_adimplente,
df_inadimplente,
coluna=coluna_valor_transacoes,
titulos=titulo_valor_transacoes,
)
# **Insight 2**
# Delinquent clients show a behavioral pattern in which their total transaction value over the last 12 months is very close to R$2,500.
# - Value of Transactions in the Last 12 Months vs. Number of Transactions in the Last 12 Months
f = sns.relplot(
x="valor_transacoes_12m", y="qtd_transacoes_12m", data=df, hue="default"
)
_ = f.set(
title="Relação entre Valor e Quantidade de Transações no Último Ano",
xlabel="Valor das Transações no Último Ano",
ylabel="Quantidade das Transações no Último Ano",
)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Compare Text According to Embeddings
from sentence_transformers import SentenceTransformer, util
import torch
model = SentenceTransformer("paraphrase-multilingual-MiniLM-L12-v2")
# ## Look for matches in corpus
# get a sample from the middle of The Little Prince to compare documents against, looking for similarity
import re
text = pd.read_csv(
"/kaggle/input/littleprince/the_little_prince.txt", sep="/n", names=["Text"]
)
for x in range(0, len(text)):
text.iloc[x][0] = text.iloc[x][0].lower()
for x in range(0, len(text)):
    text.iloc[x][0] = re.sub(r"\W+", " ", text.iloc[x][0])
# make an array with strings
text_preprocessed = [0] * len(text)
for x in range(len(text)):
text_preprocessed[x] = text.iloc[x][0]
# remove blanks
for x in range(len(text_preprocessed)):
if text_preprocessed[x][0] == " ":
text_preprocessed[x] = text_preprocessed[x][1:]
for x in range(len(text_preprocessed)):
if text_preprocessed[x][-1] == " ":
text_preprocessed[x] = text_preprocessed[x][:-1]
# take a sample of 50 lines from the middle of the work
little_prince_sample = [0] * 50
for x in range(0, len(little_prince_sample)):
little_prince_sample[x] = text_preprocessed[800 + x]
little_prince_sample_string = " ".join(little_prince_sample)
little_prince_sample_string
# use the langdetect library to detect the language of the matching documents
from langdetect import detect, DetectorFactory
def det(x):
try:
language = detect(x)
except:
language = "Other"
return language
# based on this article:
# https://towardsdatascience.com/multilingual-text-similarity-matching-using-embedding-f79037459bf2
from sentence_transformers import SentenceTransformer, util
import torch
model = SentenceTransformer("paraphrase-multilingual-MiniLM-L12-v2", cache_folder="./")
# this is just a sample corpus that was in the original documentation, used to check the algorithm
# it should be replaced with large documents based on web crawling
corpus = [
"I am a boy",
"What are you doing?",
"Can you help me?",
"A man is riding a horse.",
"A woman is playing violin.",
"A monkey is chasing after a goat",
"The quick brown fox jumps over the lazy dog",
]
corpus_embedding = model.encode(corpus, convert_to_tensor=True)
top_k = min(5, len(corpus))
query_embedding = model.encode(little_prince_sample_string, convert_to_tensor=True)
cos_scores = util.cos_sim(query_embedding, corpus_embedding)[0]
top_results = torch.topk(cos_scores, k=top_k)
for score, idx in zip(top_results[0], top_results[1]):
print(f"{round(score.item(), 3)} | {corpus[idx]}")
print("Language: " + det(corpus[idx]))
# ## Check the quality of translations
# the similarity will be checked between the English text of The Little Prince and its Interslavic versions (separately, in Cyrillic and Latin)
isl_cyr = pd.read_csv(
"/kaggle/input/littleprince/lp_ch1_isl_cyr.txt", sep="/n", names=["Text"]
)
isl_lat = pd.read_csv(
"/kaggle/input/littleprince/lp_ch1_isl_lat.txt", sep="/n", names=["Text"]
)
en = pd.read_csv("/kaggle/input/littleprince/lp_ch1_en.txt", sep="/n", names=["Text"])
# for cyrillic
for i in range(len(en)):
source = en.iloc[i][0]
target = isl_cyr.iloc[i][0]
source_embedding = model.encode(source, convert_to_tensor=True)
target_embedding = model.encode(target, convert_to_tensor=True)
cos_score = util.cos_sim(source_embedding, target_embedding)[0]
cos_score = cos_score.numpy()[0]
print("Cosine similarity: ")
print(cos_score)
if cos_score < 0.5:
print("The translation of paragraph", i + 1, "is not good enough!")
# for latin
for i in range(len(en)):
source = en.iloc[i][0]
target = isl_lat.iloc[i][0]
source_embedding = model.encode(source, convert_to_tensor=True)
target_embedding = model.encode(target, convert_to_tensor=True)
cos_score = util.cos_sim(source_embedding, target_embedding)[0]
cos_score = cos_score.numpy()[0]
print("Cosine similarity: ")
print(cos_score)
if cos_score < 0.5:
print("The translation of paragraph", i + 1, "is not good enough!")
# -------------------------------------------------
# # Flavourisation Interslavic to Bulgarian
# define sample text
isl = "Kogda mi bylo šest lět, uviděl jesm raz prělěpy obrazok v knigě o pralěsu, ktora nazyvala se «Pravdive Povědky iz Prirody». Na obrazku byla zmija boa požirajuča dikogo zvěrja. Tuto jest kopija ovoj rysovanky. V knigě pisalo se: «Zmija boa prěgoltaje svoju žrtvu v cělosti, bez žuvanja. Potom ne može dvigati se i spi šest měsecev, dokolě ju ne prěvari.» Mnogo jesm togda razmysljal o prigodah v džungli i barvnoju olovkoju uspěl jesm sdělati svoju prvu rysovanku. To byla moja rysovanka nr. 1. Ona izgledala tako:"
# preprocess the text
def preprocess(text):
text = text.lower()
text = text.replace(" ", " ")
text = text.replace(",", "")
text = text.replace(";", "")
text = text.replace(":", "")
text = text.replace("»", "")
text = text.replace("«", "")
text = text.replace("”", "")
text = text.replace("“", "")
text = text.replace("'", "")
return text
isl = preprocess(isl)
# exchange letters with their equivalents
def isl_to_bg(text):
text = text.replace(
"ja", "я"
) # important to begin with these replacements, before the Latin "a" has turned into Cyrillic counterpart
text = text.replace("ju", "ю")
text = text.replace("je", "e")
text = text.replace("a", "а")
text = text.replace("o", "о")
text = text.replace("e", "е")
text = text.replace("u", "у")
text = text.replace("i", "и")
text = text.replace("y", "и")
text = text.replace("ę", "е")
text = text.replace("ě", "е")
text = text.replace("ė", "ъ")
text = text.replace("ȯ", "ъ")
text = text.replace("ų", "ъ")
text = text.replace("å", "а")
text = text.replace("ě", "я")
text = text.replace("j", "й") # later sheck if "йо" should be "ьо"
text = text.replace("b", "б")
text = text.replace("v", "в")
text = text.replace("g", "г")
text = text.replace("d", "д")
text = text.replace("ž", "ж")
text = text.replace("z", "з")
text = text.replace("k", "к")
text = text.replace("l", "л")
text = text.replace("m", "м")
text = text.replace("n", "н")
text = text.replace("p", "п")
text = text.replace("r", "р")
text = text.replace("s", "с")
text = text.replace("t", "т")
text = text.replace("f", "ф")
text = text.replace("h", "х")
text = text.replace("c", "ц")
text = text.replace("č", "ч")
text = text.replace("š", "ш")
text = text.replace("ć", "щ")
text = text.replace("ď", "д")
text = text.replace("ľ", "л")
text = text.replace("ń", "н")
text = text.replace("ŕ", "р")
text = text.replace("ś", "с")
text = text.replace("ť", "т")
text = text.replace("ź", "з")
    words = text.split()
    for i, word in enumerate(words):
        if "йо" in word:
            a = word.find("йо")
            # "йо" after a consonant is written "ьо" in Bulgarian
            if a != 0 and word[a - 1] not in "аъоуеи":
                words[i] = word.replace("йо", "ьо")
    text = " ".join(words)
return text
isl = isl_to_bg(isl)
isl
# install and check Russian PoS tagger
# (alternatively, can try OCS tagger: https://sciendo.com/pdf/10.2478/jazcas-2021-0051)
import spacy
from spacy import displacy
from collections import Counter
import pandas as pd
pd.set_option("max_rows", 400)
pd.set_option("max_colwidth", 400)
nlp = spacy.load("ru_core_news_md")
txt = isl
document = nlp(txt)
for token in document:
print(token.lemma_, token.pos_, token.dep_)
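# `Counter` was imported above but not used so far; a quick tally of the part-of-speech
# tags gives a compact summary of how the Russian tagger reads the flavourised text.
pos_counts = Counter(token.pos_ for token in document)
print(pos_counts.most_common())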
|
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import seaborn as sns
df = pd.read_csv("/kaggle/input/ipl2023/match_01.csv")
# # **Cleaning the data**
df
df.info()
df.duplicated().sum()
df = df.drop("comment", axis=1)
# # **Splitting the dataframe into two dataframes, one for each innings**
df1 = df.iloc[0:121, :]
df1
df2 = df.iloc[121:, :]
df2
df2 = df2.reset_index()
df2.info()
df2 = df2.drop("index", axis=1)
df2
df1 = df1.drop("inningno", axis=1)
df2 = df2.drop("inningno", axis=1)
df1
df2
# # **Number of deliveries faced by each batter in innings 1**
plt.figure(figsize=(16, 10))
ax = sns.countplot(x=df1["batter"])
plt.xticks(rotation=90)
# # **Number of deliveries faced by each batter in innings 2**
plt.figure(figsize=(16, 10))
ax = sns.countplot(x=df2["batter"])
plt.xticks(rotation=90)
# # **Number of deliveries bowled by each bowler in innings 1**
plt.figure(figsize=(16, 10))
ax = sns.countplot(x=df1["bowler"])
plt.xticks(rotation=90)
# # **Number of deliveries bowled by each bowler in innings 2**
plt.figure(figsize=(16, 10))
ax = sns.countplot(x=df2["bowler"])
plt.xticks(rotation=90)
df1
# # **Sixes in innings 1: batters who hit them and bowlers who conceded them**
players = df1[df1["outcome"] == "6"]
players
players["batter"].value_counts()
plt.figure(figsize=(16, 10))
ax = sns.countplot(x=players["batter"])
plt.xticks(rotation=90)
players["bowler"].value_counts()
plt.figure(figsize=(16, 10))
ax = sns.countplot(x=players["bowler"])
plt.xticks(rotation=90)
# # **Sixes in innings 2: batters who hit them and bowlers who conceded them**
players = df2[df2["outcome"] == "6"]
players
players["batter"].value_counts()
plt.figure(figsize=(16, 10))
ax = sns.countplot(x=players["batter"])
plt.xticks(rotation=90)
players["bowler"].value_counts()
plt.figure(figsize=(16, 10))
ax = sns.countplot(x=players["bowler"])
plt.xticks(rotation=90)
# # **Score card of innings 1**
# count each outcome per batter and pivot into a score card (no CSV round trip needed)
df3 = df1.groupby(["batter", "outcome"])["outcome"].count().rename("count").reset_index()
df3 = pd.pivot_table(
    df3,
    values="count",
    index=["batter"],
    columns=["outcome"],
    fill_value=0,
    aggfunc=np.sum,
)
df3
df3.plot(kind="bar")
# # **Score card of innings 2**
df3 = df2.groupby(["batter", "outcome"])["outcome"].count().rename("count").reset_index()
df3 = pd.pivot_table(
    df3,
    values="count",
    index=["batter"],
    columns=["outcome"],
    fill_value=0,
    aggfunc=np.sum,
)
df3
df3.plot(kind="bar")
|
# # **Exchange Rates**
# ## _Problem description_
# _Analysis and visualization of currency exchange-rate time series for different currencies compared against the Chilean peso._
# ### _Library setup_
# We set up the libraries needed for data import, statistical analysis, and visualization of some relationships in the data. We also configure general warning handling and the plot style.
#
# Import libraries
# ==============================================================================
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime  # << date handling
# Warnings configuration
# ==============================================================================
import warnings
warnings.filterwarnings("ignore")
# Plot style configuration
# ==============================================================================
sns.set()
# ## Data Acquisition ##
# _The dataset comes from a [Kaggle](https://www.kaggle.com/datasets/ruchi798/currency-exchange-rates) profile._
#
# Read the data
# ==============================================================================
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
df = pd.read_csv(
"/kaggle/input/currency-exchange-rates/exchange_rates.csv", index_col=0
)
# ## _Dataset description_
# ### Getting to know the dataset
# _The variables in the dataset are: **Country/Currency**: name of the currency; **currency**: currency abbreviation; **value**: value of the currency; and **date**: date of the record._
#
df.head()  # << identify the nature of the data
df.info()  # << quick look at nulls and data types
# _An important detail is that the 'EUR' values are all 1, since the euro serves as the reference for every other currency. Below is an example of how 'EUR' compares with 'USD' on a few days in the dataset._
df[(df["currency"] == "EUR") | (df["currency"] == "USD")].head()
# ### Transforming the dataset
# _Note that the 'date' variable is of object type; to take advantage of Python's date-handling features we convert it to a datetime format._
#
df["date"] = pd.to_datetime(
    df["date"], format="%d/%m/%Y", dayfirst=True
)  # << convert the object column 'date' to datetime
df.info()  # << confirm the change
#
# _A better way to work with these data is to build a tidy table in which each column is one of the currencies. For convenience, we use the currency abbreviations as column headers. Note also that no index is set on the table, so the columns can be used directly in the visualizations further below._
#
df_t = (
pd.pivot_table(
data=df,
values="value",
index="date",
columns="currency",
aggfunc="mean",
fill_value=None,
)
.reset_index()
.sort_values(by="date")
)  # << build a tidy version of the table
df_t.head()  # << take a look at the result
# _I create a simple way to select some currencies of interest, particularly the American ones, to compare their evolution against the Chilean peso. I define a list with the currencies to study and then build a dictionary that returns the abbreviation for each one._
lista = [
"Euro",
"USA Dollar",
"Argentina Peso",
"Bahamas Dollar",
"Barbados Dollar",
"Bolivia Boliviano",
"Brazil Real",
"Chili Peso",
"Colombia Peso",
"Costa Rica Colon",
"Cuba Convertible Peso",
"Cuba Peso",
"Dominican Republich Peso",
"El Salvador Colon",
"Guatemala Quetzal",
"Guyana Dollar",
"Haiti Gourde",
"Honduras Lempira",
"Jamaica Dollar",
"Mexico Peso",
"Nicaragua Cordoba Oro",
"Panama Balboa",
"Paraguay Guarani",
"Peru Nuevo Sol",
"Trinidad/Tobago Dollar",
"Uruguay Peso",
]
dicts = {
lista[i]: " ".join(df[df["Country/Currency"] == lista[i]]["currency"].unique())
for i in range(len(lista))
}
dicts  # << take a look at the resulting dictionary
# _Further below we will also transform the dataset to reproduce these visualizations at the 'USD' level; for that we simply multiply the currency values by the inverse of the 'USD' value, using pandas' 'mul' function._
# ***
# ### Time series visualization
# ***
# #### a. Time series of the value of 1 euro in the different currencies
# _We use the information exactly as it appears in the original dataset, which records how much 1 EUR is worth in each currency._
df_t_eur = df_t  # << keep a reference to the original table so we can build the different scenarios
# _We build a loop that produces all the plots at once. The key idea is to use the tidy table and the dictionary generated earlier to pull the data iteratively; in each iteration we also parameterize the titles and add some formatting._
fig, axs = plt.subplots(9, 3, figsize=(20, 30), constrained_layout=True, sharex=False)
for ax, m in zip(axs.flat, dicts):
ax2 = ax.twinx() # << Usar eje secundario
ax.plot("date", dicts[m], data=df_t_eur, color="b")
ax.set_ylabel(m + "por 1 EUR", color="b", size=10, weight="bold")
ax.tick_params(axis="y", size=8, colors="b")
ax2.plot("date", dicts["Chili Peso"], data=df_t_eur, color="g")
ax2.set_xlabel("date", size=10)
ax2.set_ylabel("Chili Peso por 1 EUR", color="g", size=10, weight="bold")
ax2.tick_params(axis="y", size=8, colors="g")
plt.title(
"Correlación con CLP: "
+ str(round(df_t_eur[dicts["Chili Peso"]].corr(df_t_eur[dicts[m]]), 2)),
weight="bold",
)
# #### b. Time series of the value of 1 dollar in the different currencies
# _We apply a transformation that divides every value in the dataset by the recorded dollars-per-euro rate, which rebases the data to dollars. In other words, the 'df_t_usd' dataset records how much 1 USD is worth in each currency._
df_t_eur = df_t.set_index("date")
df_t_usd = df_t_eur.mul(1 / df_t_eur["USD"], axis=0).reset_index()
df_t_eur = df_t_eur.reset_index()
df_t_usd.tail(3)
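# Sanity check on the rebasing (assumption: after dividing every column by the EUR->USD
# rate, the USD column itself should be 1 on every date).
df_t_usd["USD"].describe()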
# _We repeat the same procedure as before, now using the new dataset._
fig, axs = plt.subplots(9, 3, figsize=(20, 30), constrained_layout=True, sharex=False)
for ax, m in zip(axs.flat, dicts):
ax2 = ax.twinx()
ax.plot("date", dicts[m], data=df_t_usd, color="b")
ax.set_ylabel(m + "por 1 USD", color="b", size=10, weight="bold")
ax.tick_params(axis="y", size=8, colors="b")
ax2.plot("date", dicts["Chili Peso"], data=df_t_usd, color="g")
ax2.set_xlabel("date", size=10)
ax2.set_ylabel("Chili Peso por 1 USD", color="g", size=10, weight="bold")
ax2.tick_params(axis="y", size=8, colors="g")
plt.title(
"Correlación con CLP: "
+ str(round(df_t_usd[dicts["Chili Peso"]].corr(df_t_usd[dicts[m]]), 2)),
weight="bold",
)
# #### c. Time series of the value of the different currencies in Chilean pesos
# _Finally, we look at how much each currency costs in Chilean pesos to assess the deterioration in the value of the latter._
df_t_aux = df_t
df_t_aux = df_t_aux.set_index("date")
df_t_inv = 1 / df_t_aux
df_t_clp = df_t_inv.mul(df_t_aux["CLP"], axis=0).reset_index()
df_t_clp.tail(3)
# _Here the plots show how much each currency is worth in Chilean pesos. Over the last month the selected countries' currencies have gained value against it, i.e. the Chilean peso has weakened._
fig, axs = plt.subplots(9, 3, figsize=(20, 30), constrained_layout=True, sharex=False)
for ax, m in zip(axs.flat, dicts):
ax.plot("date", dicts[m], data=df_t_clp)
ax.set_ylabel("CLP por 1 " + m, size=10, weight="bold")
ax.tick_params(axis="y", size=8)
|
import numpy as np
from gensim.models.word2vec import Word2Vec
import pickle
from torch.utils.data import Dataset, DataLoader
import os
import torch
import torch.nn as nn
def split_poetry(file="/kaggle/input/poetry-7txt/poetry_7.txt"):
all_data = open(file, "r", encoding="utf-8").read()
all_data_split = " ".join(all_data)
with open("split.txt", "w", encoding="utf-8") as f:
f.write(all_data_split)
def train_vec(
split_file="split.txt", org_file="/kaggle/input/poetry-7txt/poetry_7.txt"
):
vec_params_file = "vec_params.pkl"
    if not os.path.exists(split_file):
        split_poetry()
split_all_data = open(split_file, "r", encoding="utf-8").read().split("\n")[:1000]
org_data = open(org_file, "r", encoding="utf-8").read().split("\n")[:1000]
if os.path.exists(vec_params_file):
return org_data, pickle.load(open(vec_params_file, "rb"))
model = Word2Vec(split_all_data, vector_size=107, min_count=1, workers=6)
pickle.dump(
(model.syn1neg, model.wv.key_to_index, model.wv.index_to_key),
open(vec_params_file, "wb"),
)
return org_data, (model.syn1neg, model.wv.key_to_index, model.wv.index_to_key)
class MyDataset(Dataset):
    # load all the data
    # store and initialise some variables
def __init__(self, all_data, w1, word_2_index):
self.w1 = w1
self.word_2_index = word_2_index
self.all_data = all_data
    # fetch one sample and preprocess it
def __getitem__(self, index):
a_poetry_words = self.all_data[index]
a_poetry_index = [self.word_2_index[word] for word in a_poetry_words]
xs_index = a_poetry_index[:-1]
ys_index = a_poetry_index[1:]
xs_embedding = self.w1[xs_index]
return xs_embedding, np.array(ys_index).astype(np.int64)
    # total number of samples
    def __len__(self):
        return len(self.all_data)
class Mymodel(nn.Module):
def __init__(self, embedding_num, hidden_num, word_size):
super().__init__()
self.embedding_num = embedding_num
self.hidden_num = hidden_num
self.word_size = word_size
self.lstm = nn.LSTM(
input_size=embedding_num,
hidden_size=hidden_num,
batch_first=True,
num_layers=2,
bidirectional=False,
)
        self.dropout = nn.Dropout(0.3)  # with dropout, the generated poems are not always identical
self.flatten = nn.Flatten(0, 1)
self.linear = nn.Linear(hidden_num, word_size)
self.cross_entropy = nn.CrossEntropyLoss()
def forward(self, xs_embedding, h_0=None, c_0=None):
xs_embedding = xs_embedding.to(device)
        if h_0 is None or c_0 is None:
h_0 = torch.tensor(
np.zeros((2, xs_embedding.shape[0], self.hidden_num), np.float32)
)
c_0 = torch.tensor(
np.zeros((2, xs_embedding.shape[0], self.hidden_num), np.float32)
)
h_0 = h_0.to(device)
c_0 = c_0.to(device)
hidden, (h_0, c_0) = self.lstm(xs_embedding, (h_0, c_0))
hidden_drop = self.dropout(hidden)
flatten_hidden = self.flatten(hidden_drop)
pre = self.linear(flatten_hidden)
return pre, (h_0, c_0)
def generate_poetry_auto():
result = ""
word_index = np.random.randint(0, word_size, 1)[0]
result += index_2_word[word_index]
h_0 = torch.tensor(np.zeros((2, 1, hidden_num), np.float32))
c_0 = torch.tensor(np.zeros((2, 1, hidden_num), np.float32))
for i in range(31):
# word_embedding = torch.tensor(w1[word_index].reshape(1,1,-1))
word_embedding = torch.tensor(w1[word_index][None][None])
pre, (h_0, c_0) = model(word_embedding, h_0, c_0)
word_index = int(torch.argmax(pre))
result += index_2_word[word_index]
print(result)
if __name__ == "__main__":
device = "cuda" if torch.cuda.is_available() else "cpu"
batch_size = 64
all_data, (w1, word_2_index, index_2_word) = train_vec()
dataset = MyDataset(all_data, w1, word_2_index)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
hidden_num = 128
lr = 0.007
epochs = 1000
word_size, embedding_num = w1.shape
model = Mymodel(embedding_num, hidden_num, word_size)
model = model.to(device)
optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
for e in range(epochs):
for batch_index, (xs_embedding, ys_index) in enumerate(dataloader):
xs_embedding = xs_embedding.to(device)
ys_index = ys_index.to(device)
pre, _ = model(xs_embedding)
loss = model.cross_entropy(pre, ys_index.reshape(-1))
optimizer.zero_grad()
loss.backward()
optimizer.step()
if batch_index % 100 == 0:
print(f"loss:{loss:.3f}")
generate_poetry_auto()
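    # A minimal post-training sketch (assumption: run once the loop above finishes):
    # switch the network to eval mode so dropout is disabled, then sample a few poems.
    model.eval()
    for _ in range(3):
        generate_poetry_auto()
    model.train()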
|
# # Load/Clean Data
# Let's take a look at our dataset, and do some simple cleaning/reorganization for easy study later.
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("whitegrid")
dat_path = "/kaggle/input/house-prices-advanced-regression-techniques/"
train_raw = pd.read_csv(f"{dat_path}train.csv")
test_raw = pd.read_csv(f"{dat_path}test.csv")
sample_sub = pd.read_csv(f"{dat_path}sample_submission.csv")
train_raw.shape
test_raw.shape
train_raw.dropna(axis=1)
test_raw.dropna(axis=1)
# ### How to deal with NaN values?
# It looks like quite a few columns are dropped when we drop Null values. However, we don't necessarily need to drop the entire column if only a few of the values are null - we can just as well drop a few rows if those are the only ones for which the column is null.
# Let's get the counts of null values by column.
plt.rcParams["figure.figsize"] = [12, 8]
plt.rcParams["font.size"] = 16
plt.bar(np.arange(test_raw.shape[1]), test_raw.isna().sum() / len(test_raw))
plt.ylabel("Fraction of Rows with NaN Value")
plt.xlabel("Column Index")
# Let's do the same for the training data.
plt.bar(np.arange(train_raw.shape[1]), train_raw.isna().sum() / len(train_raw))
plt.ylabel("Fraction of Rows with NaN Value")
plt.xlabel("Column Index")
# **Result**: It looks like there are quite a few columns where only a small fraction of the rows are null, whereas there are some for which a substantial portion (more than 10 percent) is missing. Let's drop the columns where more than 10% of the values are missing, retain the rest of the columns, and then drop the rows that still contain a missing value.
test_raw.shape
test = test_raw.dropna(thresh=test_raw.shape[0] * 0.9, axis=1)
train = train_raw.dropna(thresh=train_raw.shape[0] * 0.9, axis=1)
test.shape
train.shape
train = train.dropna(axis=0)
train.shape
# ### Convert Categorical Data
# Quite a few of our variables are categorical (as opposed to numerical). Let's take a look at these and see if we can create one-hot vectors out of them without too much trouble.
train.columns.values
train["Exterior2nd"].dtype.name
# Let's gather up all the column names whose datatype is "object" (and so NOT numeric).
categorical_train_cols = [
col_name
for col_name in train.columns.values
if train[col_name].dtype.name == "object"
]
categorical_train_cols += ["MSSubClass"]
categorical_test_cols = [
col_name
for col_name in test.columns.values
if test[col_name].dtype.name == "object"
]
categorical_test_cols += ["MSSubClass"]
numeric_cols = [
col_name
for col_name in train.columns.values
if col_name not in categorical_train_cols
]
# We can use the `get_dummies` function in Pandas to convert this categorical data into one-hot vectors.
train_df = pd.concat(
(
train[numeric_cols],
pd.concat(
[
pd.get_dummies(train[col_name], prefix=f"{col_name}")
for col_name in categorical_train_cols
],
axis=1,
),
),
axis=1,
)
numeric_test_cols = [
col_name
for col_name in test.columns.values
if col_name not in categorical_test_cols
]
test_df = pd.concat(
(
test[numeric_test_cols],
pd.concat(
[
pd.get_dummies(test[col_name], prefix=f"{col_name}")
for col_name in categorical_test_cols
],
axis=1,
),
),
axis=1,
)
train_df
test_df
# ### Odds and Ends
# * We need to split the train data into train, test, and validation data. Our "test_df" as of now has no training labels, because it's used for submission - so the name 'test' is a bit of a misnomer. We'll need a 3-way split because we're going to be comparing several regression models. Since the test data will be used for hyperparameter selection at the individualized model level (e.g. the regularization weight for LASSO), we need an entirely separate validation dataset at the level of comparing models.
# * We need to separate the predictive features ("X") from the sale price feature ("Y")
# * We should eliminate the additional columns present in the train data that are not in the test data. There is no point in using these features for prediction if they are not part of the test data.
extra_train_cols = set(train_df.columns.values).difference(set(test_df.columns.values))
extra_test_cols = set(test_df.columns.values).difference(set(train_df.columns.values))
extra_train_cols.remove("SalePrice")
train_df = train_df.drop(columns=extra_train_cols)
test_df = test_df.drop(columns=extra_test_cols)
train_X = train_df.copy().drop(columns=["SalePrice", "Id"])
train_Y = train_df["SalePrice"].copy()
from sklearn.model_selection import train_test_split
train_X, test_X_all, train_Y, test_Y_all = train_test_split(
train_X, train_Y, train_size=0.7, shuffle=True, random_state=42
)
train_X.shape
test_X_all.shape
submission_test_df = test_df.copy()
test_X, validation_X, test_Y, validation_Y = train_test_split(
test_X_all, test_Y_all, train_size=0.6, shuffle=True
)
test_X.shape
validation_X.shape
# # Data Exploration
# ## Distribution of Target Prices
plt.hist(train_Y, bins=40)
plt.title("Distribution of Sale Prices for Train Data")
# The high-priced outliers will skew a model that is trained on squared errors. So, we should log-normalize sale prices to account for this.
train_log_Y = np.log(train_Y)
plt.hist(train_log_Y, bins=40)
plt.title("Distribution of (Log-Scaled) Sale Prices for Train Data")
# ## Feature Correlations
correlations_series = train_df.corrwith(train_log_Y, method="pearson").dropna()
correlations_series
sorted(correlations_series)
plt.bar(np.arange(len(correlations_series)), sorted(correlations_series))
plt.title("Correlation of Individual Features with Target Variable (LogSalePrice)")
plt.ylabel("Correlation (Pearson R)")
plt.xlabel("Feature Index Number")
# **Takeaway**:
# 1. Most features have fairly low correlation so we can impose a fairly strong condition on the number of features used without losing much signal. For regression, this involves a sparsity constraint on the feature vector. For decision trees, this means a maximum depth constraint.
# 2. We have an almost perfect balance between negatively and positively correlated features. This is especially useful for decision trees, as it lets us split both downwards (predicting a lower price) and upwards as we traverse the tree.
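# A small follow-up check: the ten features whose absolute correlation with the
# (log) sale price is largest, using the correlations_series computed above.
correlations_series.abs().sort_values(ascending=False).head(10)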
# # Test Models
# ## Ordinary Least Squares
# The simplest regression technique, and a good baseline for further testing.
from numpy.linalg import lstsq, norm
# set rcond = -1 to use higher precision than the default
lstsq_weights, residuals, train_rank, train_sing_values = lstsq(
train_X, train_log_Y, rcond=-1
)
# Least squares regression uses a **Mean Squared Error** loss function. Mathematically, the vector $y$ is the target variable (log Sale Price) and the matrix $X$ is a collection of feature vectors, so that each house corresponds to a unique row. We solve for $w$ in the equation:
# $$y = Xw$$
# The resulting estimator is denoted $\hat w$, and the loss is then:
# $$ \mathcal{L}(\hat w) = \frac{1}{n} \left\lVert y - X \hat w \right\rVert_2^2$$
# Where $n$ is the number of data points.
lstsq_train_loss = norm(train_X.dot(lstsq_weights) - train_log_Y) ** 2 / len(train_X)
lstsq_train_loss
# Note that this is not the error in predicting housing prices, but rather the *log* of those prices. Here's the corresponding quantity when we undo the logarithm.
norm(np.exp(train_X.dot(lstsq_weights)) - train_Y) / len(train_X)
# So the dollar-scale residual norm divided by the number of houses comes out to about $500, which isn't bad. But we could certainly do better! Finally, let's get the test loss.
test_log_Y = np.log(test_Y)
lstsq_test_loss = norm(test_X.dot(lstsq_weights) - test_log_Y) / len(test_log_Y)
lstsq_test_loss
norm(np.exp(test_X.dot(lstsq_weights)) - test_Y) / len(test_log_Y)
# ## Random Forest
from sklearn.ensemble import RandomForestRegressor
rf_regressor = RandomForestRegressor(n_estimators=100, max_depth=10, random_state=42)
rf_regressor.fit(train_X, train_log_Y)
rf_train_loss = norm(rf_regressor.predict(train_X) - train_log_Y) / len(train_X)
rf_test_loss = norm(rf_regressor.predict(test_X) - test_log_Y) / len(test_Y)
rf_train_loss
rf_test_loss
# Good! A test loss of $0.0104$ is a big improvement over the previous $0.0155$. However, notice that there is an even bigger difference between train/test loss here than in the previous model of OLS, indicating some overfitting.
# ## Dealing with Overfitting
# ### Weaker Random Forest
# Let's use a much weaker RF model with just 10 trees and a max depth of 4.
weak_rf_regressor = RandomForestRegressor(n_estimators=10, max_depth=4, random_state=42)
weak_rf_regressor.fit(train_X, train_log_Y)
weak_rf_train_loss = norm(weak_rf_regressor.predict(train_X) - train_log_Y) / len(
train_X
)
weak_rf_test_loss = norm(weak_rf_regressor.predict(test_X) - test_log_Y) / len(test_X)
weak_rf_train_loss
weak_rf_test_loss
# While the train loss increases as we would expect, this doesn't help since the test loss actually increased slightly! We already have a fairly weak random forest regressor which does not solve the overfitting issue - let's try regularization for linear regression next.
# ### Sparse Regression - Is it worth it?
# First, let's look at what OLS ended up doing for individual feature weights. The idea behind sparse/regularized regression is to ensure that *only a few features are used*, so we need to make sure this isn't already happening.
plt.bar(np.arange(len(lstsq_weights)), sorted(np.abs(lstsq_weights)))
plt.title("Feature Weights for Ordinary Least Squares Regression")
plt.ylabel("Coefficient (Absolute Value)")
# Indeed, it looks like there isn't really a bifurcation of feature weights: a significant portion of the features are used with relatively large weight. That said, most of the total weight is concentrated in the top 50-100 features.
# ### LASSO Regression
# LASSO regression is a particular kind of linear regression in which the $\ell_1$-norm of the weight vector is part of the loss function. Hence there is an incentive to assign lower weights to features, which is what we hope to achieve in order to overcome overfitting.
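# For reference, scikit-learn's Lasso minimizes (with $\alpha$ the regularization strength):
# $$ \mathcal{L}_{\text{LASSO}}(\hat w) = \frac{1}{2n} \left\lVert y - X \hat w \right\rVert_2^2 + \alpha \left\lVert \hat w \right\rVert_1 $$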
from sklearn.linear_model import Lasso
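# Note: this notebook assumes an older scikit-learn; the `normalize` argument used below
# was deprecated in 1.0 and removed in 1.2, where explicit feature scaling (e.g. a
# StandardScaler pipeline) is the suggested replacement, though it is not numerically
# identical to the old behaviour.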
lasso_model = Lasso(
alpha=1.0, normalize=True, fit_intercept=True, tol=1e-6, random_state=42
)
lasso_model.fit(train_X, train_log_Y)
lasso_train_loss = norm(lasso_model.predict(train_X) - train_log_Y) / len(train_X)
lasso_test_loss = norm(lasso_model.predict(test_X) - test_log_Y) / len(test_X)
lasso_train_loss
lasso_test_loss
# Now we're getting somewhere - the train and test loss are relatively close! Unfortunately the test loss is quite high, so we probably overshot a bit. Let's try a weaker regularization (e.g. lower $\alpha$)
lasso_model_001 = Lasso(
alpha=0.01, normalize=True, fit_intercept=True, tol=1e-6, random_state=42
)
lasso_model_001.fit(train_X, train_log_Y)
lasso_train_loss_001 = norm(lasso_model_001.predict(train_X) - train_log_Y) / len(
train_X
)
lasso_test_loss_001 = norm(lasso_model_001.predict(test_X) - test_log_Y) / len(test_X)
lasso_train_loss_001
lasso_test_loss_001
# We get a very slight improvement, but LASSO is still not competitive with the random forest model. Let's try an extremely weak regularization of $\alpha = 10^{-4}$ and see if we get anywhere.
lasso_model_1e4 = Lasso(
alpha=0.0001, normalize=True, fit_intercept=True, tol=1e-6, random_state=42
)
lasso_model_1e4.fit(train_X, train_log_Y)
lasso_train_loss_1e4 = norm(lasso_model_1e4.predict(train_X) - train_log_Y) / len(
train_X
)
lasso_test_loss_1e4 = norm(lasso_model_1e4.predict(test_X) - test_log_Y) / len(test_X)
lasso_train_loss_1e4
lasso_test_loss_1e4
lstsq_test_loss
# Great! Looks like we saw a real improvement with the lower $\alpha$. Now LASSO outperforms OLS on the test loss, although it still is outperformed by random forest models. Let's compare feature weights for OLS and the weakest LASSO to see what the practical effect is in terms of feature importance.
lasso_weights = lasso_model_1e4.coef_
plt.scatter(np.abs(lstsq_weights), np.abs(lasso_weights), s=10, marker="o")
plt.xlabel("Feature Weight for Least Squares")
plt.ylabel("Feature Weight for LASSO")
plt.title("Feature Weights for Regression with/without Regularization")
# **How to interpret this plot**: This mainly tells us that LASSO assigned much lower weights to most features. The almost-solid line along the $x$-axis tells us that many features with nonzero weight in OLS were set to zero by LASSO. Despite this, LASSO has a better test error, so correcting for the overfitting was worth it.
# **Comparison to Random Forest**: Interestingly, while we were able to get a useful improvement for linear regression when using regularization, our weaker random forest model had worse test error. This might indicate a *double descent* type phenomenon, or just that we didn't search hyperparameter space well enough.
# # Model Selection and Result Submission
# So far we have tested 4 regression models:
# 1. Ordinary Least Squares Regression
# 2. LASSO Regression (AKA Sparse Linear Regression)
# 3. Random Forest Regression
# 4. Low-Depth RF Regression (Random Forest with Stronger Sparsity Constraints)
# To select the best-performing model, we need to test on our *validation data* to avoid data leakage.
# ## Compute Validation Error
validation_log_Y = np.log(validation_Y)
lasso_validation_loss_1e4 = norm(
lasso_model_1e4.predict(validation_X) - validation_log_Y
) / len(validation_X)
weak_rf_regressor_validation_loss = norm(
weak_rf_regressor.predict(validation_X) - validation_log_Y
) / len(validation_X)
rf_regressor_validation_loss = norm(
rf_regressor.predict(validation_X) - validation_log_Y
) / len(validation_X)
lstsq_validation_loss = norm(validation_X.dot(lstsq_weights) - validation_log_Y) / len(
validation_X
)
lasso_validation_loss_1e4
weak_rf_regressor_validation_loss
rf_regressor_validation_loss
lstsq_validation_loss
# ## Random Forest Wins
# Although regularized models like LASSO or weaker random forest did reduce the gap between train/test loss, it appears that a random forest model performed best. In terms of insights, what this tells us is that **we need a large fraction of the features to predict well**, and the signal is not concentrated in a few features.
sample_submission_df = pd.read_csv(f"{dat_path}sample_submission.csv")
submission_test_df.shape
train_X.shape
submission_X = submission_test_df.drop(columns=["Id"])
# We need to fill in missing values in the submission DF. Let's use the mean of each feature.
feature_means = np.mean(train_X, axis=0)
submission_X_no_nan = submission_X.fillna(value=feature_means)
submission_X_no_nan.shape
len(lasso_model.coef_)
len(lasso_model_001.coef_)
len(lasso_model_1e4.coef_)
train_X.shape
lasso_predict = lasso_model_1e4.predict(submission_X_no_nan)
lasso_predict
submission_extra_cols = set(submission_X.columns.values).difference(
set(train_X.columns.values)
)
len(lasso_predict)
len(submission_X)
submission_predict = np.exp(lasso_predict)
submission_predict
pd.concat((submission_test_df["Id"], pd.Series(submission_predict, name="SalePrice")), axis=1)
submission_test_df
|
# # list current path & folder
import os
for curtdir, dirs, filenames in os.walk("/kaggle/input"):
print(curtdir)
if "round3-mesonet" in curtdir:
print(filenames)
# # install MTCNN
os.path.exists("/kaggle/input/0130-round3-mesonet/mtcnn-0.1.0-py3-none-any.whl")
# # import libraries
import sys
import numpy as np
import pandas as pd
import cv2
from keras.optimizers import Adam, SGD
from tqdm.notebook import tqdm
import glob
from mtcnn import MTCNN
# use shutil to copy file from input folder to working folder
from shutil import copyfile
copyfile(
src="/kaggle/input/0130-round3-mesonet/mesonet.py", dst="/kaggle/working/mesonet.py"
)
from mesonet import MesoNet
# # parameters
INPUT = "/kaggle/input/deepfake-detection-challenge"
MAX_SKIP = 10
NUM_FRAME = 150
LR_ALPHA = 1e-3
VERSION = "9"
WEIGHTS = {
"1": "weights-038-0.5749-8661.hdf5",
"2": "weights-040-0.6058-17289.hdf5",
"3": "weights-029-0.5417-26165.hdf5",
"5": "weights-065-0.4962-5049.hdf5",
"5a": "weights-051-0.4875-26519.hdf5",
"7": "weights-089-0.6475-14215.hdf5",
"8": "weights-064-0.6275-4920.hdf5",
"9": "weights-015-0.4556-32159.hdf5",
}
WEIGHTS_PATH = os.path.join("/kaggle/input/0130-round3-mesonet", WEIGHTS[VERSION])
print(WEIGHTS_PATH)
# # prepare face crops
test_dir = os.path.join(INPUT, "test_videos/")
filenames = os.listdir(test_dir)
prediction_filenames = list(filenames)  # copy, so removals below do not also mutate filenames
test_video_files = [os.path.join(test_dir, fn) for fn in filenames]
print(test_video_files[:2])
# initiate mtcnn as detector
detector = MTCNN()
def detect_face(image):
# convert to RGB for MTCNN use
img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
final = []
detected_faces_raw = detector.detect_faces(img)
# check if any faces detected
if not detected_faces_raw:
# print("no faces")
return []
# store confidences & bounding box
confidences = []
for n in detected_faces_raw:
x, y, w, h = n["box"]
final.append([x, y, w, h])
confidences.append(n["confidence"])
pass
# check if detector is confident that this is a face
if max(confidences) < 0.7:
return []
# get the most confident detected face bounding box
max_conf_coord = final[confidences.index(max(confidences))]
return max_conf_coord
def crop(image, x, y, w, h):
    # enlarge the bounding box to make sure the whole face is contained
x -= 40
y -= 40
w += 80
h += 80
if x < 0:
x = 0
if y <= 0:
y = 0
crop = image[y : y + h, x : x + w, :]
crop = cv2.resize(crop, (256, 256))
return cv2.cvtColor(crop, cv2.COLOR_BGR2RGB)
def detect_video(video):
# extract the middle frame of a video
camera = cv2.VideoCapture(video)
camera.set(1, NUM_FRAME)
success, vframe = camera.read()
# convert color to RGB & detect face
vframe = cv2.cvtColor(vframe, cv2.COLOR_BGR2RGB)
bounding_box = detect_face(vframe)
    # if no face was found in this frame, try the next 10 frames
    if not bounding_box:
        for i in range(NUM_FRAME + 1, NUM_FRAME + 11):
            success, vframe = camera.read()
            if not success:
                break
            vframe = cv2.cvtColor(vframe, cv2.COLOR_BGR2RGB)
            bounding_box = detect_face(vframe)
            # stop as soon as a face is found
            if len(bounding_box) != 0:
                break
    # check if still no face detected
    if not bounding_box:
        print("no face found")
        prediction_filenames.remove(video.replace(test_dir, ""))
        return None
# got a face
x, y, w, h = bounding_box
camera.release()
return crop(vframe, x, y, w, h)
# generate test_X
test_X = []
# for video in tqdm(test_video_files[:32]):
for video in tqdm(test_video_files):
x = detect_video(video)
# if no face found
if x is None:
continue
test_X.append(x)
pass
# convert test_X to array
test_X = np.array(test_X)
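# quick sanity check on the assembled batch (assumption: every crop is 256x256x3, so the
# array should have shape (num_videos_with_a_detected_face, 256, 256, 3))
print("test_X shape:", test_X.shape)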
# # load the model and weights
model = MesoNet.build(width=256, height=256, depth=3, classes=2)
opt = Adam(lr=LR_ALPHA)
model.compile(
loss="binary_crossentropy",
optimizer=opt,
metrics=["accuracy", "crossentropy"],
)
model.load_weights(WEIGHTS_PATH)
predictions = []
probs = model.predict(test_X, batch_size=32).clip(0.02, 0.98)
probs[:10]
# # fill into submission csv
submission = pd.read_csv(os.path.join(INPUT, "sample_submission.csv"))
# fill in 0.5 as the default label for videos where no face was found
submission["label"] = 0.5
submission.head()
# loop over each filename, fill in probs
for prob, fname in zip(probs, prediction_filenames):
vname = fname.split(os.path.sep)[-1]
submission.loc[submission["filename"] == vname, "label"] = prob[1]
pass
submission.to_csv("submission.csv", index=False)
submission.head()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import os
import tensorflow as tf
print(tf.__version__)
from tensorflow.keras import models, layers, mixed_precision
train_dir = "/kaggle/input/histopathologic-cancer-detection/train"
test_dir = "/kaggle/input/histopathologic-cancer-detection/test"
policy = mixed_precision.Policy("mixed_float16")
mixed_precision.set_global_policy(policy)
print("Compute dtype: %s" % policy.compute_dtype)
print("Variable dtype: %s" % policy.variable_dtype)
print("Traing Number: ", len(os.listdir(train_dir)))
print("Test Number: ", len(os.listdir(test_dir)))
df = pd.read_csv("/kaggle/input/histopathologic-cancer-detection/train_labels.csv")
print(
"Data's target distribution ((1) label num/ (1 + 0) label num): ",
len(df[df.label == 1]) / len(df),
)
df.head()
df.label = df.label.astype(str)
df.id = df.id + ".tif"
print(df.info())
df.head()
w = 10
h = 10
fig = plt.figure(figsize=(15, 15))
columns = 10
rows = 5
for i in range(1, columns * rows + 1):
img = plt.imread(train_dir + "/" + df.iloc[i]["id"])
fig.add_subplot(rows, columns, i)
plt.axis("off")
plt.title(df.iloc[i]["label"])
plt.imshow(img)
plt.show()
plt.figure()
img = plt.imread(train_dir + "/" + df.iloc[0]["id"])
print("Image shape: ", img.shape)
print("Label: ", df.iloc[0]["label"])
plt.imshow(img)
plt.colorbar()
plt.grid(False)
plt.show()
from tensorflow.keras.preprocessing.image import ImageDataGenerator
def get_train_val_generator(train_datagen, df, sample_frac=1.0, bs=64):
df = df.sample(frac=sample_frac, random_state=42)
train_generator = train_datagen.flow_from_dataframe(
dataframe=df,
directory=train_dir,
x_col="id",
y_col="label",
subset="training",
target_size=(96, 96),
batch_size=bs,
class_mode="binary",
)
valid_generator = train_datagen.flow_from_dataframe(
dataframe=df,
directory=train_dir,
x_col="id",
y_col="label",
subset="validation",
target_size=(96, 96),
batch_size=bs,
shuffle=False,
class_mode="binary",
)
return train_generator, valid_generator
def get_model(pretrained_model, preprocess_input):
inputs = tf.keras.Input(shape=(96, 96, 3))
# For feature extraction using transfer learning
x = preprocess_input(inputs)
x = pretrained_model(x)
# For classifier
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dropout(0.2)(x)
x = tf.keras.layers.Dense(64, activation="relu")(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Dense(64, activation="relu")(x)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Dense(1)(x)
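    # keep the final activation in float32: with mixed precision the rest of the model
    # runs in float16, and a float32 output avoids numerical issues in the loss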
outputs = tf.keras.layers.Activation("sigmoid", dtype="float32")(x)
return tf.keras.Model(inputs, outputs)
def fit_model(model, train_generator, valid_generator, epochs=5, callbacks=[]):
return model.fit(
train_generator,
steps_per_epoch=train_generator.n // train_generator.batch_size,
epochs=epochs,
validation_data=valid_generator,
validation_steps=valid_generator.n // valid_generator.batch_size,
use_multiprocessing=True,
workers=4,
callbacks=callbacks,
)
def plt_performance(train, valid, title):
plt.figure(figsize=(10, 10))
plt.subplot(2, 1, 1)
plt.plot(train, label="Training")
plt.plot(valid, label="Validation")
plt.legend(loc="upper left")
plt.ylim([min(plt.ylim()) - 0.1, max(plt.ylim()) + 0.1])
plt.title(title)
train_datagen = ImageDataGenerator(validation_split=0.2)
train_generator, valid_generator = get_train_val_generator(
train_datagen, df, sample_frac=0.3
)
preprocess_mobile = tf.keras.applications.mobilenet_v2.preprocess_input
mobilenet_v2 = tf.keras.applications.MobileNetV2(
input_shape=(96, 96, 3), include_top=False, weights="imagenet"
)
preprocess_res = tf.keras.applications.resnet_v2.preprocess_input
resnet_v2 = tf.keras.applications.ResNet152V2(
input_shape=(96, 96, 3), include_top=False, weights="imagenet"
)
preprocess_incep = tf.keras.applications.inception_resnet_v2.preprocess_input
incep_v2 = tf.keras.applications.InceptionResNetV2(
input_shape=(96, 96, 3), include_top=False, weights="imagenet"
)
preprocess_dense = tf.keras.applications.densenet.preprocess_input
dense = tf.keras.applications.DenseNet201(
input_shape=(96, 96, 3), include_top=False, weights="imagenet"
)
preprocess_eff = tf.keras.applications.efficientnet.preprocess_input
effnet_b2 = tf.keras.applications.EfficientNetB2(
input_shape=(96, 96, 3), include_top=False, weights="imagenet"
)
models = [
(mobilenet_v2, preprocess_mobile),
(resnet_v2, preprocess_res),
(incep_v2, preprocess_incep),
    (dense, preprocess_dense),
(effnet_b2, preprocess_eff),
]
for pretrained_model, preprocess in models:
model = get_model(pretrained_model, preprocess)
model.compile(
optimizer="adam",
loss="binary_crossentropy",
metrics=["accuracy"],
)
val_loss, val_acc = model.evaluate(valid_generator)
print("\nPretrained Model: ", pretrained_model.name)
print("Val Loss: ", val_loss)
print("Val Acc: ", val_acc)
resnet_v2.trainable = True
print("Number of layers in the base net: ", len(resnet_v2.layers))
fine_tune_at = round(len(resnet_v2.layers) * 0.9)
print("Mobile model would be trainable from ", fine_tune_at)
for l in resnet_v2.layers[:fine_tune_at]:
l.trainable = False
base_lr = 3e-3
model = get_model(resnet_v2, preprocess_res)
model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=base_lr),
loss=tf.keras.losses.BinaryCrossentropy(label_smoothing=0.1),
metrics=["accuracy"],
)
print("Model trainable param number: ", len(model.trainable_variables))
model.summary()
tf.keras.utils.plot_model(model, show_shapes=True)
# Training
decay_steps = 20
lr_decayed_fn = tf.keras.optimizers.schedules.CosineDecay(base_lr, decay_steps)
callbacks = [tf.keras.callbacks.LearningRateScheduler(lr_decayed_fn)]
history = fit_model(
model, train_generator, valid_generator, epochs=3, callbacks=callbacks
)
# Evaluating
plt_performance(
history.history["accuracy"], history.history["val_accuracy"], "Train/Valid Accuracy"
)
plt_performance(
history.history["loss"], history.history["val_loss"], "Train/Valid Loss"
)
train_datagen = ImageDataGenerator(
validation_split=0.2,
vertical_flip=True,
horizontal_flip=True,
width_shift_range=0.2,
height_shift_range=0.2,
zoom_range=0.2,
)
train_aug_generator, valid_aug_generator = get_train_val_generator(
train_datagen, df, sample_frac=0.3
)
# Training
model = None
model = get_model(effnet_b2, preprocess_eff)
model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=base_lr),
loss=tf.keras.losses.BinaryCrossentropy(label_smoothing=0.1),
metrics=["accuracy"],
)
history = fit_model(
model, train_aug_generator, valid_aug_generator, epochs=3, callbacks=callbacks
)
# Evaluating
plt_performance(
history.history["accuracy"], history.history["val_accuracy"], "Train/Valid Accuracy"
)
plt_performance(
history.history["loss"], history.history["val_loss"], "Train/Valid Loss"
)
x_train, y_train = train_aug_generator.next()
x_val, y_val = valid_aug_generator.next()
import keras_tuner as kt
class MyHyperModel(kt.HyperModel):
def build(self, hp):
model = get_model(resnet_v2, preprocess_res)
model.compile(
optimizer=tf.keras.optimizers.Adam(
learning_rate=hp.Float(
"learning_rate", min_value=1e-4, max_value=1e-2, sampling="log"
)
),
loss=tf.keras.losses.BinaryCrossentropy(label_smoothing=0.1),
metrics=["accuracy"],
)
return model
tuner = kt.RandomSearch(MyHyperModel(), objective="val_loss", max_trials=10)
tuner.search(
x_train,
y_train,
validation_data=(x_val, y_val),
epochs=10,
callbacks=[tf.keras.callbacks.EarlyStopping(patience=2)],
)
best_hps = tuner.get_best_hyperparameters(1)[0]
print("Best Learning Rate: ", best_hps.get("learning_rate"))
train_full_generator, valid_full_generator = get_train_val_generator(
train_datagen, df, sample_frac=1
)
# best_lr = 0.009
# model = None
# model = get_model(effnet_b2, preprocess_eff)
# model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=best_lr),
# loss=tf.keras.losses.BinaryCrossentropy(label_smoothing=0.1),
# metrics=["accuracy"],)
# lr_decayed_fn = tf.keras.optimizers.schedules.CosineDecay(best_lr, 25)
# callbacks = [tf.keras.callbacks.LearningRateScheduler(lr_decayed_fn), tf.keras.callbacks.EarlyStopping(patience=2)]
# history = fit_model(model, train_full_generator, valid_full_generator, epochs=12, callbacks=callbacks)
# Evaluating
plt_performance(
history.history["accuracy"], history.history["val_accuracy"], "Train/Valid Accuracy"
)
plt_performance(
history.history["loss"], history.history["val_loss"], "Train/Valid Loss"
)
df_test = pd.read_csv(
"/kaggle/input/histopathologic-cancer-detection/sample_submission.csv"
)
df_test.id = df_test.id + ".tif"
test_generator = ImageDataGenerator().flow_from_dataframe(
dataframe=df_test,
directory=test_dir,
x_col="id",
y_col=None,
target_size=(96, 96),
batch_size=2,
shuffle=False,
class_mode=None,
)
test_generator.reset()
preds = model.predict(test_generator, verbose=1)
preds.shape
submission = pd.DataFrame()
submission["id"] = df_test["id"].apply(lambda x: x.split(".")[0])
submission["label"] = preds[:, 0]
submission.to_csv("submission.csv", index=False)
submission.head()
|
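# Scratch cell: build a matrix where entry (i, j) equals values[j] - values[i] + 1.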
values = [1, 2, 3, 4]
matrix = []
for i, val_i in enumerate(values):
inner_list = []
for j, val_j in enumerate(values):
inner_list.append(val_j - val_i + 1)
matrix.append(inner_list)
matrix
# Importing Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import norm
from sklearn.preprocessing import StandardScaler
from scipy import stats
import warnings
warnings.filterwarnings("ignore")
pd.set_option("display.max_rows", 500)
pd.set_option("display.max_columns", 500)
pd.set_option("display.width", 1000)
# File Name
import os
print(os.listdir("../input/fifa19"))
# OR
# File Path
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
df = pd.read_csv("/kaggle/input/fifa19/data.csv")
df.head()
# Shape of dataframe
print(" Shape of dataframe: ", df.shape)
# There are 18206 players for which 89 features each are provided
# Drop duplicates (assign back, otherwise the dataframe is unchanged)
df = df.drop_duplicates()
print(" Shape of dataframe after dropping duplicates: ", df.shape)
# #### No duplicate entries
# Variable inspection
print("Names of columns ", list(df.columns))
# #### Unnamed is index duplicate and is not required: Drop unnamed
df = df.drop(columns="Unnamed: 0")
# # Let's understand the variables
# ## Player demographics: 'ID', 'Name', 'Age', 'Photo', 'Nationality', 'Flag', 'Club', 'Club Logo', 'Value', 'Wage','Preferred Foot' , 'Position', 'Jersey Number', 'Joined', 'Loaned From', 'Contract Valid Until', 'Height', 'Weight', 'Release Clause'
# ## Player position rating: 'LS', 'ST', 'RS', 'LW', 'LF', 'CF', 'RF', 'RW', 'LAM', 'CAM', 'RAM', 'LM', 'LCM', 'CM', 'RCM', 'RM', 'LWB', 'LDM', 'CDM', 'RDM', 'RWB', 'LB', 'LCB', 'CB', 'RCB', 'RB'
# ## Player rating: 'Overall', 'Potential'
# ## Rating by player skill/strength:
# #### Physical rating: 'Acceleration', 'SprintSpeed', 'Agility', 'Reactions', 'Balance', 'Jumping', 'Stamina', 'Strength'
# #### Technical rating: 'Crossing', 'Finishing', 'HeadingAccuracy', 'ShortPassing','Volleys', 'Dribbling', 'Curve', 'FKAccuracy', 'LongPassing', 'BallControl', 'ShotPower', 'LongShots', 'Penalties', 'Marking','StandingTackle', 'SlidingTackle'
# #### Mental rating: 'Aggression', 'Interceptions', 'Positioning', 'Vision','Composure'
# #### GK rating: 'GKDiving', 'GKHandling', 'GKKicking', 'GKPositioning', 'GKReflexes'
# ## Rating by game play:
# #### PACE: 'Acceleration', 'SprintSpeed'
# #### SHOOTING: 'Finishing', 'LongShots', 'Penalties', 'Positioning', 'ShotPower', 'Volleys'
# #### PASSING: 'Crossing', 'Curve', 'FKAccuracy', 'LongPassing', 'ShortPassing', 'Vision'
# #### DRIBBLING: 'Agility', 'Reactions', 'Balance', 'BallControl','Composure', 'Dribbling','Reactions'
# #### DEFENDING: 'HeadingAccuracy','Interceptions', 'Marking', 'StandingTackle', 'SlidingTackle'
# #### PHYSICAL: 'Acceleration', 'Jumping', 'Stamina', 'Strength'
# #### GOALKEEPING: 'GKDiving', 'GKHandling', 'GKKicking', 'GKPositioning', 'GKReflexes'
# #### https://www.fifauteam.com/fifa-18-attributes-guide/
# #### https://www.fifauteam.com/fifa-19-attributes-guide/
# ## Other: 'Special', 'International Reputation', 'Weak Foot', 'Skill Moves', 'Work Rate', 'Body Type', 'Real Face'
#
# Null values
null = df.isnull().sum().sort_values(ascending=False)
total = df.shape[0]
percent_missing = (df.isnull().sum() / total * 100).sort_values(ascending=False)
missing_data = pd.concat(
[null, percent_missing], axis=1, keys=["Total missing", "Percent missing"]
)
missing_data.reset_index(inplace=True)
missing_data = missing_data.rename(columns={"index": " column name"})
print("Null Values in each column:\n", missing_data)
# ## Missing values:
# #### 1) Since a big chunk of players are not loaned to other clubs, hence there are 16943 missing values out of 18206 i.e. they play for their own clubs.
# #### 2) The columns describing the position a player plays in share the same fraction of missing values (11.45%); this needs to be explored.
# #### 3) Release clause, joined, contract valid until, club, position, jersey number have missing values and need to be explored.
# #### 4) The player rating attributes share the same fraction of missing values (0.26%)
#
# Filtering data with null values for position
df_isnull = pd.isnull(df["LB"])
pos_null = df[df_isnull]
print(pos_null.shape)
print(pos_null.isnull().sum().sort_values(ascending=False))
# ### Position, jersey number, and all player rating attributes are entirely null within this subset (consistent with the null-value counts found above)
# ## Hence, they can be dropped
# Filtering relevant data & checking for club null values
df_notnull = pd.notnull(df["LB"])
df = df[df_notnull]
df_isnull = pd.isnull(df["Club"])
pos_null = df[df_isnull]
print(pos_null.shape)
print(pos_null.isnull().sum().sort_values(ascending=False))
# ### Release clause, club, contract valid until, and joined have the same number of missing values - the club-related info is absent for these rows, so they add nothing for our model and can be dropped
df_notnull = pd.notnull(df["Club"])
df = df[df_notnull]
print(df.shape)
print(df.isnull().sum().sort_values(ascending=False))
# ### Remaining null values
# #### 1) As stated earlier, a big chunk of players are not loaned to other clubs, hence there are 16943 (now 14751) missing values out of 18206 (now 15926) i.e. they play for their own clubs.
# #### 2) There might not be any release clause set in the player contract and the player might be academy produced- hence no info on Joined is available.
# #### And it is okay to move forward with this dataset.
print(df.info())
unique_position = df.Position.unique()
unique_position
# Creating subsets according to playing positions (players whose Position is in each group)
attack = ["CF", "LF", "LS", "LW", "RF", "RS", "RW", "ST"]
df_attack = df[df.Position.isin(attack)]
print(df_attack.shape)
defense = ["RWB", "RCB", "RB", "LWB", "LCB", "LB", "CB"]
df_defense = df[df.Position.isin(defense)]
print(df_defense.shape)
mid = ["RM", "RDM", "RCM", "RAM", "LM", "LDM", "LAM", "LCM", "CM", "CDM", "CAM"]
df_mid = df[df.Position.isin(mid)]
print(df_mid.shape)
gk = ["GK"]
df_gk = df[df.Position.isin(gk)]
print(df_gk.shape)
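# A quick summary of the subsets above (the 'Overall' column is the player rating):
# average overall rating for each position group.
for name, sub in [("attack", df_attack), ("defense", df_defense), ("mid", df_mid), ("gk", df_gk)]:
    print(name, round(sub["Overall"].mean(), 2))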
|
import matplotlib # 2D Plotting Library
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import seaborn as sns # Python Data Visualization Library based on matplotlib
import geopandas as gpd # Python Geospatial Data Library
plt.style.use("fivethirtyeight")
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# 
# 
# Coronaviruses (CoV) are a large family of viruses that cause illness ranging from the common cold to more severe diseases such as Middle East Respiratory Syndrome (MERS-CoV) and Severe Acute Respiratory Syndrome (SARS-CoV). A novel coronavirus (nCoV) is a new strain that has not been previously identified in humans.
# Coronaviruses are zoonotic, meaning they are transmitted between animals and people. Detailed investigations found that SARS-CoV was transmitted from civet cats to humans and MERS-CoV from dromedary camels to humans. Several known coronaviruses are circulating in animals that have not yet infected humans.
# Common signs of infection include respiratory symptoms, fever, cough, shortness of breath and breathing difficulties. In more severe cases, infection can cause pneumonia, severe acute respiratory syndrome, kidney failure and even death.
# Standard recommendations to prevent infection spread include regular hand washing, covering mouth and nose when coughing and sneezing, thoroughly cooking meat and eggs. Avoid close contact with anyone showing symptoms of respiratory illness such as coughing and sneezing.
df = pd.read_csv("/kaggle/input/novel-corona-virus-2019-dataset/2019_nCoV_data.csv")
df.tail()
print(df.columns.values)
df.info()
del df["Sno"]
df.describe()
df.describe(include=["O"])
# Finding the correlation among columns
fig = plt.gcf()
fig.set_size_inches(10, 7)
fig = sns.heatmap(
df.corr(),
annot=True,
cmap="cubehelix",
linewidths=1,
linecolor="k",
square=True,
mask=False,
vmin=-1,
vmax=1,
cbar_kws={"orientation": "vertical"},
cbar=True,
)
df.describe().plot(
kind="area", fontsize=27, figsize=(20, 8), table=True, colormap="rainbow"
)
plt.xlabel(
"Statistics",
)
plt.ylabel("Value")
plt.title("General Statistics of Corona Virus Dataset")
fig = sns.jointplot(x="Confirmed", y="Recovered", data=df, kind="reg")
import seaborn as sns
sns.set(style="ticks", color_codes=True)
g = sns.pairplot(df)
df["Date"] = pd.to_datetime(df["Date"])
df["Last Update"] = pd.to_datetime(df["Last Update"])
df["Day"] = df["Date"].dt.day
df["Month"] = df["Date"].dt.month
df["Week"] = df["Date"].dt.week
df["WeekDay"] = df["Date"].dt.weekday
# Layout Customization
displayed_cols = ["Confirmed", "Deaths", "Recovered"]
def fig_plot():
fig = plt.figure(constrained_layout=True, figsize=(45, 18))
grid = gridspec.GridSpec(ncols=4, nrows=2, figure=fig)
ax1 = fig.add_subplot(grid[0, :2])
ax1.set_title("Daily Reports")
df.groupby(["Date"]).sum()[displayed_cols].plot(ax=ax1)
fig_plot()
# Layout Customization
displayed_cols = ["Confirmed", "Deaths", "Recovered"]
def fig_plot():
fig = plt.figure(constrained_layout=True, figsize=(18, 9))
grid = gridspec.GridSpec(ncols=4, nrows=2, figure=fig)
ax1 = fig.add_subplot(grid[0, :2])
ax1.set_title("Weekly Reports")
weekdays = df.groupby("Week").nth(-1)["Date"]
df[df["Date"].isin(weekdays)].groupby("Date")[displayed_cols].sum().plot(
kind="density", ax=ax1
)
fig_plot()
# f,ax=plt.subplots(1,2,figsize=(28,8))
# df['Country'].value_counts().plot.pie(ax=ax[0],shadow=True)
# ax[0].set_title('Share of Countries')
# ax[0].set_ylabel('Count')
# sns.countplot('Country',data=df,ax=ax[1],order=df['Country'].value_counts().index)
# ax[1].set_title('Count of Country')
# plt.show()
df_countries = pd.DataFrame(
df.Country.value_counts().reset_index().values, columns=["country", "count"]
)
df_countries.head()
# Number of records reported per country in the dataset
import plotly.graph_objects as go
fig = go.Figure(
go.Bar(
x=df_countries["count"].tolist(),
y=df_countries["country"].tolist(),
orientation="h",
)
)
fig.show()
date = pd.DataFrame(
df.Date.value_counts().reset_index().values, columns=["Date", "Count"]
)
date.head()
# Deaths due to Coronavirus by date
import plotly.express as px
# df = date
fig = px.scatter(df, x="Date", y="Deaths")
# If you print fig, you'll see that it's just a regular figure with data and layout
# print(fig)
fig.show()
import plotly.express as px
# df = px.data.gapminder().query("continent == 'Europe' and year == 2007 and pop > 2.e6")
fig = px.bar(df, y="Recovered", x="Date", text="Confirmed")
fig.update_traces(texttemplate="%{text:.2s}", textposition="outside")
fig.update_layout(uniformtext_minsize=8, uniformtext_mode="hide")
fig.show()
import plotly.express as px
import plotly.express as px
# df = px.data.gapminder().query("country=='Brazil'")
fig = px.line_3d(df, x="Confirmed", y="Deaths", z="Recovered", color="Country")
fig.show()
|
# # Contents
# * Some EDA
# * Several classifiers: SVC, KNN, GaussianProcessClassifier, ExtraTreesClassifier
# * VotingClassifier
# Inspired by several public discussions and notebooks, mainly from [AmbrosM](https://www.kaggle.com/ambrosm)
# In this notebook, I took care not to overfit, because we have only a little training data.
# The more features we use for training, the more we will overfit.
# And as [AmbrosM](https://www.kaggle.com/ambrosm) explained, I used repeated k-folds, repeated over several seeds, to be sure of my choices.
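# A minimal, self-contained sketch of that idea on synthetic data (make_classification and a
# plain LogisticRegression here, purely for illustration; the real CV helper used in this
# notebook is defined further below):
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import RepeatedStratifiedKFold, cross_val_score
X_demo, y_demo = make_classification(n_samples=400, n_features=6, random_state=0)
for demo_seed in [0, 1, 2]:
    demo_cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=5, random_state=demo_seed)
    demo_scores = cross_val_score(
        LogisticRegression(max_iter=1000), X_demo, y_demo, cv=demo_cv, scoring="roc_auc"
    )
    print(f"seed {demo_seed}: mean AUC {demo_scores.mean():.4f} +/- {demo_scores.std():.4f}")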
# # Libraries
SEED = 1984
target = "target"
import numpy as np
import pandas as pd
pd.set_option("display.max_columns", 100)
pd.set_option("display.max_rows", 200)
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.stats import spearmanr
from scipy.cluster import hierarchy
from scipy.spatial.distance import squareform
from itertools import product
from sklearn.model_selection import RepeatedStratifiedKFold
from sklearn.metrics import roc_auc_score
from sklearn.pipeline import make_pipeline, Pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import (
StandardScaler,
PolynomialFeatures,
MinMaxScaler,
RobustScaler,
FunctionTransformer,
)
from sklearn.kernel_approximation import Nystroem
from sklearn.calibration import CalibrationDisplay, CalibratedClassifierCV
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.ensemble import VotingClassifier, ExtraTreesClassifier
# # Data & EDA
train = pd.read_csv("../input/playground-series-s3e12/train.csv", index_col="id")
test = pd.read_csv("../input/playground-series-s3e12/test.csv", index_col="id")
print(f"Shape for Train {train.shape} and Test {test.shape}")
print(
    f"Nan values in Train : {train[test.columns].isna().sum().sum()} | in Test : {test.isna().sum().sum()}"
)
print(f"Available columns for training : \n {test.columns}")
train.head()
origin = pd.read_csv(
"/kaggle/input/kidney-stone-prediction-based-on-urine-analysis/kindey stone urine analysis.csv"
)
print(f"Shape for origin {origin.shape}")
print(f"Nan values in origin : {origin[test.columns].isna().sum().sum()}")
# ## Target
fig, ax = plt.subplots(1, 2, figsize=(15, 2))
ax = ax.flatten()
train[target].value_counts().sort_index().plot.barh(ax=ax[0], color="skyblue").set(
title="Target in Train"
)
ax[0].bar_label(ax[0].containers[0], fmt="%.2d", padding=2)
(train[target].value_counts(normalize=True) * 100).sort_index().plot.barh(
ax=ax[1], color="skyblue"
).set(title="% Class in Train")
ax[1].bar_label(ax[1].containers[0], fmt="%.1f%%", padding=2)
for i in range(2):
ax[i].spines[["right", "bottom"]].set_visible(False)
ax[i].xaxis.set_ticks_position("top")
fig, ax = plt.subplots(1, 2, figsize=(15, 2))
ax = ax.flatten()
origin[target].value_counts().sort_index().plot.barh(ax=ax[0], color="gold").set(
title="Target in Origin"
)
ax[0].bar_label(ax[0].containers[0], fmt="%.2d", padding=2)
(origin[target].value_counts(normalize=True) * 100).sort_index().plot.barh(
ax=ax[1], color="gold"
).set(title="% Class in Origin")
ax[1].bar_label(ax[1].containers[0], fmt="%.1f%%", padding=2)
for i in range(2):
ax[i].spines[["right", "bottom"]].set_visible(False)
ax[i].xaxis.set_ticks_position("top")
# ## Other columns
fig, ax = plt.subplots(1, 3, figsize=(15, 5), sharey=True)
train[[f for f in test.columns]].nunique().plot.barh(ax=ax[0], color="skyblue").set(
title="Unique values per column in Train"
)
ax[0].bar_label(ax[0].containers[0], fmt="%.1d", padding=2)
test.nunique().plot.barh(ax=ax[1], color="g").set(
title="Unique values per column in Test"
)
ax[1].bar_label(ax[1].containers[0], fmt="%.1d", padding=2)
origin[[f for f in test.columns]].nunique().plot.barh(ax=ax[2], color="gold").set(
title="Unique values per column in Origin"
)
ax[2].bar_label(ax[2].containers[0], fmt="%.1d", padding=2)
for i in range(3):
ax[i].spines[["right", "bottom"]].set_visible(False)
ax[i].xaxis.set_ticks_position("top")
for df, name, color in zip(
[train, test, origin], ["Train", "Test", "Origin"], ["skyblue", "green", "gold"]
):
fig, ax = plt.subplots(1, 3, figsize=(15, 3), sharey=True)
    plt.suptitle(f"Min/Max in {name}", y=1.2, fontsize=20)
df[[f for f in test.columns]].min().plot.barh(ax=ax[0], color=color).set(
title=f"Min in {name}"
)
ax[0].bar_label(ax[0].containers[0], fmt="%.2f", padding=2)
df[[f for f in test.columns]].median().plot.barh(ax=ax[1], color=color).set(
title=f"Median in {name}"
)
ax[1].bar_label(ax[1].containers[0], fmt="%.2f", padding=2)
df[[f for f in test.columns]].max().plot.barh(ax=ax[2], color=color).set(
title=f"Max in {name}"
)
ax[2].bar_label(ax[2].containers[0], fmt="%.2f", padding=2)
for i in range(3):
ax[i].spines[["right", "bottom"]].set_visible(False)
ax[i].xaxis.set_ticks_position("top")
df_temp1 = pd.concat(
[train.loc[train[target] == 1], train.loc[train[target] == 0]], axis=0
)
df_temp2 = pd.concat(
[test, train[test.columns].sample(frac=test.shape[0] / train.shape[0])], axis=0
)
df_temp2["is_test"] = 0
df_temp2.loc[test.index, "is_test"] = 1
fig, ax = plt.subplots(len(test.columns), 4, figsize=(16, len(test.columns) * 3))
for i, f in enumerate(test.columns):
if i == 0:
legend = True
else:
legend = False
sns.kdeplot(data=df_temp1, hue="target", x=f, legend=legend, ax=ax[i, 0])
sns.boxplot(
data=train, x="target", y=f, ax=ax[i, 1], palette=["skyblue", "lightsalmon"]
)
sns.boxplot(
data=origin, x="target", y=f, ax=ax[i, 2], palette=["skyblue", "lightsalmon"]
)
sns.kdeplot(data=df_temp2, hue="is_test", x=f, legend=legend, ax=ax[i, 3])
ax[i, 1].set_title(f"{f}", loc="right", weight="bold", fontsize=20)
ax[i, 1].set_xlabel("Target in Train", fontsize=10)
ax[i, 2].set_xlabel("Target in Origin", fontsize=10)
for g in range(4):
ax[i, g].spines[["top", "right"]].set_visible(False)
# fig.legend([1, 0], loc='upper left', fontsize = 10, ncol=3, bbox_to_anchor=(0.12, 1))
# fig.legend(["train", "test"], loc='upper right', fontsize = 10, ncol=3, bbox_to_anchor=(0.9, 1))
plt.tight_layout()
plt.show()
# ## Conclusion
# * pH seems to be useless
# * features are not on the same scale: transformations should be necessary
# * *train* and *origin* seem to be quite similar
# * *train* and *test* seem to be similar
# ## Prediction with calc
# Due to https://www.kaggle.com/code/seascape/target-calc
# AUC on public with **calc** only is 0.8573
for f in test.columns:
print(f"AUC with {f} only : {roc_auc_score(train[target], train[f]):.4f}")
# **calc** is the most important feature.
auc_benchmark = roc_auc_score(train[target], train["calc"])
auc_public_lb = 0.8573
print(f"AUC with train['calc'] : {auc_benchmark:.5f}")
for v in [0.82, 0.821, 0.822]:
print(
f"Expected AUC on public LB with {v} by CV : {auc_public_lb * v / auc_benchmark:.5f}"
)
# ## Correlation
fig, ax = plt.subplots(1, 3, figsize=(15, 5))
for i, (df, t) in enumerate(zip([train, origin, test], ["Train", "Origin", "Test"])):
matrix = df[test.columns].corr()
sns.heatmap(
matrix, annot=True, fmt=".1f", cmap="coolwarm", mask=np.triu(matrix), ax=ax[i]
)
ax[i].set_title(f"Correlations in {t}", fontsize=15)
# The least correlated column to **calc**, among the features correlated with **target**, is **cond**. **cond** should bring additional information to **calc**.
fig, ax = plt.subplots(
len(test.columns), len(test.columns) - 1, figsize=(16, (len(test.columns) - 1) * 2)
)
plt.subplots_adjust(hspace=0.4, wspace=0.3)
for i, c1 in enumerate(test.columns):
for j, c2 in enumerate(test.columns[:-1]):
if j < i:
sns.scatterplot(
data=train, x=c1, y=c2, hue=target, legend=False, ax=ax[i - 1, j]
)
ax[i - 1, j].spines[["top", "right"]].set_visible(False)
ax[i - 1, j].set(xticklabels=[], yticklabels=[])
ax[i - 1, j].set_xlabel(c1, fontsize=9)
ax[i - 1, j].set_ylabel(c2, fontsize=9)
else:
fig.delaxes(ax[i - 1, j])
fig.legend([0, 1], loc="upper center", fontsize=10, ncol=3, bbox_to_anchor=(0.8, 1))
plt.tight_layout()
plt.show()
# # Feature engineering
# **sum** was inspired by [broccoli beef](https://www.kaggle.com/siukeitin) comment in https://www.kaggle.com/competitions/playground-series-s3e12/discussion/401344
for df in [train, origin, test]:
df["sum"] = df["calc"] + df["gravity"] - 8.20807805874e-3 * df["cond"]
    # on the same scale as calc
df["gravity2"] = (df["gravity"] - 1) * 350
for c in ["calc", "gravity", "osmo", "cond", "urea"]:
df[f"log_{c}"] = np.log(df[c])
for c in ["gravity", "osmo", "cond", "urea"]:
df[f"calc/{c}"] = df["calc"] / (1 + df["calc"] + df[c])
for c in ["gravity", "osmo", "urea"]:
df[f"cond/{c}"] = df["cond"] / (1 + df["cond"] + df[c])
feats = [
"calc",
"gravity2",
"log_calc",
"cond",
"calc/cond",
"gravity",
"cond/gravity",
"calc/gravity",
"sum",
]
df_temp1 = pd.concat(
[train.loc[train[target] == 1], train.loc[train[target] == 0]], axis=0
)
df_temp2 = pd.concat(
[test, train[test.columns].sample(frac=test.shape[0] / train.shape[0])], axis=0
)
df_temp2["is_test"] = 0
df_temp2.loc[test.index, "is_test"] = 1
fig, ax = plt.subplots(len(feats), 4, figsize=(16, len(feats) * 3))
for i, f in enumerate(feats):
if i == 0:
legend = True
else:
legend = False
sns.kdeplot(data=df_temp1, hue="target", x=f, legend=legend, ax=ax[i, 0])
sns.boxplot(
data=train, x="target", y=f, ax=ax[i, 1], palette=["skyblue", "lightsalmon"]
)
sns.boxplot(
data=origin, x="target", y=f, ax=ax[i, 2], palette=["skyblue", "lightsalmon"]
)
sns.kdeplot(data=df_temp2, hue="is_test", x=f, legend=legend, ax=ax[i, 3])
ax[i, 1].set_title(f"{f}", loc="right", weight="bold", fontsize=20)
ax[i, 1].set_xlabel("Target in Train", fontsize=10)
ax[i, 2].set_xlabel("Target in Origin", fontsize=10)
for g in range(4):
ax[i, g].spines[["top", "right"]].set_visible(False)
# fig.legend([1, 0], loc='upper left', fontsize = 10, ncol=3, bbox_to_anchor=(0.12, 1))
# fig.legend(["train", "test"], loc='upper right', fontsize = 10, ncol=3, bbox_to_anchor=(0.9, 1))
plt.tight_layout()
plt.show()
# # Functions for CV
# Inspired by **score_model** function in https://www.kaggle.com/code/ambrosm/pss3e10-7-winning-model#Cross-validation by [AmbrosM](https://www.kaggle.com/ambrosm)
results = []
def cv_score(
model,
features,
label=None,
add_origin=False,
random_state=SEED,
plot_model=False,
verbose=False,
):
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=20, random_state=random_state)
trn_scores, val_scores = [], []
oof = np.zeros_like(train[target], dtype=float)
for fold, (trn_idx, val_idx) in enumerate(cv.split(train, train[target])):
X_trn, X_val = train.iloc[trn_idx][features], train.iloc[val_idx][features]
y_trn, y_val = train.iloc[trn_idx][target], train.iloc[val_idx][target]
if add_origin:
model.fit(
pd.concat([X_trn, origin[features]], axis=0),
pd.concat([y_trn, origin[target]], axis=0),
)
else:
model.fit(X_trn, y_trn)
use_predict, use_predict_proba1 = False, False
m = model
if type(m) == Pipeline:
m = m.steps[-1][1]
if type(m) == CalibratedClassifierCV:
m = m.calibrated_classifiers_[0].base_estimator
# if type(m) == LogisticGAM: use_predict_proba1 = True
y_trn_pred = (
model.predict(X_trn)
if use_predict
else model.predict_proba(X_trn)
if use_predict_proba1
else model.predict_proba(X_trn)[:, 1]
)
y_val_pred = (
model.predict(X_val)
if use_predict
else model.predict_proba(X_val)
if use_predict_proba1
else model.predict_proba(X_val)[:, 1]
)
oof[val_idx] += y_val_pred / 20
trn_scores.append(roc_auc_score(y_trn, y_trn_pred))
val_scores.append(roc_auc_score(y_val, y_val_pred))
if verbose:
print(
f"Fold {fold+1}: AUC = {val_scores[fold]:.5f} Overfitting : {trn_scores[fold] - val_scores[fold]:.5f}"
)
_mean_overfit = np.mean(np.array(trn_scores) - np.array(val_scores))
print(
f"Valid {np.mean(val_scores):.5f} Training {np.mean(trn_scores):.4f} Overfitting {_mean_overfit:.4f} Std {np.std(val_scores):.4f}"
)
if label is not None:
if label in [f[0] for f in results]:
del results[[f[0] for f in results].index(label)]
results.append(
(
label,
model,
np.mean(val_scores),
_mean_overfit,
np.std(val_scores),
oof,
add_origin,
features,
)
)
if plot_model:
display_model(label, oof)
def display_model(label, oof):
"""Plot two diagrams with the oof values (calibration and histogram)"""
fig, ax = plt.subplots(1, 2, figsize=(10, 4))
plt.suptitle(label, y=1.0, fontsize=20)
ax[0].set_title("Calibration")
CalibrationDisplay.from_predictions(
train[target], oof, n_bins=50, strategy="quantile", ax=ax[0]
)
ax[1].set_title("Histogram")
ax[1].hist(oof, bins=100)
for i in range(2):
ax[i].spines[["top", "right"]].set_visible(False)
# # SVC
model_svc1 = SVC(
C=0.3, gamma="scale", probability=True, random_state=SEED, kernel="rbf"
)
cv_score(model_svc1, features=["calc"], label="svc1", add_origin=False, plot_model=True)
# for seed in [42, 666, 1984, 911, 69]:
# for g in [.1, "scale"]:
# cv_score(SVC(C = .3, gamma = g, probability=True, random_state = seed, kernel = "rbf"), features=["calc"], random_state=seed)
model_svc2 = SVC(
C=0.226, gamma="scale", probability=True, random_state=SEED, kernel="rbf"
)
cv_score(model_svc2, features=["sum"], label="svc2", add_origin=False, plot_model=True)
# model_svc3 = SVC(C = .01, gamma = "scale", probability=True, random_state = SEED, kernel = "rbf")
model_svc3 = SVC(C=0.01, gamma=1.2, probability=True, random_state=SEED, kernel="rbf")
cv_score(model_svc3, features=["log_calc"], add_origin=False, plot_model=True)
cv_score(
make_pipeline(
RobustScaler(),
SVC(C=0.4, gamma=0.5, probability=True, random_state=SEED, kernel="rbf"),
),
features=["cond/gravity", "calc", "calc/gravity"],
add_origin=True,
)
# It's overfitting more than *svc1*
# for e in product([.3, .35, .4, .45], [.4, .45, .5, .55]):
# print(e[0], e[1])
# cv_score(make_pipeline(RobustScaler(), SVC(C=e[0], gamma=e[1], probability=True, random_state = SEED, kernel = "rbf"))
# , features=["calc/gravity", "calc"], add_origin = False, plot_model = True)
# # KNN
model_knn = make_pipeline(
FunctionTransformer(lambda X: X * np.array([[0.009, 1]])), KNeighborsClassifier(64)
)
cv_score(
model_knn, features=["cond", "calc"], label="knn", add_origin=False, plot_model=True
)
# for n in range(57, 70):
# model_knn2 = make_pipeline(FunctionTransformer(lambda X: X * np.array([[0.009, 1, .02]])), KNeighborsClassifier(n))
# cv_score(model_knn2, features=["cond", "calc", "gravity2"], label="knn2")
model_knn2 = make_pipeline(
FunctionTransformer(lambda X: X * np.array([[0.009, 1, 0.02]])),
KNeighborsClassifier(64),
)
cv_score(
model_knn2, features=["cond", "calc", "gravity2"], label="knn2", plot_model=True
)
# # Gaussian Process Classifier
model_gaussian = make_pipeline(
RobustScaler(), GaussianProcessClassifier(random_state=SEED)
)
cv_score(
model_gaussian,
features=["calc", "calc/gravity"],
label="gaussian",
add_origin=False,
plot_model=True,
)
# # Logistic
cv_score(
make_pipeline(
Nystroem(gamma=6, n_components=130),
CalibratedClassifierCV(LogisticRegression(C=0.1), method="isotonic"),
),
features=["calc"],
add_origin=True,
)
# # Extratrees
for l in range(10, 15):
model_et = ExtraTreesClassifier(
n_estimators=50,
max_features=2,
min_samples_leaf=l,
random_state=SEED,
criterion="entropy",
)
cv_score(model_et, features=["calc", "cond"])
model_et = ExtraTreesClassifier(
n_estimators=50,
max_features=2,
min_samples_leaf=12,
random_state=SEED,
criterion="entropy",
)
cv_score(
model_et,
features=["calc", "cond"],
label="extratrees",
add_origin=False,
plot_model=True,
)
# # Ensemble
# Inspired by several [AmbrosM](https://www.kaggle.com/ambrosm) notebooks
oof = pd.DataFrame(index=train.index)
for m in results:
oof[m[0]] = m[5]
corr = spearmanr(oof).correlation
# Ensure the correlation matrix is symmetric
corr = (corr + corr.T) / 2
np.fill_diagonal(corr, 1)
# We convert the correlation matrix to a distance matrix before performing
# hierarchical clustering using Ward's linkage.
distance_matrix = 1 - np.abs(corr)
dist_linkage = hierarchy.ward(squareform(distance_matrix))
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(10, 5))
dendro = hierarchy.dendrogram(
dist_linkage, labels=oof.columns, ax=ax1, leaf_rotation=90
)
dendro_idx = np.arange(0, len(dendro["ivl"]))
matrix = corr[dendro["leaves"], :][:, dendro["leaves"]]
sns.heatmap(
matrix, annot=True, fmt=".2f", cmap="coolwarm", mask=np.triu(matrix), ax=ax2
)
ax2.set_xticklabels(dendro["ivl"], rotation="vertical")
ax2.set_yticklabels(dendro["ivl"], rotation="horizontal")
fig.tight_layout()
plt.show()
def plot_results(oof, lib_blend, best_model=None):
fig, ax = plt.subplots(1, 3, figsize=(10, len(oof.columns)), sharey=True)
res = [m[2] for m in results if m[0] in oof.columns]
overfits = [m[3] for m in results if m[0] in oof.columns]
stds = [m[4] for m in results if m[0] in oof.columns]
color = ["skyblue" for i in range(len(res))]
for l in lib_blend:
color[oof.columns.get_loc(l)] = "orange"
if best_model is not None:
color[oof.columns.get_loc(best_model)] = "green"
pd.Series(res, index=oof.columns).plot.barh(ax=ax[0], color=color).set(title="AUC")
ax[0].set_xlim(0.79, 0.85)
pd.Series(overfits, index=oof.columns).plot.barh(ax=ax[1], color=color).set(
title="Overfitting"
)
pd.Series(stds, index=oof.columns).plot.barh(ax=ax[2], color=color).set(
title="OOF std"
)
for i in range(3):
ax[i].bar_label(ax[i].containers[0], fmt="%.5f", padding=2)
ax[i].spines[["right", "bottom"]].set_visible(False)
ax[i].xaxis.set_ticks_position("top")
clf1 = make_pipeline(ColumnTransformer([("pt", "passthrough", ["sum"])]), model_svc2)
clf2 = make_pipeline(
ColumnTransformer([("pt", "passthrough", ["cond", "calc"])]), model_knn
)
cv_score(
VotingClassifier(
estimators=[("svc", clf1), ("knn", clf2)], weights=[1, 1], voting="soft"
),
features=["cond", "calc", "calc/gravity", "sum"],
label="voting_knn_svc2",
add_origin=False,
plot_model=True,
)
clf1 = make_pipeline(ColumnTransformer([("pt", "passthrough", ["sum"])]), model_svc2)
clf2 = make_pipeline(
ColumnTransformer([("pt", "passthrough", ["cond", "calc"])]), model_knn
)
clf3 = make_pipeline(
ColumnTransformer([("pt", "passthrough", ["cond", "calc"])]), model_et
)
cv_score(
VotingClassifier(
estimators=[("svc", clf1), ("knn", clf2), ("et", clf3)],
weights=[1, 1, 0.5],
voting="soft",
),
features=["cond", "calc", "calc/gravity", "sum"],
label="voting_knn_svc2_et",
add_origin=False,
plot_model=True,
)
clf0 = make_pipeline(ColumnTransformer([("pt", "passthrough", ["calc"])]), model_svc1)
clf1 = make_pipeline(ColumnTransformer([("pt", "passthrough", ["sum"])]), model_svc2)
clf2 = make_pipeline(
ColumnTransformer([("pt", "passthrough", ["cond", "calc", "gravity2"])]), model_knn2
)
clf3 = make_pipeline(
ColumnTransformer([("pt", "passthrough", ["cond", "calc"])]), model_knn
)
cv_score(
VotingClassifier(
estimators=[("svc1", clf0), ("svc2", clf1), ("knn1", clf2), ("knn2", clf3)],
weights=[1, 1, 1, 1],
voting="soft",
),
features=["cond", "calc", "calc/gravity", "sum", "gravity2"],
label="voting_knn2_svc2",
add_origin=False,
plot_model=True,
)
oof = pd.DataFrame(index=train.index)
for m in results:
oof[m[0]] = m[5]
plot_results(
oof,
lib_blend=["voting_knn_svc2", "voting_knn_svc2_et", "voting_knn2_svc2"],
best_model="voting_knn_svc2",
)
# There is almost no overfitting with **voting_knn_svc2**, which uses an **SVC** on a combination of **calc**, **cond** and **gravity**, and a **KNN** classifier on **cond** and **calc**.
# There is much more overfitting when we add an ExtraTrees classifier, and the AUC is lower.
# ## Try ensemble with several seeds
# To be sure that the ensemble **voting_knn_svc2** is better than the simple **KNN** and **SVC2** models
for seed in [666, 42, 69]:
results = []
cv_score(model_svc2, features=["sum"], label="svc2", random_state=seed)
cv_score(model_knn, features=["cond", "calc"], label="knn", random_state=seed)
clf1 = make_pipeline(
ColumnTransformer([("pt", "passthrough", ["sum"])]), model_svc2
)
clf2 = make_pipeline(
ColumnTransformer([("pt", "passthrough", ["cond", "calc"])]), model_knn
)
cv_score(
VotingClassifier(
estimators=[("svc", clf1), ("knn", clf2)], weights=[1, 1], voting="soft"
),
features=["cond", "calc", "calc/gravity", "sum"],
label="voting_knn_svc2",
random_state=seed,
)
oof = pd.DataFrame(index=train.index)
for m in results:
oof[m[0]] = m[5]
plot_results(oof, lib_blend=["voting_knn_svc2"])
# Ensemble is always better.
# # Submission
# Inference
sub = pd.DataFrame(index=test.index)
model = VotingClassifier(
estimators=[("svc", clf1), ("knn", clf2)], weights=[1, 1], voting="soft"
)
model.fit(train[["sum", "cond", "calc"]], train[target])
sub["target"] = model.predict_proba(test[["sum", "cond", "calc"]])[:, 1]
# Export
sub[target].to_csv("submission.csv")
# Final control
sub = pd.read_csv("/kaggle/working/submission.csv")
plt.figure(figsize=(5, 4))
plt.title("Preds")
plt.hist(sub[target], bins=100)
plt.show()
sub.head(10)
|
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
print(tf.__version__)
def constr(a, b):
return lambda x: tf.clip_by_value(x, a, b)
x = tf.Variable(1.0, trainable=True, dtype=tf.float64, name="x")
# clip_by_value expects (min, max); constrain y and z to the interval [-1, 0]
y = tf.Variable(
    1.0, trainable=True, dtype=tf.float64, name="y", constraint=constr(-1, 0)
)
z = tf.Variable(
    1.0, trainable=True, dtype=tf.float64, name="z", constraint=constr(-1, 0)
)
def objective():
return x + y + z
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
obj_vals = []
for i in range(10):
print(
f"obj = {objective().numpy():.3f}, x = {x.numpy():.3f}, y = {y.numpy():.3f}, z = {z.numpy():.3f}"
)
obj_vals.append(objective().numpy())
opt.minimize(objective, var_list=[x, y, z])
plt.plot(obj_vals, ".-")
plt.xlabel("Step")
plt.ylabel("Objective")
plt.show()
# With gradients
# x1, x2 = reset()
# opt = tf.keras.optimizers.SGD(learning_rate=0.1)
# for i in range(50):
# with tf.GradientTape() as tape:
# y = fu(x1, x2)
# grads = tape.gradient(y, [x1, x2])
# processed_grads = [g for g in grads]
# grads_and_vars = zip(processed_grads, [x1, x2])
# print ('y = {:.1f}, x1 = {:.1f}, x2 = {:.1f}, grads0 = {:.1f}, grads1 = {:.1f} '.format(y.numpy(), x1.numpy(), x2.numpy(), grads[0].numpy(), grads[1].numpy()))
# opt.apply_gradients(grads_and_vars)
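# A runnable sketch of the gradient-tape variant above, using fresh unconstrained variables
# and a simplified two-variable linear objective (reset()/fu() from the commented block are
# not defined in this notebook, so they are not used here); assumes TF 2.x eager execution.
x_g = tf.Variable(1.0, dtype=tf.float64)
y_g = tf.Variable(1.0, dtype=tf.float64)
opt_g = tf.keras.optimizers.SGD(learning_rate=0.1)
for step in range(5):
    with tf.GradientTape() as tape:
        obj = x_g + y_g
    grads = tape.gradient(obj, [x_g, y_g])
    print(f"step {step}: obj = {obj.numpy():.3f}, grads = {[float(g) for g in grads]}")
    opt_g.apply_gradients(zip(grads, [x_g, y_g]))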
# # Minimize interesting 2d function and plot 2d image with trajectory. chose 2 starting points.
def constr(a, b):
return lambda x: tf.clip_by_value(x, a, b)
x = tf.Variable(0.5, trainable=True, dtype=tf.float64, name="x")
y = tf.Variable(0.5, trainable=True, dtype=tf.float64, name="y")
def objective():
return (
-5.5 * tf.exp(-20.0 * (x - 0.3) ** 2 - 40.0 * (y - 0.3) ** 2)
- 3.5 * tf.exp(-15.0 * (x - 0.6) ** 2 - 10.0 * (y - 0.85) ** 2)
- 2.0 * tf.sin(2.0 * (x - y))
)
opt = tf.keras.optimizers.SGD(learning_rate=0.1)
obj_vals = []
coords = []
for i in range(20):
print(f"obj = {objective().numpy():.3f}, x = {x.numpy():.3f}, y = {y.numpy():.3f}")
obj_vals.append(objective().numpy())
coords.append((x.numpy(), y.numpy()))
opt.minimize(objective, var_list=[x, y])
# start points.
start1 = [0.4, 0.9]
start2 = [0.0, 0.0]
def objective(x, y):
return (
-5.5 * np.exp(-20.0 * (x - 0.3) ** 2 - 40.0 * (y - 0.3) ** 2)
- 3.5 * np.exp(-15.0 * (x - 0.6) ** 2 - 10.0 * (y - 0.85) ** 2)
- 2.0 * np.sin(2.0 * (x - y))
)
x = np.linspace(0, 1, 400)
X, Y = np.meshgrid(x, x)
Z = objective(X, Y)
plt.figure(figsize=(16, 6))
plt.subplot(121)
plt.contourf(X, Y, Z, 60, cmap="RdGy")
plt.xlabel("x", fontsize=19)
plt.ylabel("y", fontsize=19)
plt.tick_params(axis="both", which="major", labelsize=14)
cbar = plt.colorbar()
cbar.ax.tick_params(labelsize=14)
# Plot trajectory line (see the sketch below).
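# A minimal sketch of that overlay: plot the (x, y) coordinates recorded during the single
# SGD run above (the start1/start2 points defined earlier are not re-run here).
traj = np.array(coords)
plt.plot(traj[:, 0], traj[:, 1], ".-", color="cyan", linewidth=2)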
plt.subplot(122)
plt.plot(obj_vals, ".-")
plt.xlabel("Step", fontsize=17)
plt.ylabel("Objective", fontsize=17)
plt.tick_params(axis="both", which="major", labelsize=14)
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt # visualization
import datetime # datetime functions
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
df = pd.read_csv(
"/kaggle/input/ethereum-prices-by-ethereumpriceorg/2016-02-01_2023-04-16_ethereumprice_org.csv"
)
df.head()
df["timestamp_datetime"] = df["timestamp"].transform(
lambda x: datetime.datetime.fromtimestamp(x)
)
df
df["opencorrect"] = pd.to_numeric(df.open, errors="coerce")
df["highcorrect"] = pd.to_numeric(df.high, errors="coerce")
df["lowcorrect"] = pd.to_numeric(df.low, errors="coerce")
df["closecorrect"] = pd.to_numeric(df.close, errors="coerce")
df
plt.plot(df["timestamp_datetime"], df["opencorrect"], label="Open")
plt.plot(df["timestamp_datetime"], df["highcorrect"], label="High")
plt.plot(df["timestamp_datetime"], df["lowcorrect"], label="Low")
plt.plot(df["timestamp_datetime"], df["closecorrect"], label="Close")
plt.legend(loc="best")
|
# # Titanic Tutorial
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Load the training data
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
train_data.head()
# Load the test data
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
test_data.head()
# Explore a pattern
women = train_data.loc[train_data.Sex == "female"]["Survived"]
rate_women = sum(women) / len(women)
print("% of women who survived:", rate_women)
men = train_data.loc[train_data.Sex == "male"]["Survived"]
rate_men = sum(men) / len(men)
print("% of men who survived:", rate_men)
from sklearn.ensemble import RandomForestClassifier
y = train_data["Survived"]
features = ["Pclass", "Sex", "SibSp", "Parch"]
X = pd.get_dummies(train_data[features])
X_test = pd.get_dummies(test_data[features])
model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
model.fit(X, y)
predictions = model.predict(X_test)
output = pd.DataFrame({"PassengerId": test_data.PassengerId, "Survived": predictions})
# output.to_csv('submission.csv', index=False)
# print("Your submission was successfully saved!")
# # My Predictions
# Load a copy of the training dataset
training_data = pd.read_csv("/kaggle/input/titanic/train.csv")
# Filter for relevant data by discarding the "Name" and "Ticket" columns that are unlikely to be useful for predicting survival
filtered_data = training_data.drop(columns=["Name", "Ticket"])
# filtered_data.head()
# Explore patterns by calculating survival percentage by "Pclass", "Sex", "Age", "SibSp", "Parch", "Embarked"
columnsToSearch = ["Pclass", "Sex", "Age", "SibSp", "Parch", "Embarked"]
for column in columnsToSearch:
for unique_value in filtered_data[column].unique():
survived = filtered_data.loc[filtered_data[column] == unique_value]["Survived"]
        if len(survived) > 0:
print(
"Survival rate "
+ column
+ ", "
+ str(unique_value)
+ ": "
+ str(int(sum(survived) / len(survived) * 100))
)
print()
# Explore "Fare" by plotting against "Survived"
import matplotlib.pyplot as plt
plt.scatter(filtered_data["Fare"], filtered_data["Survived"])
plt.show()
# Explore "Cabin" by dropping rows with NaN, taking the first character as the cabin group and calculating survival percentage
group_cabin = filtered_data.copy(True)
group_cabin = group_cabin.dropna(subset=["Cabin"])
group_cabin.head()
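# A short sketch completing the step described above: take the first character of "Cabin" as
# the cabin group and compute the survival percentage per group (the letter-based grouping is
# only an illustrative choice).
group_cabin["CabinGroup"] = group_cabin["Cabin"].str[0]
print((group_cabin.groupby("CabinGroup")["Survived"].mean() * 100).round(1))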
# The analysis above informs the following observations:
# - "Pclass" is a strong indicator of survival, particularly for 1st class
#
# - "Sex" is a strong indicator of survival, particularly for women
#
# - "Age" is a strong indicator of survival, particularly for the young and old
#
# - "SibSp", "Parch", "Fare", and "Embarked" alone don't suggest a strong rule
#
# - "Cabin" does not have enough valid data to be relevant
# Strategy:
# Try RandomForest model using "Pclass", "Sex", and "Age" as features
# Load training and test sets
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
# Check for NaN values
print(train_data.isna().sum())
print(test_data.isna().sum())
# Drop unused columns
Y_train = train_data["Survived"]
X_train = train_data.drop(
columns=[
"PassengerId",
"Survived",
"Name",
"SibSp",
"Parch",
"Ticket",
"Fare",
"Cabin",
"Embarked",
]
)
X_test = test_data.drop(
columns=[
"PassengerId",
"Name",
"SibSp",
"Parch",
"Ticket",
"Fare",
"Cabin",
"Embarked",
]
)
# Fill NaN age with mean age
X_train = X_train.fillna(X_train.Age.mean())
X_test = X_test.fillna(X_test.Age.mean())
# Reassign "Sex" as binary value
sex = {"male": 1, "female": 0}
X_train.Sex = [sex[item] for item in X_train.Sex]
X_test.Sex = [sex[item] for item in X_test.Sex]
# Verify training set features
X_train.head()
model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
model.fit(X_train, Y_train)
predictions = model.predict(X_test)
output = pd.DataFrame({"PassengerId": test_data.PassengerId, "Survived": predictions})
output.to_csv("jp_submission.csv", index=False)
print("Your submission was successfully saved!")
print(len(output.index))
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# **1. Import Libraries**
# data
import pandas as pd
import numpy as np
# visualization
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
# **2. Read Dataset**
df = pd.read_csv(
"/kaggle/input/most-subscribed-1000-youtube-channels/topSubscribed.csv"
)
df.head()
# **3. Data Processing**
df.shape
df.dtypes
# *Found that the Subscribers, Video Views and Video Count columns have string data types, so I convert them to integer data types.*
df["Subscribers"] = df["Subscribers"].str.replace(",", "").astype(int)
df["Video Views"] = df["Video Views"].str.replace(",", "").astype(int)
df["Video Count"] = df["Video Count"].str.replace(",", "").astype(int)
df.head()
df.dtypes
# *Delete some columns that are not related.*
df = df.drop(["Rank"], axis=1)
df.head()
# *Check missing values.*
df.isnull().sum()
# *Check unique data of Category column*
df.Category.unique()
df.groupby("Category")["Category"].count()
# *Found 30 rows whose category is an unknown value (a URL). Remove that unknown category.*
df = df[
df["Category"]
!= "https://us.youtubers.me/global/all/top-1000-most_subscribed-youtube-channels"
]
df.Category.unique()
df.shape
# 30 rows have been removed
df.describe()
# **4. Data Visualization**
# **Percentage of Youtube Channel By Category**
categories = df["Category"].value_counts()
fig = px.pie(
values=categories.values,
names=categories.index,
color_discrete_sequence=px.colors.sequential.RdBu,
title="Percentage of Youtube Channel By Category",
template="presentation",
)
fig.update_traces(textposition="inside", textfont_size=11, textinfo="percent+label")
fig.show()
# *Entertainment, Music and People & Blogs are the top 3 categories by share of channels*
# **Video views and Subscribers By Categories**
fig = px.scatter(
df,
x="Subscribers",
y="Video Views",
size="Video Views",
color="Category",
log_x=True,
size_max=50,
title="Video views and Subscribers By Categories",
marginal_y="rug",
)
fig.show()
# *Music, Education and Shows rank as the top 3 highest in Video Views and Subscribers.*
# **Top 10 Music Youtube Channels with The Most Video Count**
df_music = df[df.Category == "Music"]
df_music.head()
new_index = df_music["Video Count"].sort_values(ascending=False).index.values
sorted_df_music = df_music.reindex(new_index)
sorted_df_music_10 = sorted_df_music[:10]
plt.figure(figsize=(15, 10))
sns.barplot(sorted_df_music_10, x="Video Count", y="Youtube Channel", palette="pastel")
plt.xticks(rotation=90)
plt.xlabel("Video Count", fontsize=15)
plt.ylabel("Youtube Channel", fontsize=15)
plt.title("Top 10 Music Youtube Channels with The Most Video Count", fontsize=20)
# *Indosiar has the highest video count in the Music category*
# **Top 10 Education Youtube Channels with The Most Video Views**
df_edu = df[df.Category == "Education"]
df_edu.head()
new_index2 = df_edu["Video Views"].sort_values(ascending=False).index.values
sorted_df_edu = df_edu.reindex(new_index2)
sorted_df_edu_10 = sorted_df_edu[:10]
plt.figure(figsize=(15, 10))
sns.barplot(sorted_df_edu_10, x="Video Views", y="Youtube Channel", palette="pastel")
plt.xticks(rotation=90)
plt.xlabel("Video Views", fontsize=15)
plt.ylabel("Youtube Channel", fontsize=15)
plt.title("Top 10 Education Youtube Channels with The Most Video Views", fontsize=20)
# *Cocomelon has the most video views in the Education category*
# **Trend in the YouTube Channels Created Each Year**
year = df["Started"].value_counts()
plt.figure(figsize=(20, 8))
sns.pointplot(x=year.index, y=year.values, color="violet")
plt.xlabel("Year")
plt.ylabel("Count")
plt.title("Trend in the YouTube Channels Created Each Year", size=30, color="maroon")
# *The largest number of YouTube channels was created in 2014, and the count has kept decreasing since then*
# **Correlation between variables**
sns.scatterplot(data=df, x="Subscribers", y="Video Views")
sns.scatterplot(data=df, x="Subscribers", y="Video Count")
sns.scatterplot(data=df, x="Video Views", y="Video Count")
# **The heat map explains the correlation**
plt.figure(figsize=(20, 8))
sns.set_theme("notebook")
sns.heatmap(df.corr(), annot=True, center=0)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
df = pd.read_csv("../input/titanic/train.csv")
df.head()
df.isna().sum()
# ## fillna()
# Common fill strategies:
# * a constant value (e.g. 'S')
# * ffill (forward fill)
# * bfill (backward fill)
# * a statistic such as df['col'].mean()
# (a quick example of each strategy follows below)
df["Embarked"].fillna("S", inplace=True)
df.isna().sum()
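# A quick illustration of the strategies listed above, applied to a copy of the "Age" column
# so the working DataFrame stays unchanged (the derived column names are just examples):
demo = df[["Age"]].copy()
demo["Age_const"] = demo["Age"].fillna(0)  # constant value
demo["Age_ffill"] = demo["Age"].ffill()  # forward fill
demo["Age_bfill"] = demo["Age"].bfill()  # backward fill
demo["Age_mean"] = demo["Age"].fillna(demo["Age"].mean())  # column mean
demo.head(10)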
# ## Impute
# works much like .fillna(), but supports more complex imputation strategies
from sklearn.impute import SimpleImputer
imp = SimpleImputer(missing_values=np.nan, strategy="mean")
imp.fit(df[["Age"]])
df[["Age"]] = imp.transform(df[["Age"]])
df.isna().sum()
# ## dropna()
# axis = 0, drops rows with na
# axis = 1, drops columns with na
df.dropna(axis=1, inplace=True)
df.isna().sum()
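# For comparison, axis=0 (the default) drops any row containing a NaN; shown on a fresh copy
# of the raw file so the DataFrame above is not affected:
demo_rows = pd.read_csv("../input/titanic/train.csv")
print("before:", demo_rows.shape, "after dropna(axis=0):", demo_rows.dropna(axis=0).shape)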
|
# ## Using Scikit-learn to Implement a Simple Support Vector Classifier
# As a follow-up to my previous postings [here](https://www.kaggle.com/chrised209/decision-tree-modeling-of-the-iris-dataset) and [here](https://www.kaggle.com/chrised209/k-nearest-neighbors-modeling-of-iris-dataset), I'm going to continue to look into the [classic iris data set](https://en.wikipedia.org/wiki/Iris_flower_data_set), but this time we'll be exploring scikit-learn's support vector classifier.
# We will again begin by running the standard import cell common to kaggle notebooks:
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# To this we will again add our needed scikit-learn libraries and the plotting functionality from matplotlib.
# In this notebook we'll also add in a performance metric called the confusion matrix and another validation tool to split our data into training and testing sets.
# Import the needed matplotlib functionality for scatter plot visualization.
import matplotlib.pyplot as plt
# import the needed dataset.
from sklearn.datasets import load_iris
# Import the model.
from sklearn.svm import SVC
# Import the confusion matrix tool
from sklearn.metrics import confusion_matrix
# Import the train-test split functionality
from sklearn.model_selection import train_test_split
# Import the unique_labels function to support plotting of the confusion matrix
from sklearn.utils.multiclass import unique_labels
# We'll now define a function to handle the plotting of the confusion matrix (this is implemented natively in the latest stable 0.22.1 release of scikit-learn, but not in the version 0.21.3 available in Kaggle. This function is therefore lifted directly from the [0.21.3 documentation](https://scikit-learn.org/0.21/auto_examples/model_selection/plot_confusion_matrix.html#sphx-glr-auto-examples-model-selection-plot-confusion-matrix-py)).
#
def plot_confusion_matrix(
y_true, y_pred, classes, normalize=False, title=None, cmap=plt.cm.Blues
):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if not title:
if normalize:
title = "Normalized confusion matrix"
else:
title = "Confusion matrix, without normalization"
# Compute confusion matrix
cm = confusion_matrix(y_true, y_pred)
# Only use the labels that appear in the data
classes = classes[unique_labels(y_true, y_pred)]
if normalize:
cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
print("Normalized confusion matrix")
else:
print("Confusion matrix, without normalization")
print(cm)
fig, ax = plt.subplots()
im = ax.imshow(cm, interpolation="nearest", cmap=cmap)
ax.figure.colorbar(im, ax=ax)
# We want to show all ticks...
ax.set(
xticks=np.arange(cm.shape[1]),
yticks=np.arange(cm.shape[0]),
# ... and label them with the respective list entries
xticklabels=classes,
yticklabels=classes,
title=title,
ylabel="True label",
xlabel="Predicted label",
)
# Rotate the tick labels and set their alignment.
plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
# Loop over data dimensions and create text annotations.
fmt = ".2f" if normalize else "d"
thresh = cm.max() / 2.0
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(
j,
i,
format(cm[i, j], fmt),
ha="center",
va="center",
color="white" if cm[i, j] > thresh else "black",
)
fig.tight_layout()
return ax
np.set_printoptions(precision=2)
# Here, we will load the data set, label the pieces more conveniently, and split the data into training and testing sets.
# Load the iris dataset from scikit-learn (note the use of from [library] import [function] above)
iris = load_iris()
# Define X values from the measurements.
X = iris.data
# Define Y values from the classification indices.
y = iris.target
# Define the classifications of each sample.
class_names = iris.target_names
# Split the data into training and testing sets.
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3333, random_state=0
)
# From here we can now apply our model (with default arguments for the time being) specifically to this split of training and testing data.
# Define the classifier model object (with the kernel coefficient updated to the default used in v 0.22.1)...
classifier = SVC(gamma="scale")
# ... to fit the support vector classifier model on the training data only.
y_pred = classifier.fit(X_train, y_train).predict(X_test)
# Set the size of the figure used to contain the confusion matrices to be generated.
plt.figure(figsize=(15, 15))
# Plot non-normalized confusion matrix comparing the predicted y_pred labels to the actual y_test values
plot_confusion_matrix(
y_test, y_pred, classes=class_names, title="Confusion Matrix Without Normalization"
)
# Plot normalized confusion matrix
plot_confusion_matrix(
y_test,
y_pred,
classes=class_names,
normalize=True,
title="Normalized Confusion Matrix",
)
plt.show()
# As we observed with the k nearest neighbor classifier, the results here are again not half bad! We have a bit of misclassification of some versicolor plants as virginica, but all in all this is a very solid result.
# It is worth bearing in mind, however, that the models above were run with all parameters set to their defaults. A tweaking of the default values may lead to a more sensible and more readily interpretable model.
# Is it perhaps possible to get an improvement in the confusion matrix by choosing a different kernel from the default radial basis function?
# Define a list of kernels to consider
kernels = ["rbf", "linear", "poly", "sigmoid"]
# Initialize a list of classifier models
classifiers = []
# Initialize a list of y_pred values
y_preds = []
# Iterating through each kernel
for i, kernel in enumerate(kernels):
# Define the classifier model object...
classifier = SVC(gamma="scale", kernel=kernel)
classifiers.append(classifier)
# ... to fit the classifier model on the training data only.
y_pred = classifier.fit(X_train, y_train).predict(X_test)
# Set the size of the figure used to contain the confusion matrices to be generated.
plt.figure(figsize=(15, 15))
# Plot non-normalized confusion matrix comparing the predicted y_pred labels to the actual y_test values
plot_confusion_matrix(
y_test,
y_pred,
classes=class_names,
title=f"Confusion Matrix Without Normalization ({kernel} kernel)",
)
plt.show()
# It seems that the linear, polynomial and radial basis function kernels give the best results, all else being equal, while the sigmoid kernel fails miserably without further parameter tuning.
# Since the polynomial kernel offers a bit of extra flexibility in the form of its maximum degree, let's consider optimizing the degree to try and improve its showing.
# Define a list of polynomial degrees to consider
degrees = [3, 4, 5, 6, 7]
# Initialize a list of classifier models
classifiers = []
# Initialize a list of y_pred values
y_preds = []
# Iterating through each degree
for i, degree in enumerate(degrees):
# Define the classifier model object...
classifier = SVC(gamma="scale", kernel="poly", degree=degree)
classifiers.append(classifier)
# ... to fit the classifier model on the training data only.
y_pred = classifier.fit(X_train, y_train).predict(X_test)
# Set the size of the figure used to contain the confusion matrices to be generated.
plt.figure(figsize=(15, 15))
# Plot non-normalized confusion matrix comparing the predicted y_pred labels to the actual y_test values
plot_confusion_matrix(
y_test,
y_pred,
classes=class_names,
title=f"Confusion Matrix Without Normalization (degree {degree} polynomial kernel)",
)
plt.show()
|
import tensorflow as tf
print(tf.__version__)
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import tensorflow as tf
from tensorflow.keras.layers import Flatten, Dense
from keras.optimizers import RMSprop
x_train = pd.read_csv("../input/fashionmnist/fashion-mnist_train.csv")
y_train = x_train.label
x_train.drop(["label"], inplace=True, axis=1)
x_test = pd.read_csv("../input/fashionmnist/fashion-mnist_test.csv")
# the test CSV also carries a label column; split it off before scaling the pixels
y_test = x_test.label
x_test.drop(["label"], inplace=True, axis=1)
x_train, x_test = x_train / 255.0, x_test / 255.0
model = tf.keras.Sequential()
model.add(Dense(1024, activation="relu", input_dim=784))
model.add(Dense(512, activation="relu"))
model.add((Dense(256, activation="relu")))
model.add(Dense(10, activation="softmax"))
optimizer = RMSprop(learning_rate=0.001, rho=0.9, epsilon=1e-08)
model.compile(
optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
model.fit(x_train, y_train, epochs=50, verbose=1, batch_size=512)
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.ensemble import VotingClassifier
df = pd.read_csv("/kaggle/input/suv-nanze/suv.csv")
df.drop("User ID", axis=1, inplace=True)
df.head(5)
df.Gender = pd.get_dummies(df.Gender, drop_first=True)
X = df.to_numpy()
np.random.seed(0)
X = X[np.random.permutation(X.shape[0])]
y = X[:, -1]
X = X[:, :-1]
split = int(X.shape[0] * 0.8)
X_train = X[:split]
y_train = y[:split]
X_test = X[split:]
y_test = y[split:]
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
# BaggingClassifier > one base model, refit n times on resampled data (quick sketch below)
# VotingClassifier > several different classifiers combined
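# A quick, hedged sketch of the bagging idea for contrast (one base estimator, resampled and
# refit n times); the hyperparameters here are arbitrary examples, and the rest of this
# notebook uses VotingClassifier with different model types instead.
from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
bag = BaggingClassifier(DecisionTreeClassifier(max_depth=3), n_estimators=50, random_state=0)
bag.fit(X_train, y_train)
print(
    "Bagging:\ttrain_acc:{},\ttest_acc:{}".format(
        bag.score(X_train, y_train), bag.score(X_test, y_test)
    )
)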
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
clfs = [
("LR", LogisticRegression()),
("KNN3", KNeighborsClassifier(n_neighbors=3)),
("KNN5", KNeighborsClassifier(n_neighbors=5)),
("KNN7", KNeighborsClassifier(n_neighbors=7)),
("KNN9", KNeighborsClassifier(n_neighbors=9)),
("SVM_l", SVC(kernel="linear")),
("SVM_r", SVC(kernel="rbf")),
("DT2", DecisionTreeClassifier(max_depth=2)),
("DT3", DecisionTreeClassifier(max_depth=3)),
]
# write code manually to see how each one of them acts separately
for name, clf in clfs:
clf.fit(X_train, y_train)
print(
"{}:\ttrain_acc:{},\ttest_acc:{}".format(
name,
clf.score(X_train, y_train),
clf.score(X_test, y_test),
)
)
# clfs > list of (name, estimator) tuples
en_clf = VotingClassifier(clfs, n_jobs=-1)
# voting > 'hard' or 'soft'
# weights > give lower weights to less accurate models
# fit() trains each of the models separately
# (a soft-voting example with explicit weights follows after the scores below)
en_clf.fit(X_train, y_train)
print(en_clf.score(X_train, y_train))
print(en_clf.score(X_test, y_test))
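# A hedged example of the soft-voting variant mentioned above, with explicit (arbitrary)
# weights; soft voting needs probability estimates, so SVC gets probability=True.
soft_clfs = [
    ("LR", LogisticRegression()),
    ("KNN5", KNeighborsClassifier(n_neighbors=5)),
    ("SVM_r", SVC(kernel="rbf", probability=True)),
]
en_soft = VotingClassifier(soft_clfs, voting="soft", weights=[1.0, 0.5, 1.0], n_jobs=-1)
en_soft.fit(X_train, y_train)
print(en_soft.score(X_train, y_train))
print(en_soft.score(X_test, y_test))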
|
# In this notebook I have used fancyimpute technique to impute missing values in the entire WiDS2020 dataset.
# I have used LGBM to train the model and got an accuracy of 0.88. Parameter tuning might help in bettering this score.
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
from fancyimpute import KNN
from sklearn.preprocessing import OrdinalEncoder
from sklearn.model_selection import train_test_split
import lightgbm as lgb
# read the data and drop noisy columns
train = pd.read_csv("/kaggle/input/widsdatathon2020/training_v2.csv")
test = pd.read_csv("/kaggle/input/widsdatathon2020/unlabeled.csv")
solution_template = pd.read_csv("/kaggle/input/solutiontemplate/solution_template.csv")
trainv1 = train.drop(
[
"encounter_id",
"patient_id",
"icu_id",
"hospital_id",
"readmission_status",
"ethnicity",
],
axis=1,
)
testv1 = test.drop(
[
"encounter_id",
"patient_id",
"icu_id",
"hospital_id",
"readmission_status",
"hospital_death",
"ethnicity",
],
axis=1,
)
print("number of rows and columns in training set is \n", trainv1.shape)
print("number of rows and columns in test set is \n", testv1.shape)
# Exploring the data
trainv1.info()
trainv1.describe()
trainv1.isna().sum()
testv1.isna().sum()
trainv1["hospital_death"].value_counts() * 100 / len(trainv1["hospital_death"])
sns.countplot(trainv1["hospital_death"])
# Separate categorical and numerical variables
cattrain = trainv1.select_dtypes("object")
numtrain = trainv1.select_dtypes("number")
cattest = testv1.select_dtypes("object")
numtest = testv1.select_dtypes("number")
# encoding categorical test variables
# instantiate both packages to use
encoder = OrdinalEncoder()
imputer = KNN()
# create a list of categorical columns to iterate over
cat_cols = cattest.columns
def encode(data):
"""function to encode non-null data and replace it in the original data"""
# retains only non-null values
nonulls = np.array(data.dropna())
# reshapes the data for encoding
impute_reshape = nonulls.reshape(-1, 1)
# encode data
impute_ordinal = encoder.fit_transform(impute_reshape)
# Assign back encoded values to non-null values
data.loc[data.notnull()] = np.squeeze(impute_ordinal)
return data
# create a for loop to iterate through each column in the data
for columns in cat_cols:
encode(cattest[columns])
# encoding categorical train variables
# instantiate both packages to use
encoder = OrdinalEncoder()
imputer = KNN()
# create a list of categorical columns to iterate over
cat_cols = cattrain.columns
def encode(data):
"""function to encode non-null data and replace it in the original data"""
# retains only non-null values
nonulls = np.array(data.dropna())
# reshapes the data for encoding
impute_reshape = nonulls.reshape(-1, 1)
# encode data
impute_ordinal = encoder.fit_transform(impute_reshape)
# Assign back encoded values to non-null values
data.loc[data.notnull()] = np.squeeze(impute_ordinal)
return data
# create a for loop to iterate through each column in the data
for columns in cat_cols:
encode(cattrain[columns])
# splitting train values into sections for faster imputing
numtrain1 = numtrain[0:20000]
numtrain2 = numtrain[20000:40000]
numtrain3 = numtrain[40000:60000]
numtrain4 = numtrain[60000:80000]
numtrain5 = numtrain[80000:]
cattrain1 = cattrain[0:20000]
cattrain2 = cattrain[20000:40000]
cattrain3 = cattrain[40000:60000]
cattrain4 = cattrain[60000:80000]
cattrain5 = cattrain[80000:]
# splitting test values into sections for faster imputing
cattest1 = cattest[0:20000]
cattest2 = cattest[20000:]
numtest1 = numtest[0:20000]
numtest2 = numtest[20000:]
# impute categorical test data and convert
encode_testdata1 = pd.DataFrame(
np.round(imputer.fit_transform(cattest1)), columns=cattest.columns
)
encode_testdata2 = pd.DataFrame(
np.round(imputer.fit_transform(cattest2)), columns=cattest.columns
)
# impute categorical train data and convert
encode_data1 = pd.DataFrame(
np.round(imputer.fit_transform(cattrain1)), columns=cattrain.columns
)
encode_data2 = pd.DataFrame(
np.round(imputer.fit_transform(cattrain2)), columns=cattrain.columns
)
encode_data3 = pd.DataFrame(
np.round(imputer.fit_transform(cattrain3)), columns=cattrain.columns
)
encode_data4 = pd.DataFrame(
np.round(imputer.fit_transform(cattrain4)), columns=cattrain.columns
)
encode_data5 = pd.DataFrame(
np.round(imputer.fit_transform(cattrain5)), columns=cattrain.columns
)
cattrainfill = pd.concat(
[encode_data1, encode_data2, encode_data3, encode_data4, encode_data5]
)
cattestfill = pd.concat([encode_testdata1, encode_testdata2])
# impute numerical test data
encode_testdatanum = pd.DataFrame(
np.round(imputer.fit_transform(numtest1)), columns=numtest.columns
)
encode_testdatanum2 = pd.DataFrame(
np.round(imputer.fit_transform(numtest2)), columns=numtest.columns
)
# impute numerical train data
encode_datanum1 = pd.DataFrame(
np.round(imputer.fit_transform(numtrain1)), columns=numtrain.columns
)
encode_datanum2 = pd.DataFrame(
np.round(imputer.fit_transform(numtrain2)), columns=numtrain.columns
)
encode_datanum3 = pd.DataFrame(
np.round(imputer.fit_transform(numtrain3)), columns=numtrain.columns
)
encode_datanum4 = pd.DataFrame(
np.round(imputer.fit_transform(numtrain4)), columns=numtrain.columns
)
encode_datanum5 = pd.DataFrame(
np.round(imputer.fit_transform(numtrain5)), columns=numtrain.columns
)
numtrainfill = pd.concat(
[
encode_datanum1,
encode_datanum2,
encode_datanum3,
encode_datanum4,
encode_datanum5,
]
)
numtestfill = pd.concat([encode_testdatanum, encode_testdatanum2])
trainv6 = pd.concat([numtrainfill, cattrainfill], axis=1, join="inner")
y = trainv6["hospital_death"]
trainv7 = trainv6.drop(["hospital_death"], axis=1)
# Split into training and validation set
x_train, x_val, y_train, y_val = train_test_split(
trainv7, y, test_size=0.25, random_state=1
)
# Model building
d_train = lgb.Dataset(x_train, label=y_train)
params = {}
params["learning_rate"] = 0.003
params["boosting_type"] = "gbdt"
params["objective"] = "binary"
params["metric"] = "binary_logloss"
params["sub_feature"] = 0.5
params["num_leaves"] = 100
params["min_data"] = 50
params["max_depth"] = 10
clf = lgb.train(params, d_train, 100)
# Prediction
y_pred = clf.predict(x_val)
y_pred1 = np.round(y_pred)
# Measure accuracy
# Confusion matrix
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(y_val, y_pred1)
# Accuracy
from sklearn.metrics import accuracy_score
accuracy = accuracy_score(y_pred1, y_val)
# Prediction on Test variables
# build the full imputed test set, mirroring the construction of trainv6 above
testv6 = pd.concat([numtestfill, cattestfill], axis=1, join="inner")
pred_on_test = clf.predict(testv6)
solution_template.hospital_death = pred_on_test
solution_template.to_csv("submissionlgbm.csv", index=0)
|
import os
import json
import random
import collections
import numpy as np
import pandas as pd
import cv2
import matplotlib.pyplot as plt
import seaborn as sns
def convert_image_id_2_path(image_id: str, is_train: bool = True) -> str:
folder = "train" if is_train else "test"
return "../input/g2net-gravitational-wave-detection/{}/{}/{}/{}/{}.npy".format(
folder, image_id[0], image_id[1], image_id[2], image_id
)
train_df = pd.read_csv(
"../input/g2net-gravitational-wave-detection/training_labels.csv"
)
train_df
sns.countplot(data=train_df, x="target")
def visualize_sample(
_id,
target,
colors=("black", "red", "green"),
signal_names=("LIGO Hanford", "LIGO Livingston", "Virgo"),
):
path = convert_image_id_2_path(_id)
x = np.load(path)
plt.figure(figsize=(16, 7))
for i in range(3):
plt.subplot(4, 1, i + 1)
plt.plot(x[i], color=colors[i])
plt.legend([signal_names[i]], fontsize=12, loc="lower right")
plt.subplot(4, 1, 4)
plt.plot(x[i], color=colors[i])
plt.subplot(4, 1, 4)
plt.legend(signal_names, fontsize=12, loc="lower right")
plt.suptitle(f"id: {_id} target: {target}", fontsize=16)
plt.show()
for i in random.sample(train_df.index.tolist(), 3):
_id = train_df.iloc[i]["id"]
target = train_df.iloc[i]["target"]
visualize_sample(_id, target)
import librosa
import librosa.display
def visualize_sample_spectogram(
_id, target, signal_names=("LIGO Hanford", "LIGO Livingston", "Virgo")
):
x = np.load(convert_image_id_2_path(_id))
plt.figure(figsize=(16, 5))
for i in range(3):
X = librosa.stft(x[i] / x[i].max())
Xdb = librosa.amplitude_to_db(abs(X))
plt.subplot(1, 3, i + 1)
librosa.display.specshow(
Xdb, sr=2048, x_axis="time", y_axis="hz", vmin=-30, vmax=50
)
plt.colorbar()
plt.title(signal_names[i], fontsize=14)
plt.suptitle(f"id: {_id} target: {target}", fontsize=16)
plt.show()
for i in random.sample(train_df.index.tolist(), 3):
_id = train_df.iloc[i]["id"]
target = train_df.iloc[i]["target"]
visualize_sample_spectogram(_id, target)
def visualize_sample_mfcc(
_id,
target,
signal_names=("LIGO Hanford", "LIGO Livingston", "Virgo"),
sr=2048,
):
x = np.load(convert_image_id_2_path(_id))
plt.figure(figsize=(16, 5))
for i in range(3):
mfccs = librosa.feature.mfcc(x[i] / x[i].max(), sr=sr)
plt.subplot(1, 3, i + 1)
librosa.display.specshow(
mfccs, sr=sr, x_axis="time", vmin=-200, vmax=50, cmap="coolwarm"
)
plt.title(signal_names[i], fontsize=14)
plt.colorbar()
plt.suptitle(f"id: {_id} target: {target}", fontsize=16)
plt.show()
#!pip install pycbc -qq
# import pycbc.types
import torch
from nnAudio.Spectrogram import CQT1992v2
Q_TRANSFORM = CQT1992v2(sr=2048, fmin=20, fmax=1024, hop_length=32)
def visualize_sample_qtransform(
_id,
target,
signal_names=("LIGO Hanford", "LIGO Livingston", "Virgo"),
sr=2048,
):
x = np.load(convert_image_id_2_path(_id))
plt.figure(figsize=(16, 5))
for i in range(3):
waves = x[i] / np.max(x[i])
waves = torch.from_numpy(waves).float()
image = Q_TRANSFORM(waves)
plt.subplot(1, 3, i + 1)
plt.imshow(image.squeeze())
plt.title(signal_names[i], fontsize=14)
plt.suptitle(f"id: {_id} target: {target}", fontsize=16)
plt.show()
for i in random.sample(train_df.index.tolist(), 5):
_id = train_df.iloc[i]["id"]
target = train_df.iloc[i]["target"]
visualize_sample(_id, target)
visualize_sample_qtransform(_id, target)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from statsmodels.stats.outliers_influence import variance_inflation_factor
from sklearn.ensemble import (
RandomForestRegressor,
ExtraTreesRegressor,
GradientBoostingRegressor,
AdaBoostRegressor,
)
import xgboost as xgb
from sklearn.metrics import (
mean_absolute_error,
mean_squared_error,
r2_score,
mean_squared_log_error,
mean_absolute_percentage_error,
explained_variance_score,
)
from sklearn.linear_model import LinearRegression, LassoCV, RidgeCV
from sklearn.preprocessing import MinMaxScaler, PolynomialFeatures
from sklearn.model_selection import train_test_split, RepeatedKFold
from lightgbm import LGBMRegressor
from catboost import CatBoostRegressor
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(font_scale=0.7) # tick size
import warnings
warnings.filterwarnings("ignore")
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# # Importing Data
sample = pd.read_csv("/kaggle/input/playground-series-s3e11/sample_submission.csv")
train = pd.read_csv("/kaggle/input/playground-series-s3e11/train.csv")
test = pd.read_csv("/kaggle/input/playground-series-s3e11/test.csv")
original = pd.read_csv("/kaggle/input/media-campaign-cost-prediction/train_dataset.csv")
train.drop("id", axis=1, inplace=True)
test.drop("id", axis=1, inplace=True)
train.head(10)
train.shape
original.head()
original.shape
# # Dataset Description
# Convenient Food Mart (CFM) is a chain of convenience stores in the United States. The privately held company is headquartered in Mentor, Ohio, and currently operates approximately 325 stores in the US on a franchise system.
# Food Mart was the nation's third-largest chain of convenience stores as of 1988.
# Your task is to devise a machine learning model that predicts the cost of media campaigns in the food marts on the basis of the features provided.
# store_sales(in millions) - items sold from a retail location, in millions of dollars (Float)
# unit_sales(in millions) - total quantity of a product sold in stores in a given period, in millions (Float)
# total_children - total number of children at home (Integer)
# avg_cars_at home(approx) - approximate number of cars at home (Integer)
# num_children_at_home - number of children at home, as filled in by the customer (Integer)
# gross_weight - gross weight of the item purchased (Float)
# recyclable_package - whether the food item comes in a recyclable package (Binary)
# low_fat - whether the food item is low in fat (Binary)
# units_per_case - units available on each store shelf (Integer)
# store_sqft - store area available, in square feet (Float)
# coffee_bar - coffee bar available in store (Binary)
# video_store - video/gaming store available (Binary)
# salad_bar - salad bar available in store (Binary)
# prepared_food - prepared food available in store (Binary)
# florist - flower shelves available in store (Binary)
# cost - cost of acquiring a customer, in dollars (Float)
print(
"Memory usage of train DataFrame before optimization is {:.2f} MB.".format(
train.memory_usage().sum() / 1024**2
)
)
train.dtypes
original.dtypes
# # Data Preparation
train.rename(columns={"avg_cars_at home(approx).1": "avg_cars_at_home"}, inplace=True)
original.rename(
columns={"avg_cars_at home(approx).1": "avg_cars_at_home"}, inplace=True
)
test.rename(columns={"avg_cars_at home(approx).1": "avg_cars_at_home"}, inplace=True)
train.columns
# Number of unique values in each column
print("Number of unique values per column:\n", train.nunique())
# The data types of many columns take up more memory than necessary, so they are downcast to smaller types
train = train.astype(
{
"store_sales(in millions)": "float32",
"unit_sales(in millions)": "int8",
"total_children": "int8",
"num_children_at_home": "int8",
"avg_cars_at_home": "int8",
"gross_weight": "float32",
"recyclable_package": "float32",
"low_fat": "float32",
"units_per_case": "float32",
"store_sqft": "float64",
"coffee_bar": "float32",
"video_store": "float32",
"salad_bar": "float32",
"prepared_food": "float32",
"florist": "float32",
"cost": "float64",
}
)
original = original.astype(
{
"store_sales(in millions)": "float32",
"unit_sales(in millions)": "int8",
"total_children": "int8",
"num_children_at_home": "int8",
"avg_cars_at_home": "int8",
"gross_weight": "float32",
"recyclable_package": "float32",
"low_fat": "float32",
"units_per_case": "float32",
"store_sqft": "float64",
"coffee_bar": "float32",
"video_store": "float32",
"salad_bar": "float32",
"prepared_food": "float32",
"florist": "float32",
"cost": "float64",
}
)
print(
"Memory usage of train DataFrame after optimization is {:.2f} MB.".format(
train.memory_usage().sum() / 1024**2
)
)
# new data types
train.dtypes
print("Memory usage in KiloBytes:\n", train.memory_usage(deep=True) / 1024)
train.info()
original.info()
train.describe()
original.columns
fig, ax = plt.subplots(4, 4, figsize=(16, 10))
for ax, col in zip(ax.flat, test.columns.to_list()):
# getting the column names from test dataset because it doesn't have the target: cost.
# And also, we're not plotting the distribution of the target variable here.
sns.histplot(data=train, x=col, bins=80, kde=True, ax=ax)
sns.histplot(data=original, x=col, bins=80, kde=True, ax=ax, color="green")
sns.histplot(data=test, x=col, bins=80, kde=True, ax=ax, color="red")
fig.subplots_adjust(hspace=0.5, wspace=0.25)
fig.suptitle("Histogram plots of all numerical features")
# ### Distribution plot outcome:
# - No feature is normally distributed.
# - These features are categorical: ~~unit_sales(in millions),~~ `total_children, num_children_at_home, avg_cars_at_home, recyclable_package, low_fat, coffee_bar, video_store, salad_bar, prepared_food, florist.`
# - Rest are numeric.
# - Distribution of train and test datasets are almost equal. There's no distributional or covariate shift.
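# A quick, hedged check of the "no covariate shift" claim above for one numeric feature
# (this cell is an added sketch, not part of the original workflow; scipy is assumed to
# be available in the environment).
from scipy.stats import ks_2samp

stat, p = ks_2samp(train["store_sqft"], test["store_sqft"])
print("KS statistic for store_sqft: {:.4f}, p-value: {:.4f}".format(stat, p))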
# ## Distribution plot of target feature
fig = plt.figure(figsize=(5, 5))
ax = sns.histplot(data=train, kde=True, x="cost")
ax = sns.histplot(data=original, kde=True, x="cost")
fig.suptitle("Histogram plots of the target feature: cost")
# # EDA
train.avg_cars_at_home.describe()
print(
"Number of customers having cars: {0}, out of {1} total customers.".format(
train[train.avg_cars_at_home > 0].avg_cars_at_home.count(), train.shape[0]
)
)
print(
"No. of customers according to available cars:",
train.groupby("avg_cars_at_home").cost.count().rename_axis([""]),
)
# What's the gross weight of purchased items for customers having cars?
train.groupby(["avg_cars_at_home"]).gross_weight.mean().round(2).rename_axis(
["Gross Weight per Available Cars"]
)
# # Combining both original and train dataset
train = pd.concat([train, original])
train.shape
# # Baseline Regression Model
X = train.iloc[:, :-1]
y = train.iloc[:, -1]
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=0, test_size=0.33
)
base_model_0 = RandomForestRegressor()
base_model_0.fit(X_train, y_train)
base_pred_0 = base_model_0.predict(X_train)
base_test_pred_0 = base_model_0.predict(X_test)
# MAE output is non-negative floating point. The best value is 0.0.
print(
"Train MAE: {:.2f}".format(mean_absolute_error(y_true=y_train, y_pred=base_pred_0))
)
print("Test MAE: {:.2f}".format(mean_absolute_error(y_test, base_test_pred_0)))
print(
"Test RMSLE: {}".format(
mean_squared_log_error(y_test, base_test_pred_0, squared=False)
)
)
# A non-negative floating point value (the best value is 0.0).
print(
"\nTrain RMSE: {:.2f}".format(
mean_squared_error(y_train, base_pred_0, squared=False)
)
)
print(
"Test RMSE: {:.2f}".format(
mean_squared_error(y_test, base_test_pred_0, squared=False)
)
)
# Best possible score is 1.0 and it can be negative
# (because the model can be arbitrarily worse).
print("\nTrain r2: {:.2f}".format(r2_score(y_train, base_pred_0)))
print("Test r2: {:.2f}".format(r2_score(y_test, base_test_pred_0)))
# MAPE output is non-negative floating point. The best value is 0.0.
# But note that bad predictions can lead to arbitrarily large MAPE values,
# especially if some y_true values are very close to zero.
print(
"\nTrain MAPE: {:.2f}".format(mean_absolute_percentage_error(y_train, base_pred_0))
)
print(
"Test MAPE: {:.2f}".format(mean_absolute_percentage_error(y_test, base_test_pred_0))
)
# Best possible score is 1.0, lower values are worse.
print(
"\nTrain Explained Variance: {:.2f}".format(
explained_variance_score(y_train, base_pred_0)
)
)
print(
"Test Explained Variance: {:.2f}".format(
explained_variance_score(y_test, base_test_pred_0)
)
)
# # Data Scaling and Feature Selection
scaler = MinMaxScaler()
scaler.fit(X_train)
X_train_scaled = pd.DataFrame(data=scaler.transform(X_train), columns=X_train.columns)
X_test_scaled = pd.DataFrame(data=scaler.transform(X_test), columns=X_test.columns)
plt.figure(figsize=(12, 10))
mask = np.triu(np.ones_like(train.corr().round(2), dtype=bool))
ax = sns.heatmap(
train.corr().round(2),
annot=True,
linewidths=0.1,
annot_kws={"size": 8},
cbar=True,
mask=mask,
cmap="YlGnBu",
)
ax.set(facecolor="white")
ax.set_title("Correlation Plot of All Numerical Variables", fontsize=14)
# ### Features having absolute correlation above or equal to 0.6:
matrix = train.corr()
matrix = matrix.unstack()
# Here, we're excluding the self-correlated pairs, which always have a correlation of 1
matrix = matrix[(abs(matrix) >= 0.6) & (abs(matrix) != 1)]
print(matrix)
# ## Check for Multicorrelated features with Variance Inflation Factor
# ### VIF Scores
# A VIF value of 1 indicates there is no correlation between a given explanatory variable and any other explanatory variables in the model.
# A VIF value between 1 and 5 indicates moderate correlation between a given explanatory variable and other explanatory variables in the model,
# but this is often not severe enough to require attention.
# A VIF value greater than 5 indicates potentially severe correlation between a given explanatory variable and other explanatory variables in the model. In this case, the coefficient estimates and p-values in the regression output are likely unreliable.
# #### We use `statsmodels.stats.outliers_influence.variance_inflation_factor`
#
# train_num = df.select_dtypes(include='number') # Entire dataset is numeric
vif_data = pd.DataFrame()
vif_data["column_name"] = train.columns
vif_data["VIF"] = [
variance_inflation_factor(train.values, i) for i in range(train.shape[1])
]
vif_data
# select columns with VIF > 5
large_VIF_list = list(vif_data[vif_data["VIF"] > 5].column_name)
print("Features to drop: ", len(large_VIF_list))
# drop columns with VIF values accordingly
df_VIF = train.copy()
df_VIF.drop(large_VIF_list, axis=1, inplace=True)
print(
    "Features left after applying VIF to remove multicollinear features: ",
    df_VIF.columns.to_list(),
)
# # Apply Different Regression Models
# ### 1.1 LinearRegression
# With scaled data
linReg = LinearRegression()
linReg.fit(X_train_scaled, y_train)
linPred = linReg.predict(X_test_scaled)
print("RMSLE: {}".format(mean_squared_log_error(y_test, linPred, squared=False)))
# ### 1.2 LinearRegression with features left after applying VIF
# With the features left after applying VIF
X_train_vif = X_train_scaled[
[
"total_children",
"num_children_at_home",
"recyclable_package",
"low_fat",
"units_per_case",
"coffee_bar",
"video_store",
"florist",
]
]
X_test_vif = X_test_scaled[
[
"total_children",
"num_children_at_home",
"recyclable_package",
"low_fat",
"units_per_case",
"coffee_bar",
"video_store",
"florist",
]
]
linRegVIF = LinearRegression()
linRegVIF.fit(X_train_vif, y_train)
linPredVIF = linRegVIF.predict(X_test_vif)
print(mean_absolute_error(y_pred=linPredVIF, y_true=y_test))
print("RMSLE: {}".format(mean_squared_log_error(y_test, linPredVIF, squared=False)))
# *Even worse score!*
# ### 2. LassoCV
# lassoCV_model = LassoCV(alphas=np.arange(0, 1, 0.01))
# lassoCV_model.fit(X_train_scaled, y_train)
# lassoCV_pred = lassoCV_model.predict(X_test_scaled)
# print("MAE score: {:.2f}".format(mean_absolute_error(y_test, lassoCV_pred)))
# print("RMSLE: {}".format(mean_squared_log_error(y_test, lassoCV_pred, squared=False)))
# ### 3. RidgeCV
# ridgeCV_model = RidgeCV(alphas = [0.0001, 0.001,0.01, 0.1, 1, 10])
# ridgeCV_model.fit(X_train_scaled, y_train)
# ridgeCV_pred = ridgeCV_model.predict(X_test_scaled)
# print("The test score for RidgeCV model is: {:.2f}".format(mean_absolute_error(y_test, ridgeCV_pred)))
# print("RMSLE: {}".format(mean_squared_log_error(y_test, ridgeCV_pred, squared=False)))
# ### 4. ExtraTreesRegressor
# etr = ExtraTreesRegressor(random_state=0).fit(X_train, y_train) # unscaled data
# etr_pred = etr.predict(X_test)
# print("MAE: {:.2f}".format(mean_absolute_error(y_test, etr_pred)))
# print("RMSLE: {}".format(mean_squared_log_error(y_test, etr_pred, squared=False)))
# ### 5. GradientBoostingRegressor
# gbr = GradientBoostingRegressor(random_state=0).fit(X_train, y_train) # unscaled data
# gbr_pred = gbr.predict(X_test)
# print("MAE: {:.2f}".format(mean_absolute_error(y_test, gbr_pred)))
# print("RMSLE: {}".format(mean_squared_log_error(y_test, gbr_pred, squared=False)))
# ### 6. XGBRegressor
xgbr = xgb.XGBRegressor().fit(X_train, y_train)
xgbr_pred = xgbr.predict(X_test)
print("MAE: {:.2f}".format(mean_absolute_error(y_test, xgbr_pred)))
print("r2: {:.2f}".format(r2_score(y_test, xgbr_pred)))
print("RMSLE: {}".format(mean_squared_log_error(y_test, xgbr_pred, squared=False)))
# ### 7. LGBMRegressor
lgbm = LGBMRegressor(learning_rate=0.09, max_depth=-5, random_state=0).fit(
X_train, y_train
)
lgbm_pred = lgbm.predict(X_test)
print("RMSLE: {}".format(mean_squared_log_error(y_test, lgbm_pred, squared=False)))
# ### _So far, XGBRegressor produces the best RMSLE score._
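# Optional added sketch (not part of the original notebook): cross-validate the XGBoost
# model with the imported-but-unused RepeatedKFold for a more stable estimate than a
# single split. MAE is used as the metric here because, unlike MSLE, it places no
# non-negativity requirement on the predictions.
from sklearn.model_selection import cross_val_score

rkf = RepeatedKFold(n_splits=5, n_repeats=2, random_state=0)
cv_mae = -cross_val_score(
    xgb.XGBRegressor(), X, y, scoring="neg_mean_absolute_error", cv=rkf
)
print("CV MAE: {:.2f} +/- {:.2f}".format(cv_mae.mean(), cv_mae.std()))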
# # Final Prediction & submission
final_pred = xgbr.predict(test)
sample["cost"] = final_pred
sample.head()
sample.to_csv("submission.csv", index=False)
|
# Data Manipulation Libraries
import pandas as pd
import numpy as np
import csv
# EDA libraries
import matplotlib.pyplot as plt
import seaborn as sns
# Date libraries
import calendar
from datetime import datetime
# Jupyter Notebook Configuration
sns.set()
import warnings
warnings.filterwarnings("ignore")
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
ecom = pd.read_csv("/kaggle/input/ecommerce-data/data.csv", encoding="unicode_escape")
ecom.head()
ecom.shape
ecom.info()
ecom["InvoiceDate"] = pd.to_datetime(ecom["InvoiceDate"])
ecom.info()
# Number of orders (unique invoices)
ecom.InvoiceNo.nunique()
# Total quantity of items purchased
ecom.Quantity.sum()
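# A small added extension (assumption: this dataset includes the usual 'UnitPrice'
# column of the online-retail data); revenue per line item and overall revenue.
ecom["Revenue"] = ecom["Quantity"] * ecom["UnitPrice"]
print("Total revenue:", round(ecom["Revenue"].sum(), 2))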
|
# # Importing required libraries
import pandas as pd
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import numpy as np
import unicodedata
from textblob import TextBlob
import plotly.express as px
import contractions
import nltk
from time import time
from nltk.util import ngrams
import regex as re
import seaborn as sns
from tqdm.notebook import tqdm
import matplotlib.pyplot as plt
nltk.download("stopwords")
from nltk.stem import WordNetLemmatizer, SnowballStemmer
nltk.download("averaged_perceptron_tagger")
nltk.download("omw-1.4")
from nltk.corpus import stopwords
import multiprocessing
from imblearn.combine import SMOTETomek
from imblearn.under_sampling import TomekLinks
from sklearn.model_selection import cross_val_score
from sklearn.metrics import accuracy_score, confusion_matrix
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import MultinomialNB
from sklearn.svm import SVC
from xgboost import XGBClassifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import GridSearchCV
from gensim.models import Word2Vec, KeyedVectors
import warnings
warnings.filterwarnings("ignore")
# Importing the datasets
train = pd.read_csv(
"../input/poem-classification-nlp/Poem_classification - train_data.csv",
encoding="UTF-8",
)
test = pd.read_csv(
"../input/poem-classification-nlp/Poem_classification - test_data.csv",
encoding="UTF-8",
)
train.head()
train.info()
print(train.shape)
train = train.dropna(inplace=False)
train = train.drop_duplicates()
train = train.reset_index()
print(train.shape)
# There were a couple of NaN poem values but that's been dealt with.
print(test.shape)
test = test.dropna(inplace=False)
test = test.drop_duplicates()
test = test.reset_index()
print(test.shape)
train.Genre.value_counts()
train["Length"] = train["Poem"].apply(len)
test["Length"] = test["Poem"].apply(len)
fig = px.bar(train, x="Length", y="Genre", color="Genre")
fig.show()
# We observe that **'Affection'** has noticeably fewer samples than the other three genres, which are fairly similar in frequency. This imbalance can be dealt with by adding synthetic points using **SMOTE**, so that we end up with a balanced dataset (a sketch follows below).
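# A minimal, self-contained sketch of how the SMOTETomek import above could be used to
# balance the genres later in the pipeline (the arrays below are illustrative stand-ins,
# not variables from this notebook).
from imblearn.combine import SMOTETomek

rng = np.random.default_rng(0)
X_toy = rng.normal(size=(100, 5))  # stand-in for TF-IDF features
y_toy = np.array([0] * 80 + [1] * 20)  # imbalanced labels, like 'Affection'
X_res, y_res = SMOTETomek(random_state=0).fit_resample(X_toy, y_toy)
print(np.bincount(y_toy), "->", np.bincount(y_res))  # minority class gets oversampled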
# # Feature Extraction
def get_avg_word_len(x):
words = x.split()
word_len = 0
for word in words:
word_len = word_len + len(word)
return word_len / len(words)
def feature_extract(df, d):
stop = set(stopwords.words("english"))
df["count"] = df[d].apply(lambda x: len(str(x).split()))
df["char_count"] = df[d].apply(lambda x: len(x))
df["nwords"] = df[d].apply(lambda x: len(x.split(" ")))
# df['avg word_len'] = df[d].apply(lambda x:get_avg_word_len(x))
df["stop_words_len"] = df[d].apply(
lambda x: len([t for t in x.split() if t in stop])
)
df["polarity"] = df[d].map(lambda text: TextBlob(text).sentiment.polarity)
feature_extract(train, "Poem")
train.head()
feature_extract(test, "Poem")
test.head()
# These are the only features I could think of for extraction; suggestions for adding more are welcome.
# # Frequency Distribution of POS Tags based on Genre
def countPosbyGenre(dataframe, pos):
genreDict = {}
for i in range(len(dataframe)):
if dataframe.Genre.iloc[i] not in genreDict.keys():
genreDict[dataframe.Genre.iloc[i]] = dataframe.Poem.iloc[i]
else:
genreDict[dataframe.Genre.iloc[i]] += dataframe.Poem.iloc[i]
for key in genreDict.keys():
genreDict[key] = nltk.pos_tag(genreDict[key].split())
counter = 0
for i in range(len(genreDict[key])):
if genreDict[key][i][1] == pos:
counter += 1
genreDict[key] = counter
return genreDict
tags = ["NN", "RB", "VB", "PRP"]
for tag in tags:
print(f"{tag} --> {countPosbyGenre(train, tag)}")
for tag in tags:
print(f"{tag} --> {countPosbyGenre(test, tag)}")
# Comparing just a couple of tags, we observe that:
# * The noun (NN) tag occurs more often for the 'Affection' genre in the test set, whereas it's the opposite in the train set (which might affect model building).
# * Adverbs (RB) are comparatively rare, which is somewhat expected for poems.
# * Even for verbs (VB) and pronouns (PRP) there is a dissimilarity between the train and test sets.
# # Text Preprocessing
def clean_it(df):
lemmatizer = WordNetLemmatizer()
snow = SnowballStemmer(language="english")
stop = set(stopwords.words("english"))
for i in tqdm(range(len(df))):
df["Poem"][i] = str(df["Poem"][i])
df["Poem"][i] = re.sub("\n", " ", df["Poem"][i])
df["Poem"][i] = re.sub("_", "", df["Poem"][i])
df["Poem"][i] = " ".join(df["Poem"][i].split())
df["Poem"][i] = re.sub(r"\d+", "", df["Poem"][i])
df["Poem"][i] = re.sub("[\(\[].*?[\)\]]", "", df["Poem"][i])
df["Poem"][i] = re.sub("[()]", "", df["Poem"][i])
df["Poem"][i] = df["Poem"][i].lower()
df["Poem"][i] = contractions.fix(df["Poem"][i])
df["Poem"][i] = re.sub(r"[^\w\s]", "", df["Poem"][i])
df["Poem"][i] = re.sub(" +", " ", df["Poem"][i])
t = ""
for word in df["Poem"][i].split(" "):
if word not in stop and len(word) > 2:
# t += ' ' + snow.stem(word)
t += " " + lemmatizer.lemmatize(word)
df["Poem"][i] = t.strip()
# Lemmatization was preferred here over stemming because it considers the context of the word rather than simply chopping it down to a root form. However, lemmatization takes longer, which is worth keeping in mind for larger datasets.
# Other than that, fixed contractions and removed the stopwords which would otherwise prove to be a nuisance during the model building stage.
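# A small added illustration of that difference (assumes the NLTK 'wordnet' corpus is
# available, since the lemmatizer below relies on it).
from nltk.stem import WordNetLemmatizer, SnowballStemmer

_lem = WordNetLemmatizer()
_stem = SnowballStemmer(language="english")
for w in ["studies", "leaves", "running"]:
    print(w, "| stem:", _stem.stem(w), "| lemma:", _lem.lemmatize(w))
# e.g. 'studies' -> stem 'studi' (not a word) vs lemma 'study'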
clean_it(train)
clean_it(test)
def generate_ngrams(text, ngram):
stop = set(stopwords.words("english"))
text = " ".join(word for word in text.split(" ") if word not in stop)
words = [word for word in text.split(" ")]
temp = zip(*[words[i:] for i in range(0, ngram)])
ans = [" ".join(ngram) for ngram in temp]
return ans
grams = []
for i in range(len(train)):
grams.extend(generate_ngrams(train["Poem"][i], 3))
fdist = nltk.FreqDist(grams)
fdist.plot(30)
# # Word Embeddings
# Not much here, really. I mainly wanted to pave the way towards information retrieval, where, given a query, the model would return similar documents as output. However, my knowledge of Word2Vec doesn't stretch that far yet, so I settled for exploring similar words.
cores = multiprocessing.cpu_count()
w2v_model = Word2Vec(
min_count=20,
window=2,
vector_size=300,
sample=6e-5,
alpha=0.03,
min_alpha=0.0007,
negative=20,
workers=cores - 1,
)
# Building the vocab
t = time()
w2v_model.build_vocab([g.split() for g in grams], progress_per=10000)
print("Time to build vocab: {} mins".format(round((time() - t) / 60, 2)))
len(w2v_model.wv.key_to_index.keys())
# Parameters of the training:
# * total_examples = int - Count of sentences
# * epochs = int - Number of iterations over the corpus
t = time()
w2v_model.train(
train.Poem, total_examples=w2v_model.corpus_count, epochs=30, report_delay=1
)
print("Time to train the model: {} mins".format(round((time() - t) / 60, 2)))
# As we do not plan to train the model any further, we are calling init_sims(), which will make the model much more memory-efficient
w2v_model.init_sims(replace=True)
# Saving the model
w2v_model.wv.save_word2vec_format("custom_glove_300d.txt")
# Loading it
w2v_model = KeyedVectors.load_word2vec_format("custom_glove_300d.txt")
# Most dissimilar terms
w2v_model.most_similar(negative=["blue"])
# Most similar terms
w2v_model.most_similar(positive=["dark"])
# Most similar to word
w2v_model.similar_by_word("child")
# # Topic Modelling with LDA
# The reason behind trying Topic Modelling out was to discover the themes that are inherently hidden by analyzing the words of the original texts.
stop = set(stopwords.words("english"))
vect = TfidfVectorizer(stop_words=stop, max_features=1000)
vect_text = vect.fit_transform(train["Poem"])
search_params = {"n_components": [2, 4, 8, 12], "learning_decay": [0.5, 0.7]}  # n_components must be a positive integer
lda = LatentDirichletAllocation()
model = GridSearchCV(lda, param_grid=search_params)
model.fit(vect_text)
model.best_params_
n_topics = 4
model_lda = LatentDirichletAllocation(n_components=n_topics, random_state=0)
model_lda.fit(vect_text)
# Print the top 10 words per topic
n_words = 10
feature_names = vect.get_feature_names()
topic_list = []
for topic_idx, topic in enumerate(model_lda.components_):
top_n = [feature_names[i] for i in topic.argsort()[-n_words:]][::-1]
top_features = " ".join(top_n)
topic_list.append(f"topic_{'_'.join(top_n[:3])}")
print(f"Topic {topic_idx}: {top_features}")
print("\n\n\n")
topic_result = model_lda.transform(vect_text)
train["topic"] = topic_result.argmax(axis=1)
# These are the four different topics that the poems got classified with.
train
train[train.topic == 1]["Poem"]
train.head()
train.Genre.value_counts()
train.topic.value_counts()
# We notice a similar frequency distribution of Genre and topics.
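# Added cross-check: a normalized crosstab of genre against LDA topic makes the overlap
# between the labels and the unsupervised topics explicit (uses columns created above).
print(pd.crosstab(train["Genre"], train["topic"], normalize="index").round(2))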
# # Model Building
# # 1. **Naive Bayes**
sns.countplot(train["Genre"])
sns.countplot(test["Genre"])
# Encoding the **'Genre'** variable.
le = LabelEncoder()
train["Genre"] = le.fit_transform(train["Genre"])
test["Genre"] = le.transform(test["Genre"])
y_train = train.Genre.values
y_test = test.Genre.values
x_train = train.drop("Genre", axis=1)
x_test = test.drop("Genre", axis=1)
print(x_train.shape, y_train.shape)
print(x_test.shape, y_test.shape)
tfidf_vect = TfidfVectorizer()
x_train_tfidf = tfidf_vect.fit_transform(x_train.Poem)
x_train_tfidf.shape
x_test_tfidf = tfidf_vect.transform(x_test.Poem)
# Using Multinomial Naive Bayes in this case.
clf = MultinomialNB().fit(x_train_tfidf, train.Genre)
predicted = clf.predict(x_test_tfidf)
np.mean(predicted == y_test)
# Time for Gaussian NB.
clf = GaussianNB().fit(x_train_tfidf.toarray(), train.Genre)
predicted = clf.predict(x_test_tfidf.toarray())
print(np.round(np.mean(predicted == y_test), 3))
# # 2. **Support Vector Classifier**
clf = SVC().fit(x_train_tfidf.toarray(), train.Genre)
predicted = clf.predict(x_test_tfidf.toarray())
np.mean(predicted == y_test)
# # 3. **XGBoost Classifier**
clf = XGBClassifier().fit(x_train_tfidf.toarray(), train.Genre)
predicted = clf.predict(x_test_tfidf.toarray())
np.mean(predicted == y_test)
# Out of all the classical ML models, XGBoost fared best, with about 33 percent accuracy.
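# Added check (not in the original flow): confusion matrix for the best classical model,
# using the confusion_matrix imported above ('predicted' still holds the XGBoost output).
print(confusion_matrix(y_test, predicted))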
# # 4. **Using Tensorflow**
data = pd.concat([train, test], axis=0)
data = data.dropna()
poems = np.array(data["Poem"])
genres = np.array(data["Genre"])
poems.shape, genres.shape
train_poems = poems[:900]
test_poems = poems[900:]
train_genres = genres[:900]
test_genres = genres[900:]
tokenizer = Tokenizer(num_words=1000, oov_token="<OOV>")
tokenizer.fit_on_texts(train_poems)
word_index = tokenizer.word_index
training_sequences = tokenizer.texts_to_sequences(train_poems)
training_padded = pad_sequences(
training_sequences, maxlen=200, padding="post", truncating="post"
)
testing_sequences = tokenizer.texts_to_sequences(test_poems)
testing_padded = pad_sequences(
testing_sequences, maxlen=200, padding="post", truncating="post"
)
training_padded = np.array(training_padded)
train_genres = np.array(train_genres)
testing_padded = np.array(testing_padded)
test_genres = np.array(test_genres)
model = tf.keras.Sequential()
model.add(tf.keras.layers.Embedding(10000, 16, input_length=200))
model.add(tf.keras.layers.GlobalAveragePooling1D())
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(50, activation="relu"))
model.add(tf.keras.layers.Dense(25, activation="relu"))
model.add(tf.keras.layers.Dense(4, activation="softmax"))
opt = tf.keras.optimizers.Adam()
model.compile(
loss="sparse_categorical_crossentropy", optimizer=opt, metrics=["accuracy"]
)
model.summary()
num_epochs = 100
history = model.fit(
training_padded,
train_genres,
epochs=num_epochs,
validation_data=(testing_padded, test_genres),
verbose=2,
)
|
# ### Exploratory Data Analysis
#
# ##### Data made available by Airbnb: http://insideairbnb.com/get-the-data.html
# As a first step, I will analyse this data, and then move on to building a model that predicts Airbnb listing prices in Rio, similar to the price suggestions already made inside the app itself; I will try to replicate that algorithm in a different way.
# ### Data source
# ###### Notebook overview:
# This dataset has around 33,715 observations and 16 columns; the categories are a mix of text and numeric values.
# ### Acquiring the data:
# ##### Importing the required libraries:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import seaborn as sns
airbnb = pd.read_csv("../input/airbnb-rio/Rio_listings.csv")
airbnb.head(10)
# checking the number of rows in the dataset to understand the size we are working with
len(airbnb)
# checking type of every column in the dataset
airbnb.dtypes
# ##### At this point, the data needs to be cleaned.
# Check whether there is data that needs to be removed from the dataset
airbnb.isnull().sum()
# Now we need to decide whether or not to keep some of the columns.
# We drop these variables:
# 'id','host_name','last_review','neighbourhood_group','reviews_per_month'
# id and host_name - For ethical reasons, they are not needed in this model, even though we could build a profile and develop an algorithm to predict the chance of a person using the app. That is not our goal here.
# last_review, neighbourhood_group and reviews_per_month - Irrelevant to the model.
#
# Although information about the neighbourhood group could be relevant to the model, in our dataset this column is empty.
# Dropping the columns:
airbnb.drop(
["id", "host_name", "last_review", "neighbourhood_group", "reviews_per_month"],
axis=1,
inplace=True,
)
# Checking the result:
airbnb.head(5)
# examining the unique neighbourhood values:
airbnb.neighbourhood.unique()
# What are the room types?
airbnb.room_type.unique()
# ### Actually exploring the data
# Let's understand who the heaviest users (hosts) are and show where they sit in the distribution, as well as understand each of the columns and how they can help us build the prediction model.
# We'll start with host_id, finding out how many unique hosts we have and then showing which hosts have the most listings.
top_host = airbnb.host_id.value_counts().head(10)
top_host
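# The plan above also asks how many unique hosts there are; a quick count:
print("Unique hosts:", airbnb.host_id.nunique())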
# coming back to our dataset, we can confirm our findings with the already existing column 'calculated_host_listings_count'
top_host_check = airbnb.calculated_host_listings_count.max()
top_host_check
# setting figure size for future visualizations
sns.set(rc={"figure.figsize": (10, 8)})
viz_1 = top_host.plot(kind="bar")
viz_1.set_title("Hosts with the most listings in Rio")
viz_1.set_ylabel("Count of listings")
viz_1.set_xlabel("Host IDs")
viz_1.set_xticklabels(viz_1.get_xticklabels(), rotation=45)
# Good spread: the largest host has more than 200 listings.
# Let's find out more about these neighbourhoods: 'Copacabana', 'Ipanema', 'Barra da Tijuca', 'Flamengo', 'Leblon'
# Copacabana
sub_1 = airbnb.loc[airbnb["neighbourhood"] == "Copacabana"]
price_sub1 = sub_1[["price"]]
# Ipanema
sub_2 = airbnb.loc[airbnb["neighbourhood"] == "Ipanema"]
price_sub2 = sub_2[["price"]]
# Barra da Tijuca
sub_3 = airbnb.loc[airbnb["neighbourhood"] == "Barra da Tijuca"]
price_sub3 = sub_3[["price"]]
# Flamengo
sub_4 = airbnb.loc[airbnb["neighbourhood"] == "Flamengo"]
price_sub4 = sub_4[["price"]]
# Leblon
sub_5 = airbnb.loc[airbnb["neighbourhood"] == "Leblon"]
price_sub5 = sub_5[["price"]]
# putting all the prices' dfs in the list
price_list_by_n = [price_sub1, price_sub2, price_sub3, price_sub4, price_sub5]
# creating an empty list that we will append later with price distributions for each neighbourhood
p_l_b_n_2 = []
# creating list with known values in neighbourhood_group column
nei_list = ["Copacabana", "Ipanema", "Barra da Tijuca", "Flamengo", "Leblon"]
# creating a for loop to get statistics for price ranges and append it to our empty list
for x in price_list_by_n:
i = x.describe(percentiles=[0.25, 0.50, 0.75])
i = i.iloc[3:]
i.reset_index(inplace=True)
i.rename(columns={"index": "Stats"}, inplace=True)
p_l_b_n_2.append(i)
# changing names of the price column to the area name for easier reading of the table
p_l_b_n_2[0].rename(columns={"price": nei_list[0]}, inplace=True)
p_l_b_n_2[1].rename(columns={"price": nei_list[1]}, inplace=True)
p_l_b_n_2[2].rename(columns={"price": nei_list[2]}, inplace=True)
p_l_b_n_2[3].rename(columns={"price": nei_list[3]}, inplace=True)
p_l_b_n_2[4].rename(columns={"price": nei_list[4]}, inplace=True)
# finalizing our dataframe for the final view
stat_df = p_l_b_n_2
stat_df = [df.set_index("Stats") for df in stat_df]
stat_df = stat_df[0].join(stat_df[1:])
stat_df
# we can see from our statistical table that we have some extreme values, therefore we need to remove them for the sake of a better visualization
# creating a sub-dataframe with no extreme values / less than 500
sub_6 = airbnb[airbnb.price < 500]
# using violinplot to showcase density and distribution of prices
viz_2 = sns.violinplot(data=sub_6, x="neighbourhood", y="price")
viz_2.set_title("Density and distribution of prices for each neighbourhood")
# ##### Interpreting the table and the plot:
# With the statistical table and the violin plot we can observe the distribution of prices for Airbnb listings in these five Rio neighbourhoods: the table above gives the quartiles for each area, while the violin plot (restricted to listings under $500 to drop the extreme values) shows the density of prices, making it easy to compare which neighbourhoods command the highest nightly rates.
# as we saw earlier from unique values for neighbourhood there are way too many to concentrate on;
# therefore, let's grab just top 10 neighbourhoods that have the most listings in them
# finding out top 10 neighbourhoods
airbnb.neighbourhood.value_counts().head(10)
# let's now combine this with the neighbourhoods and room type for a richer visualization
# grabbing top 10 neighbourhoods for sub-dataframe
sub_7 = airbnb.loc[
airbnb["neighbourhood"].isin(
[
"Copacabana",
"Barra da Tijuca",
"Ipanema",
"Jacarepaguá",
"Botafogo ",
"Recreio dos Bandeirantes",
"Leblon",
"Santa Teresa",
"Centro",
"Flamengo",
]
)
]
# using catplot to represent multiple interesting attributes together and a count
viz_3 = sns.catplot(
x="neighbourhood", hue="neighbourhood", col="room_type", data=sub_7, kind="count"
)
viz_3.set_xticklabels(rotation=90)
# let's see what we can do with our longitude and latitude columns
# let's see how scatterplot will come out
viz_4 = sub_6.plot(
kind="scatter",
x="longitude",
y="latitude",
label="availability_365",
c="price",
cmap=plt.get_cmap("jet"),
colorbar=True,
alpha=0.4,
figsize=(10, 8),
)
viz_4.legend()
# Good, the scatterplot worked just fine to output our latitude and longitude points. However, it would be nice to have a map below for a fully immersive heatmap in our case - let's see what we can do!
print(airbnb["latitude"].min())
import folium
from folium.plugins import HeatMap
m = folium.Map([-22.96592, -43.17896], zoom_start=11)
HeatMap(
airbnb[["latitude", "longitude"]].dropna(),
radius=8,
gradient={0.2: "blue", 0.4: "purple", 0.6: "orange", 1.0: "red"},
).add_to(m)
display(m)
# let's come back now to the 'name' column, as it will require a little bit more coding, and continue to analyse it!
# initializing empty list where we are going to put our name strings
_names_ = []
# getting name strings from the column and appending it to the list
for name in airbnb.name:
_names_.append(name)
# setting a function that will split those name strings into separate words
def split_name(name):
spl = str(name).split()
return spl
# initializing empty list where we are going to have words counted
_names_for_count_ = []
# getting name string from our list and using split function, later appending to list above
for x in _names_:
for word in split_name(x):
word = word.lower()
_names_for_count_.append(word)
# we are going to use counter
from collections import Counter
# let's see top 25 used words by host to name their listing
_top_25_w = Counter(_names_for_count_).most_common()
_top_25_w = _top_25_w[0:25]
# now let's put our findings in dataframe for further visualizations
sub_w = pd.DataFrame(_top_25_w)
sub_w.rename(columns={0: "Words", 1: "Count"}, inplace=True)
# we are going to use barplot for this visualization
viz_5 = sns.barplot(x="Words", y="Count", data=sub_w)
viz_5.set_title("Counts of the top 25 used words for listing names")
viz_5.set_ylabel("Count of words")
viz_5.set_xlabel("Words")
viz_5.set_xticklabels(viz_5.get_xticklabels(), rotation=80)
# English keywords may indicate demand from international tourism, especially around the beaches.
# the last column we need to look at is 'number_of_reviews'
# let's grab the 10 most reviewed listings in Rio
top_reviewed_listings = airbnb.nlargest(10, "number_of_reviews")
top_reviewed_listings
price_avrg = top_reviewed_listings.price.mean()
print("Average price per night: {}".format(price_avrg))
|
import pandas as pd, numpy as np, gc
from sklearn.model_selection import KFold, GroupKFold
from xgboost import XGBClassifier
from sklearn.metrics import f1_score
import jo_wilder
# tmp = pd.read_csv('/kaggle/input/predict-student-performance-from-game-play/train.csv', usecols=[0])
# tmp = tmp.groupby('session_id')['session_id'].agg('count')
env = jo_wilder.make_env()
iter_test = env.iter_test()
for i in range(10):
test, ss = next(iter_test)
print(f"Submission batch {i}")
print("Session ids in test data:", test["session_id"].unique())
print("Game levels in test data", test["level"].unique())
print(
"Session ids in sample submission",
ss["session_id"].apply(lambda x: x.split("_")[0]).astype("int64").unique(),
)
print(
"Questions in sample submission",
ss["session_id"].apply(lambda x: x.split("_")[-1][1:]).astype("int8").unique(),
)
print()
env.predict(ss) # Need to submit before loading next batch of test data
test, ss = next(iter_test)
test["session_id"].unique(), test["level"].unique()
ss["session_id"].apply(lambda x: x.split("_")[-1][1:]).astype("int8")
ss["session_id"].apply(lambda x: x.split("_")[0]).astype("int64")
PIECES = 10
# CHUNK = int(np.ceil(len(tmp) / PIECES))  # depends on `tmp`, which is commented out above
reads = []
skips = [0]
train_labels = pd.read_csv(
"/kaggle/input/predict-student-performance-from-game-play/train_labels.csv"
)
train_labels["q"] = (
train_labels["session_id"].apply(lambda s: s.split("_")[-1][1:]).astype("int8")
)
train_labels["session_id"] = (
train_labels["session_id"].apply(lambda s: s.split("_")[0]).astype("int64")
)
train_labels.groupby("session_id")["q"].nunique()
session_id = 20090312431273200
train_labels.loc[train_labels["session_id"] == session_id]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv(
"/kaggle/input/cancer-patients-and-air-pollution-a-new-link/cancer patient data sets.csv"
)
df.info()
df["Level"].value_counts()
df.columns
X = df.drop(["index", "Patient Id", "Level"], axis=1).values
y = df["Level"]
from sklearn.preprocessing import LabelEncoder
encoder = LabelEncoder()
y = encoder.fit_transform(y)
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
X, y, test_size=1 / 3, random_state=3
)
# ### KNN.
from sklearn.neighbors import KNeighborsClassifier
KNN = KNeighborsClassifier(n_neighbors=5).fit(x_train, y_train)
y_pred = KNN.predict(x_test)
from sklearn.metrics import accuracy_score
acc = accuracy_score(y_test, y_pred)
acc
|
# # Word and line count per character in ST:TNG
# To start, let's have a look at the most lines delivered by each character in Star Trek: The Next Generation (TNG), using a dataset that has already grouped lines by character for every episode. We'll see that this dataset is unreliable and leaves out key characters.
# Let's start with our basic imports for numpy and pandas.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# ## Functions.
# We will reuse these functions to show the difference between two datasets.
# Takes two variables as input:
# - series_lines: all lines in a series, grouped by episode
# - use_episode_name_as_index: True if series_lines uses 'episode 0' as the indexing convention, False if it uses numbers
# Returns two variables:
# - total WORD count by character
# - total LINE count by character
def word_line_counts(series_lines, use_episode_name_as_index):
total_word_counts = {}
total_line_counts = {}
for i, ep in enumerate(episodes):
# Different datasets require different index types
if use_episode_name_as_index:
index = ep
else:
index = i
if series_lines[index] is not np.NaN:
for member in list(series_lines[index].keys()):
total_words_by_member_in_ep = sum(
[len(line.split()) for line in series_lines[index][member]]
)
total_lines_by_member_in_ep = len(series_lines[index][member])
if member in total_word_counts.keys():
total_word_counts[member] = (
total_word_counts[member] + total_words_by_member_in_ep
)
total_line_counts[member] = (
total_line_counts[member] + total_lines_by_member_in_ep
)
else:
total_word_counts[member] = total_words_by_member_in_ep
total_line_counts[member] = total_lines_by_member_in_ep
return total_word_counts, total_line_counts
# Creates two graphs:
# - a bar chart showing most WORDS spoken by the top 25 characters
# - a bar chart showing most LINES spoken by the top 25 characters
def graph_word_line_counts(word_counts, line_counts):
# Place the figures side-by-side
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 5))
# Plot number of words
words_df = pd.DataFrame(
list(word_counts.items()), columns=["Character", "No. of Words"]
)
most_words = words_df.sort_values(by="No. of Words", ascending=False).head(25)
most_words.plot.bar(x="Character", y="No. of Words", ax=ax1)
# Plot number of lines
lines_df = pd.DataFrame(
list(line_counts.items()), columns=["Character", "No. of Lines"]
)
most_lines = lines_df.sort_values(by="No. of Lines", ascending=False).head(25)
most_lines.plot.bar(x="Character", y="No. of Lines", ax=ax2)
plt.show()
# ## Import data.
# This dataset contains every series of Star Trek. We will only import the data for TNG.
# Dataset where all lines are grouped into episodes, and then grouped by character
all_series_lines = pd.read_json("../input/start-trek-scripts/all_series_lines.json")
# Isolate TNG episodes and lines
episodes = all_series_lines["TNG"].keys()
series_lines = all_series_lines["TNG"]
# ## Graph data.
# Now we can use our functions to graph the data!
word_counts, line_counts = word_line_counts(series_lines, True)
graph_word_line_counts(word_counts, line_counts)
# ## Analyze data.
# There is a popular recurring character in The Next Generation, Q, who is conspicuously missing from the graphs above. Manually looking at the data, there are several lines ending in "Q". There must have been a data-cleaning error, where some characters' names end up appended to another character's spoken lines, erasing those lines from the dataset entirely.
# Let's fetch some examples of such dirty data.
already_seen_dirty = []
for i, ep in enumerate(episodes):
if series_lines[ep] is not np.NaN:
for member in list(series_lines[ep].keys()):
for line in series_lines[ep][member]:
# If the line ends in an uppercase letter, we can assume it's dirty
if (
len(line) > 0
and line[-1].isupper()
and line[-1] not in already_seen_dirty
):
already_seen_dirty.append(line[-1])
print(line)
# There are lots of different characters losing out on line counts! Out of curiosity, how many lines in this dataset are dirty?
total_line_count = 0
dirty_line_count = 0
for i, ep in enumerate(episodes):
if series_lines[ep] is not np.NaN:
for member in list(series_lines[ep].keys()):
for line in series_lines[ep][member]:
total_line_count += 1
# If the line ends in an uppercase letter, we can assume it's dirty
if len(line) > 0 and line[-1].isupper():
dirty_line_count += 1
print(f"Total lines: {total_line_count}")
print(f"Dirty lines: {dirty_line_count}")
print(f"Percent dirty: {round((dirty_line_count / total_line_count)*100, 3)}%")
# # Processing the data
# Let's process the raw data ourselves, to ensure we get an accurate count.
# ## Import raw data.
# Dataset where lines are grouped into episodes, and all lines are stored in raw text
all_scripts_raw = pd.read_json("../input/start-trek-scripts/all_scripts_raw.json")
# Isolate TNG episodes and lines
episodes = all_scripts_raw["TNG"].keys()
series_lines_raw = all_scripts_raw["TNG"]
# ## Clean data.
import re
def clean_episode(episode_text):
# Remove text at the beginning of each episode
cleaned = re.sub(
"(The Next Generation Transcripts(.|\n)*?)([A-Z']+:+)", r"\3", episode_text
)
# Remove text at the end of each episode
cleaned = re.sub("<Back(.|\n)*", "", cleaned)
# Remove instances of " [OC]", denoting when a character is speaking off-camera.
cleaned = cleaned.replace(" [OC]", "")
# Remove any descriptions of the scene, which are surrounded by [] and () parentheses.
cleaned = re.sub(" ?[\(\[](.|\n)*?[\)\]]", "", cleaned)
# Remove non-breaking space characters
cleaned = cleaned.replace("\u00a0", "")
    # Collapse repeated '\n' sequences (this could be done more cleanly with a regex)
cleaned = cleaned.replace("\n \n", "\n")
cleaned = cleaned.replace("\n\n", "\n")
cleaned = cleaned.replace("\n\n", "\n")
cleaned = cleaned.replace("\n\n", "\n")
# Remove multiple instances of ':'.
cleaned = cleaned.replace("::", ":")
# Remove lines starting with "Captain's log", since they do not include data on which character spoke them.
cleaned = re.sub(
"((Captain's log|Ship's log|First [Oo]fficer's log)(.|\n)*?)([A-Z']+:)",
r"\4",
cleaned,
)
# Remove newlines that are in the middle of a spoken line
cleaned = re.sub("(?<! )\n(?!([A-Z']*?:))", " ", cleaned)
# Remove any leading and trailing spaces/newlines
cleaned = cleaned.strip()
return cleaned
# ### Before and after cleaning.
cleaned_episode_example = clean_episode(series_lines_raw[1])
print("★★★★★ ORIGINAL ★★★★★")
print(series_lines_raw[1][0:850])
print("\n★★★★★ CLEANED ★★★★★")
print(cleaned_episode_example[0:301])
series_lines_clean = []
for i, ep in enumerate(episodes):
series_lines_clean.append(clean_episode(series_lines_raw[i]))
# ## Group lines by character.
def group_by_character(episode_text):
lines_by_character = {}
split_lines = episode_text.split("\n")
for line in split_lines:
name = re.search("([A-Z']+)(?=:+)", line)
words = re.search("(?<=:)(.*)", line)
if name is not None:
name = name.group(0).strip()
words = words.group(0).strip()
if name in lines_by_character.keys():
lines_by_character[name].append(words)
else:
lines_by_character[name] = [words]
return lines_by_character
series_lines_by_character = []
for i, ep in enumerate(episodes):
series_lines_by_character.append(group_by_character(series_lines_clean[i]))
# ## Graph the cleaned data.
clean_word_counts, clean_line_counts = word_line_counts(
series_lines_by_character, False
)
graph_word_line_counts(clean_word_counts, clean_line_counts)
# ## Analyze graphs.
# As expected, Q now shows up on the graphs, and Ro has appeared as well! Looking at the two sets of graphs side by side makes the difference in counts easy to compare.
print("Original graphs")
word_counts, line_counts = word_line_counts(series_lines, True)
graph_word_line_counts(word_counts, line_counts)
print("Cleaned graphs")
clean_word_counts, clean_line_counts = word_line_counts(
series_lines_by_character, False
)
graph_word_line_counts(clean_word_counts, clean_line_counts)
|
import numpy as np
import keras
import pandas as pd
from keras.models import Sequential
from keras.layers import (
Dense,
Conv2D,
BatchNormalization,
Dropout,
MaxPool2D,
Input,
Softmax,
Activation,
Flatten,
)
from keras.models import Model
from keras import optimizers
from keras.utils.np_utils import to_categorical
from keras.layers import concatenate, AveragePooling2D
from keras.callbacks import (
ModelCheckpoint,
CSVLogger,
LearningRateScheduler,
ReduceLROnPlateau,
)
from keras.regularizers import l2
from keras.applications.inception_v3 import InceptionV3
from keras.applications.inception_v3 import preprocess_input, decode_predictions
from keras.preprocessing import image
from keras.preprocessing.image import ImageDataGenerator, load_img, img_to_array
from keras.layers import Input
import os
import cv2
import scipy
import skimage
from keras.callbacks import ModelCheckpoint
from keras.utils.vis_utils import plot_model
import matplotlib.pyplot as plt
from glob import glob
# # Display Some Example Images
from pathlib import Path
import random
train_path = "../input/blood-cells/dataset2-master/dataset2-master/images/TRAIN/"
test_path = "../input/blood-cells/dataset2-master/dataset2-master/images/TEST/"
classes = []
for cell in os.listdir(train_path):
classes.append(cell)
class_to_ind = dict(zip(classes, range(len(classes))))
ind_to_class = dict(zip(range(len(classes)), classes))
def load_sample_imgs(path):
rows = 10
cols = 5
sorted_dirs = sorted(os.listdir(path))
fig, axes = plt.subplots(rows, cols, figsize=(30, 10))
class_arr = ["MONOCYTE", "EOSINOPHIL", "NEUTROPHIL", "LYMPHOCYTE"]
for i in range(rows):
for j in range(cols):
cell = random.choice(class_arr)
all_files = os.listdir(path + "/" + cell)
rand_img = random.choice(all_files)
img = plt.imread(path + "/" + cell + "/" + rand_img)
axes[i][j].imshow(img)
ec = (0, 0.6, 0.1)
fc = (0, 0.7, 0.2)
axes[i][j].text(
0,
-20,
cell,
size=10,
rotation=0,
ha="left",
va="top",
bbox=dict(boxstyle="round", ec=ec, fc=fc),
)
plt.setp(axes, xticks=[], yticks=[])
plt.tight_layout(rect=[0, 0.03, 1, 0.95])
load_sample_imgs(train_path)
print(class_to_ind)
print(ind_to_class)
# # Load Test and Train Data
def get_data(path):
X = []
y = []
for cell in class_to_ind:
for image_name in os.listdir(path + "/" + cell):
img_file = cv2.imread(path + "/" + cell + "/" + image_name)
if img_file is not None:
img_file = cv2.resize(img_file, (60, 80))
img = np.asarray(img_file)
X.append(img)
y.append(class_to_ind[cell])
X = np.asarray(X)
y = np.asarray(y)
return X, y
X_train, y_train = get_data(train_path)
X_test, y_test = get_data(test_path)
print("done loading data")
print(X_train.shape)
# # Encode values to one-hot vectors
y_train_cat = to_categorical(y_train, num_classes=4)
y_test_cat = to_categorical(y_test, num_classes=4)
print(y_train_cat.shape)
print(y_test_cat.shape)
# # Augment Data
train_datagen = ImageDataGenerator(
featurewise_center=False, # set input mean to 0 over the dataset
samplewise_center=False, # set each sample mean to 0
featurewise_std_normalization=False, # divide inputs by std of the dataset
samplewise_std_normalization=False, # divide each input by its std
zca_whitening=False, # apply ZCA whitening
rotation_range=0, # randomly rotate images in the range (degrees, 0 to 180)
width_shift_range=0.2, # randomly shift images horizontally (fraction of total width)
height_shift_range=0.2, # randomly shift images vertically (fraction of total height)
horizontal_flip=True, # randomly flip images
    vertical_flip=False,  # randomly flip images
zoom_range=[0.8, 1],
channel_shift_range=30,
fill_mode="reflect",
)
train_generator = train_datagen.flow(X_train, y_train_cat, batch_size=32)
test_datagen = ImageDataGenerator()
test_generator = test_datagen.flow(X_test, y_test_cat, batch_size=32)
# # Create Model
inp = Input(shape=(80, 60, 3))
k = BatchNormalization()(inp)
k = Conv2D(32, (7, 7), padding="same", activation="relu", strides=(2, 2))(k)
k = MaxPool2D(pool_size=(3, 3), padding="same", strides=(2, 2))(k)
k = Conv2D(32, (3, 3), padding="same", activation="relu", strides=(1, 1))(k)
k = MaxPool2D(pool_size=(3, 3), padding="same", strides=(2, 2))(k)
for j in range(1, 4 + 1):
out_conv = []
for i in [(1, 1), (3, 3), (5, 5), (0, 0)]:
p = k
if i == (1, 1):
p = Conv2D(32, (1, 1), padding="same", activation="relu")(p)
out_conv.append(Conv2D(32, (1, 1), padding="same", activation="relu")(p))
elif i == (0, 0):
p = MaxPool2D(pool_size=(2, 2), padding="same", strides=(1, 1))(p)
out_conv.append(Conv2D(32, (1, 1), padding="same", activation="relu")(p))
else:
p = Conv2D(32, (1, 1), padding="same", activation="relu")(p)
p = Conv2D(32, i, padding="same", activation="relu")(p)
out_conv.append(Conv2D(32, i, padding="same", activation="relu")(p))
x = concatenate(out_conv, axis=-1)
# if j%2 == 0:
# x = MaxPool2D(pool_size=(3, 3), padding="same",strides=(2,2))(x)
# x = BatchNormalization(axis=-1)(x)
k = x
# x = Dropout(0.5)(k)
x = MaxPool2D(pool_size=(7, 7), padding="same", strides=(2, 2))(x)
x = Flatten()(x)
# x = Dense(1024,activation="relu")(x)
# x = BatchNormalization()(x)
# x = Dropout(0.5)(x)
y = Dense(4, activation="softmax")(x)
# z = Dense(2,activation="softmax")(x)
model = Model(inp, y)
opt = optimizers.Adam(lr=0.01, decay=0.0001)
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
print(model.summary())
# # Train Model
history = model.fit(
X_train, y_train_cat, batch_size=32, epochs=5, validation_data=(X_test, y_test_cat)
)
y_test_pred = model.evaluate(X_test, y_test_cat, verbose=1)
# # Prediction Loss and Accuracy
print(y_test_pred)
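# Added sketch: plot the training curves stored in `history` above (the accuracy key name
# differs between Keras versions, so it is looked up defensively).
acc_key = "accuracy" if "accuracy" in history.history else "acc"
plt.plot(history.history[acc_key], label="train")
plt.plot(history.history["val_" + acc_key], label="validation")
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.legend()
plt.show()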
# # Make Some Predictions
for cell in class_to_ind:
for i in range(5):
image_arr = os.listdir(test_path + "/" + cell)
random_img = random.choice(image_arr)
img_file = cv2.imread(test_path + "/" + cell + "/" + random_img)
if img_file is not None:
img_file = cv2.resize(img_file, (60, 80))
img = np.asarray(img_file)
X = []
X.append(img)
X = np.asarray(X)
cell_pred = model.predict(X)
cell_top_pred = np.argmax(cell_pred, axis=1)
print("Current cell: " + cell)
print("Current Prediction: " + ind_to_class[cell_top_pred[0]])
|
# # MNIST Exploratory Data Analysis (EDA)
# In this notebook, we explore the MNIST data set to gain a better understanding of it.
# ## Import packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# ## Define constants
IMAGE_SIZE = (28, 28)
# ## Load data
# We load both the train set and test set as Pandas data frames.
train_data = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
test_data = pd.read_csv("/kaggle/input/digit-recognizer/test.csv")
train_features = train_data.drop("label", axis=1)
train_label = train_data["label"]
# ### Train data set
train_data
train_data.describe()
# ### Test data set
test_data
test_data.describe()
# ## Check for missing values
# There are no missing values for either the train or test set.
train_data.isna().sum().sum()
test_data.isna().sum().sum()
# ## Check for categorical variables
# There are no categorical variables in our data set.
train_data.dtypes
test_data.dtypes
# ## Plot label distribution
# From our label distribution, we see that our data set is balanced since the labels share similar frequencies.
plt.hist(train_label)
plt.title("Digit Label Distribution")
plt.xlabel("Label")
plt.ylabel("Frequency")
plt.show()
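# A quick numeric complement to the histogram above: the relative frequency of each label.
print(train_label.value_counts(normalize=True).sort_index().round(3))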
# ## Plot examples
# ### Example image from train set
def plot_train_example(idx):
example = train_features.loc[idx, :]
example = np.array(example)
example = np.reshape(example, IMAGE_SIZE)
example_plot = plt.imshow(example, plt.cm.gray)
plt.colorbar()
fig = plt.figure(figsize=(18, 6), dpi=80)
for i in range(0, 10):
plt.subplot(2, 5, i + 1)
plot_train_example(i)
fig.suptitle("(a)", fontsize="xx-large")
plt.savefig("train_sample.png", dpi=fig.dpi)
plt.show()
# ## Example image from test set
def plot_test_example(idx):
example = test_data.loc[idx, :]
example = np.array(example)
example = np.reshape(example, IMAGE_SIZE)
example_plot = plt.imshow(example, plt.cm.gray)
plt.colorbar()
fig = plt.figure(figsize=(18, 6), dpi=80)
for i in range(0, 10):
plt.subplot(2, 5, i + 1)
plot_test_example(i)
fig.suptitle("(b)", fontsize="xx-large")
plt.savefig("test_sample.png", dpi=fig.dpi)
plt.show()
# ## Plot Pixel Value Histogram
# From the histogram, we see that the images mostly consist of 0s (long tail distribution).
fig = plt.figure()
plt.hist(train_features.values.flatten(), log=True, bins=255)
plt.title("(a) Train Set Pixel Value Distribution")
plt.xlabel("Pixel Value")
plt.ylabel("Frequency")
plt.savefig("pixel-distribution-train.png", dpi=fig.dpi)
plt.show()
fig = plt.figure()
plt.hist(test_data.values.flatten(), log=True, bins=255)
plt.title("(b) Test Set Pixel Value Distribution")
plt.xlabel("Pixel Value")
plt.ylabel("Frequency")
plt.savefig("pixel-distribution-test.png", dpi=fig.dpi)
plt.show()
fig = plt.figure()
plt.hist(
np.concatenate((train_features.values.flatten(), test_data.values.flatten())),
log=True,
bins=255,
)
plt.title("(c) Total Pixel Value Distribution")
plt.xlabel("Pixel Value")
plt.ylabel("Frequency")
plt.savefig("pixel-distribution-total.png", dpi=fig.dpi)
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import pickle
import matplotlib.pylab as plt
import time
import json
import keras
from keras.models import Model, Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.layers.convolutional import Conv2D
from keras.layers.pooling import AveragePooling2D, GlobalAveragePooling2D, MaxPooling2D
from keras.layers import Input, Concatenate, Flatten, UpSampling2D
from keras.layers.normalization import BatchNormalization
from keras.regularizers import l2
from keras.preprocessing.image import load_img
import keras.backend as K
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
from keras.utils import np_utils
from keras.utils import plot_model
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV
from keras.callbacks import LearningRateScheduler, EarlyStopping, ModelCheckpoint
from keras_applications.resnext import ResNeXt50
from keras_applications.resnet import ResNet50
from keras.initializers import VarianceScaling
from sklearn.metrics import accuracy_score, precision_score, f1_score, recall_score
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
fl = os.path.join(dirname, filename)
# if fl != '/kaggle/working/__notebook_source__.ipynb':
print(os.path.join(dirname, filename))
# os.remove(fl)
# Any results you write to the current directory are saved as output.
def get_data(trainPath, testPath):
"""Cette fonction retourne les données d'apprentissage et de text
params:
---> trainPath : chemin de la directory des images d'apprentissage
---> trainPath : chemin de la directory des images de test
retour :
---> trainGen : générateur d'image d'apprentissage
---> trainGen : générateur d'image de test
---> train_x : tableau d'image d'apprentissage
---> train_y: tableau des classes d'apprentissage
---> test_x : tableau d'image de test
---> test_y : tableau des classes de test
"""
# instantiate an ImageDataGenerator object for training data augmentation
trainAug = ImageDataGenerator(
rescale=1.0 / 255, horizontal_flip=True, fill_mode="nearest"
)
testAug = ImageDataGenerator(rescale=1.0 / 255)
# define the per-channel (RGB) ImageNet mean used to normalize the AFF20 images
mean = np.array([123.68, 116.779, 103.939], dtype="float32") / 255
trainAug.mean = mean
testAug.mean = mean
# initialize the training generator
trainGen = trainAug.flow_from_directory(
trainPath,
class_mode="categorical",
target_size=(224, 224),
color_mode="rgb",
shuffle=True,
batch_size=16,
)
# initialize the test generator
testGen = testAug.flow_from_directory(
testPath,
class_mode="categorical",
target_size=(224, 224),
color_mode="rgb",
shuffle=False,
batch_size=16,
)
# Read the data as numpy arrays for evaluation, since the fit method of the
# GridSearchCV class takes arrays as parameters rather than generators.
# For this part we could read the dataset manually (with for loops), but then
# evaluation would use non-augmented data while training uses augmented data.
# To avoid that, we extract the arrays from the generators themselves, which is
# also faster than using loops.
# sizes of the two datasets
n_train = trainGen.samples
n_test = testGen.samples
# initialize the training generator (full batch, to extract the arrays)
trainGen_tmp = trainAug.flow_from_directory(
trainPath,
class_mode="categorical",
target_size=(224, 224),
color_mode="rgb",
shuffle=True,
batch_size=n_train,
)
# initialize the test generator (full batch, to extract the arrays)
testGen_tmp = testAug.flow_from_directory(
testPath,
class_mode="categorical",
target_size=(224, 224),
color_mode="rgb",
shuffle=False,
batch_size=n_test,
)
# a single call to next() so the images and labels come from the same batch
train_x, train_y = trainGen_tmp.next()
test_x, test_y = testGen_tmp.next()
print("x_train_shape:", train_x.shape)
print("y_train_shape:", train_y.shape)
print("x_test_shape:", test_x.shape)
print("y_test_shape:", test_y.shape)
return trainGen, testGen, train_x, train_y, test_x, test_y
trainPath = "/kaggle/input/db-inf907/AFF20_crops/AFF20_crops/AFF20_crops_train/"
testPath = "/kaggle/input/db-inf907/AFF20_crops/AFF20_crops/AFF20_crops_test/"
trainGen, testGen, train_x, train_y, test_x, test_y = get_data(trainPath, testPath)
# A network with sequential layers
Auto_encoder = Sequential()
# First convolution - pooling level
Auto_encoder.add(
Conv2D(
16,
(3, 3),
padding="Same",
activation="relu",
input_shape=(224, 224, 3),
kernel_initializer="he_uniform",
)
)
Auto_encoder.add(MaxPooling2D(pool_size=(2, 2), padding="same"))
# Second convolution - pooling level
Auto_encoder.add(
Conv2D(
32, (3, 3), activation="relu", padding="same", kernel_initializer="he_uniform"
)
)
Auto_encoder.add(MaxPooling2D(pool_size=(2, 2), padding="same"))
# Third convolution - pooling level
Auto_encoder.add(
Conv2D(
64, (3, 3), activation="relu", padding="same", kernel_initializer="he_uniform"
)
)
Auto_encoder.add(MaxPooling2D(pool_size=(2, 2), padding="same"))
# Fourth convolution - pooling level
Auto_encoder.add(
Conv2D(
128, (3, 3), activation="relu", padding="same", kernel_initializer="he_uniform"
)
)
Auto_encoder.add(MaxPooling2D(pool_size=(2, 2), padding="same"))
# First deconvolution - UpSampling2D level
Auto_encoder.add(
Conv2D(
128, (3, 3), activation="relu", padding="same", kernel_initializer="he_uniform"
)
)
Auto_encoder.add(UpSampling2D((2, 2)))
# Second deconvolution - UpSampling2D level
Auto_encoder.add(
Conv2D(
64, (3, 3), activation="relu", padding="same", kernel_initializer="he_uniform"
)
)
Auto_encoder.add(UpSampling2D((2, 2)))
# Third deconvolution - UpSampling2D level
Auto_encoder.add(
Conv2D(
32, (3, 3), activation="relu", padding="same", kernel_initializer="he_uniform"
)
)
Auto_encoder.add(UpSampling2D((2, 2)))
# Fourth deconvolution - UpSampling2D level
Auto_encoder.add(
Conv2D(
16, (3, 3), activation="relu", padding="same", kernel_initializer="he_uniform"
)
)
Auto_encoder.add(UpSampling2D((2, 2)))
# Final convolution to get back to the original image size (224, 224, 3)
Auto_encoder.add(
Conv2D(
3, (3, 3), activation="relu", padding="same", kernel_initializer="he_uniform"
)
)
# Compile the network described above
Auto_encoder.compile(optimizer="adadelta", loss="MSE", metrics=["accuracy"])
Auto_encoder.summary()
# instantiate an ImageDataGenerator object for training data augmentation for the autoencoder
trainAug = ImageDataGenerator(horizontal_flip=True, fill_mode="nearest")
testAug = ImageDataGenerator()
# define the per-channel (RGB) ImageNet mean used to normalize the AFF20 images
mean = np.array([123.68, 116.779, 103.939], dtype="float32")
trainAug.mean = mean
testAug.mean = mean
# initialize the training generator
trainGen_autoencod = trainAug.flow_from_directory(
trainPath,
class_mode="input",
target_size=(224, 224),
color_mode="rgb",
shuffle=True,
batch_size=64,
)
# initialize the test generator
testGen_autoencod = testAug.flow_from_directory(
testPath,
class_mode="input",
target_size=(224, 224),
color_mode="rgb",
shuffle=False,
batch_size=64,
)
# Train the network
epochs = 400
batch_size = 64
Auto_encoder.fit(train_x, train_x, batch_size=batch_size, epochs=epochs)
predicted_test_img = Auto_encoder.predict(test_x)
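# A minimal sketch to quantify reconstruction quality: the mean squared error
# between the test images and their reconstructions.
reconstruction_mse = np.mean((test_x - predicted_test_img) ** 2)
print("Mean reconstruction MSE on the test set: {:.5f}".format(reconstruction_mse))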
n = 9
plt.figure(figsize=(20, 4))
for i in range(n):
# display original
ax = plt.subplot(2, n, i + 1)
plt.imshow(test_x[i])
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# display reconstruction
ax = plt.subplot(2, n, i + n + 1)
plt.imshow(predicted_test_img[i])
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
|
import pandas as pd
a = pd.read_csv("/kaggle/input/student-performance-in-mathematics/exams.csv")
a
a = pd.read_csv(
"/kaggle/input/student-performance-in-mathematics/exams.csv",
usecols=["math score"],
squeeze=True,
)
a
a[5]
a[a.count() - 1]
a[0:11:2]
# **Customized Index**
a = pd.read_csv("/kaggle/input/student-performance-in-mathematics/exams.csv")
a
a = pd.read_csv(
"/kaggle/input/student-performance-in-mathematics/exams.csv",
usecols=["parental level of education"],
squeeze=True,
)
a
a[1]
a.get("male")
a.get("Contra", default="Please enter the right value")
a.value_counts()
def category(num):
    # note: this assumes a numeric Series (e.g. the 'math score' column loaded earlier);
    # the larger threshold is checked first so that both branches are reachable
    if num > 250:
        return "vgood"
    elif num > 100:
        return "good enough"
    else:
        return "Okayish"
b = a.apply(category)
b
c = pd.Series(["Mumbai", "Delhi", "Banglore"], index=["West", "North", "South"])
c
d = pd.Series(
["Gateway of India", "India Gate", "Nandi Hills"],
index=["Mumbai", "Delhi", "Banglore"],
)
d
c.map(d)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
df_2014 = pd.read_csv(
"/kaggle/input/200-financial-indicators-of-us-stocks-20142018/2014_Financial_Data.csv"
)
df_2015 = pd.read_csv(
"/kaggle/input/200-financial-indicators-of-us-stocks-20142018/2015_Financial_Data.csv"
)
df_2016 = pd.read_csv(
"/kaggle/input/200-financial-indicators-of-us-stocks-20142018/2016_Financial_Data.csv"
)
df_2017 = pd.read_csv(
"/kaggle/input/200-financial-indicators-of-us-stocks-20142018/2017_Financial_Data.csv"
)
df_2018 = pd.read_csv(
"/kaggle/input/200-financial-indicators-of-us-stocks-20142018/2018_Financial_Data.csv"
)
# Let's Check Shape of Each data set
print("Shape of data for Year 2014 is {}".format(df_2014.shape))
print("Shape of data for Year 2015 is {}".format(df_2015.shape))
print("Shape of data for Year 2016 is {}".format(df_2016.shape))
print("Shape of data for Year 2017 is {}".format(df_2017.shape))
print("Shape of data for Year 2018 is {}".format(df_2018.shape))
df_2014.info()
# Data missing information for 2014 year
data_info = pd.DataFrame(df_2014.dtypes).T.rename(index={0: "column type"})
data_info = data_info.append(
pd.DataFrame(df_2014.isnull().sum()).T.rename(index={0: "null values (nb)"})
)
data_info = data_info.append(
pd.DataFrame(df_2014.isnull().sum() / df_2014.shape[0] * 100).T.rename(
index={0: "null values (%)"}
)
)
display(data_info)
# Data missing information for 2015 year
data_info = pd.DataFrame(df_2015.dtypes).T.rename(index={0: "column type"})
data_info = data_info.append(
pd.DataFrame(df_2015.isnull().sum()).T.rename(index={0: "null values (nb)"})
)
data_info = data_info.append(
pd.DataFrame(df_2015.isnull().sum() / df_2015.shape[0] * 100).T.rename(
index={0: "null values (%)"}
)
)
display(data_info)
# #### Imputation :
# **Imputing the nulls with 0 because, in our case, a missing value reflects that the company has no reported value for that year**
#
df_2014.fillna(0, inplace=True)
df_2015.fillna(0, inplace=True)
df_2016.fillna(0, inplace=True)
df_2017.fillna(0, inplace=True)
df_2018.fillna(0, inplace=True)
# ## Ratio Analysis
# Note : I am performing all analyses on one organization (Procter & Gamble). The same analysis can be run for any other company in a similar manner
# An organization can be identified by the ticker symbol given in column 'Unnamed: 0'
# *** A financial ratio is a quick way to understand a company's position. There are more than 59 financial ratios
# Quick Ratio : (Current Assets - Inventory) / Current Liabilities (this is the best way to understand the liquidity of an organization, i.e. how quickly it can pay off its current liabilities)
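# A minimal sketch of the quick-ratio formula above; the balance-sheet column
# names used here are assumptions about this dataset, and the analysis below
# simply relies on the precomputed 'quickRatio' column instead.
def quick_ratio(frame):
    return (frame["Total current assets"] - frame["Inventories"]) / frame[
        "Total current liabilities"
    ]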
# Considering Procter & Gamble company only
df_2014 = df_2014[df_2014["Unnamed: 0"] == "PG"]
df_2015 = df_2015[df_2015["Unnamed: 0"] == "PG"]
df_2016 = df_2016[df_2016["Unnamed: 0"] == "PG"]
df_2017 = df_2017[df_2017["Unnamed: 0"] == "PG"]
df_2018 = df_2018[df_2018["Unnamed: 0"] == "PG"]
df = df_2014.append([df_2015, df_2016, df_2017, df_2018])
df.fillna(0, inplace=True)
df.index = [2014, 2015, 2016, 2017, 2018]
print(
    "Mean of Quick Ratio for P&G over the last 5 years is {:.2f} ".format(
        df["quickRatio"].mean()
    )
)
plt.figure(figsize=(15, 7))
df["quickRatio"].plot.bar(color="y")
plt.xlabel("Years")
plt.ylabel("Quick Ratio")
plt.title("Quick Ratio analysis P&G ")
plt.grid(False)
plt.show()
# **P&G has a quick ratio of 0.57, which means its current liabilities are nearly twice its quick assets, so the company may not be able to meet its short-term obligations**
# **Before we reach any conclusion about short-term obligations, we also need to analyze the current ratio**
# * ### Current Ratio : Current Assets (including Inventory) / Current Liabilities
# * ### A good current ratio is between 1 and 2; if it is more than 2, it may mean the company is not making efficient use of its inventory
#
# Current ratio
print(
    "Mean of Current Ratio for P&G over the last 5 years is {:.2f} ".format(
        df["currentRatio"].mean()
    )
)
plt.figure(figsize=(15, 7))
df["currentRatio"].plot.bar()
plt.xlabel("Years")
plt.ylabel("Current Ratio")
plt.title("Current Ratio analysis P&G ")
plt.grid(True)
plt.show()
# 1. **P&G has a decent current ratio (0.95), but the company still has slightly more current liabilities than current assets**
# 2. **Let's analyze the debt-to-equity ratio: the debt-to-equity (D/E) ratio is calculated by dividing a company's total liabilities by its shareholder equity (a small sketch of the formula follows this list)**
# **It reflects the ability of shareholder equity to cover all outstanding debts in the event of a business downturn.**
# 3. **Debt/Equity = Total Liabilities / Shareholder Equity**
# 4. **Total Assets = Total Liabilities + Shareholder Equity**
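# A minimal sketch of the D/E formula above; 'Total liabilities' and
# 'Total shareholders equity' are assumed column names for this dataset, and the
# analysis below simply uses the precomputed 'debtEquityRatio' column.
def debt_to_equity(frame):
    return frame["Total liabilities"] / frame["Total shareholders equity"]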
# Debt to Equity ratio
print(
    "Mean of Debt to Equity Ratio for P&G over the last 5 years is {:.2f} ".format(
        df["debtEquityRatio"].mean()
    )
)
plt.figure(figsize=(15, 7))
df["debtEquityRatio"].plot.bar()
plt.xlabel("Years")
plt.ylabel("debtEquity Ratio")
plt.title("Debt Equity Ratio analysis P&G ")
plt.grid(True)
plt.show()
# * **For investors this is a good sign : many investors look for a company with a debt ratio between 0.3 and 0.6**
# * **Let's analyze Inventory Turnover : inventory turnover is a measure of the number of times inventory is sold or used in a time period such as a year. It is calculated to see if a business holds excessive inventory in comparison to its sales level**
# * **Inventory Turnover = Sales / Average Inventory**
# * **Average Inventory = (Starting Inventory + Ending Inventory) / 2**
# Inventory turnover
print(
    "Mean of Inventory Turnover for P&G over the last 5 years is {:.2f} ".format(
        df["inventoryTurnover"].mean()
    )
)
plt.figure(figsize=(15, 7))
df["inventoryTurnover"].plot.bar()
plt.xlabel("Years")
plt.ylabel("Inventory Turnover")
plt.title("Inventory Turnover analysis P&G ")
plt.grid(True)
plt.show()
# # Balance Sheet Analysis
# * **A balance sheet has two sections: assets and liabilities**
# * **The asset section has two parts: short-term assets and long-term assets**
# * **The liabilities section also has two parts: short-term liabilities and long-term liabilities**
# * **Short-term assets : cash and cash reserves, cash equivalents, inventories, accounts receivable, securities, etc.**
# * **Long-term assets : property, plant and equipment, long-term investments, all intangible assets**
# * **Short-term liabilities : short-term debts, dividends payable, trade accounts payable, customer deposits, current portion of long-term debts**
# * **Long-term liabilities : long-term loans, deferred revenues, deferred compensation, etc.**
# * **Let's analyze them one by one**
# ### Short Term Assets of P&G
# **I am considering the following parameters**
# * **Cash and Cash Equivalents**
# * **Cash and Short-Term Investments**
# * **Inventories**
# * **Average Receivables**
# * **Investments**
df["ShortTermAssest"] = (
df["Cash and cash equivalents"]
+ df["Cash and short-term investments"]
+ df["Inventories"]
+ df["Average Receivables"]
+ df["Investments"]
+ df["Investment purchases and sales"]
+ df["Short-term investments"]
)
df["liquidcash"] = (
df["Cash and cash equivalents"] + df["Cash and short-term investments"]
)
# Short Term Assets
n_year = 5
index = np.arange(n_year)
bar_width = 0.35
opacity = 0.7
print(
    "Mean of short-term assets for P&G over the last 5 years is {:.2f} ".format(
        df["ShortTermAssest"].mean()
    )
)
plt.figure(figsize=(15, 7))
plt.bar(
index,
df["ShortTermAssest"],
bar_width,
alpha=opacity,
color="b",
label="Short Term Assest",
)
plt.bar(
index + bar_width,
df["liquidcash"],
bar_width,
alpha=opacity,
color="y",
label="liquid Cash",
)
plt.xlabel("Years")
plt.ylabel("Short Term Assests")
plt.title("Short Term Assest or Current Assest analysis P&G ")
plt.xticks(index + 0.20, df.index)
plt.grid(False)
plt.legend()
plt.show()
# ### Long Term Assets
# **For long-term assets, I am considering the following parameters**
# * **Property, Plant and Equipment**
# * **Goodwill and Intangible Assets**
# * **Long-Term Investments**
# Long Term Asset and Short-Term Asset
df["LongTermAsset"] = (
df["Property, Plant & Equipment Net"]
+ df["Goodwill and Intangible Assets"]
+ df["Long-term investments"]
)
# Long term Asset
n_year = 5
index = np.arange(n_year)
bar_width = 0.3
opacity = 0.7
print(
    "Mean of long-term assets for P&G over the last 5 years is {:.2f} ".format(
        df["LongTermAsset"].mean()
    )
)
print(
    "Mean percentage of long-term assets out of total assets for P&G over the last 5 years is {:.2f}% ".format(
        (df["LongTermAsset"].mean() / df["Total assets"].mean()) * 100
    )
)
plt.figure(figsize=(15, 7))
plt.bar(
index,
df["LongTermAsset"],
bar_width,
alpha=opacity,
color="b",
label="Long Term Asset",
)
plt.bar(
index + bar_width,
df["ShortTermAssest"],
bar_width,
alpha=opacity,
color="y",
label="short Term Asset",
)
plt.bar(
index - bar_width,
df["Total assets"],
bar_width,
alpha=opacity,
color="g",
label="Total Asset",
)
plt.xlabel("Years")
plt.ylabel("Asset Analysis")
plt.title("Short Term Assest and Long Assest analysis P&G ")
plt.xticks(index + 0.10, df.index)
plt.grid(False)
plt.legend()
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels.tsa.seasonal import seasonal_decompose
from statsmodels.tsa.stattools import adfuller
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
from pandas.plotting import autocorrelation_plot
from statsmodels.tsa.arima_model import ARIMA
import matplotlib.pyplot as plt
# Suppress warnings
import warnings
warnings.filterwarnings("ignore")
# ### Loading in a data
df = pd.read_csv("../input/industrial-production-index-in-usa/INDPRO.csv")
df.head()
df.columns = ["Date", "IPI"]
df.head()
# to check NAs
df.info()
df.isnull().sum()
# setting up Date column as an Index
df.set_index("Date", inplace=True)
df.index
# Date slicing
df_new = df["2008-01-01":]
df_new.head()
df_new.describe().transpose()
df_new.plot(c="r", figsize=(16, 10), rot=20)
# rot = rotates labels at the bottom
# fontsize = labels size
# grid False or True
df_new.boxplot("IPI", rot=80, fontsize="12", grid=True)
# ### Stationarity
# The mean, variance and covariance of the i-th and the (i+m)-th terms of the series should not depend on time. Most statistical methods assume or require the series to be stationary.
time_series = df["IPI"]
type(time_series)
time_series.rolling(12).mean().plot(label="12 Months Rolling Mean", figsize=(16, 10))
time_series.rolling(12).std().plot(label="12 Months Rolling Std")
time_series.plot()
plt.legend()
# Since rolling mean and rolling standard deviation increase over time, the time series are non-stationary.
# ### Dickey-Fuller Test
# Null hypothesis: the series is non-stationary.
# Alternative hypothesis: the series is stationary. The data is considered stationary if the p-value is less than 0.05 and the ADF statistic is below the critical values.
result = adfuller(df_new["IPI"])
# to make it readable
def adf_check(time_series):
result = adfuller(time_series)
print("Augmented Dickey-Fuller Test")
labels = ["ADF Test Statistic", "p-value", "# of lags", "Num of Obs used"]
print("Critical values:")
for key, value in result[4].items():
print("\t{}: {}".format(key, value))
for value, label in zip(result, labels):
print(label + " : " + str(value))
if (
(result[1] <= 0.05 and result[0] <= result[4]["1%"])
or (result[1] <= 0.05 and result[0] <= result[4]["5%"])
or (result[1] <= 0.05 and result[0] <= result[4]["10%"])
):
print("Reject null hypothesis")
print("Data has no unit root and is stationary")
else:
print("Fail to reject null hypothesis")
print("Data has a unit root and it is non-stationary")
adf_check(df_new["IPI"])
# ADF statistic is higher than Critical values and p-value is above 0.05. Conclusion: This data is non-stationary
# ## How to make it stationary
# In order to conclude that the series is stationary, the p-value has to be less than the significance level. As well as, ADF t-statistic must be below the Critical values.
# ### 1. Taking a first difference
df_new["Dif_1"] = df_new["IPI"] - df_new["IPI"].shift(1)
df_new["Dif_1"].plot(rot=80, figsize=(14, 8))
# #### Checking for stationarity again:
adf_check(df_new["Dif_1"].dropna())
# If need to take a second difference
# df_new['Dif_2'] = df_new['Dif_1'] - df_new['Dif_1'].shift(1)
# adf_check(df_new['Dif_2'].dropna())
# ### 2. Seasonal Difference
# #### 2.1 Taking seasonal difference
df_new["Dif_Season"] = df_new["IPI"] - df_new["IPI"].shift(12)
df_new["Dif_Season"].plot(rot=80, figsize=(14, 8))
adf_check(df_new["Dif_Season"].dropna())
# Even though the ADF test statistic is close to the critical values, the p-value is too high. Result: the data is non-stationary.
# #### 2.2 Seasonal effect of the first difference:
df_new["Dif_Season_1"] = df_new["Dif_1"] - df_new["Dif_1"].shift(12)
df_new["Dif_Season_1"].plot(rot=80, figsize=(14, 8))
adf_check(df_new["Dif_Season_1"].dropna())
# ### 3. Log of the dependent variable
# #### 3.1 Taking the first difference
df_log = np.log(df)
df_log["Dif_1"] = df_log["IPI"] - df_log["IPI"].shift(1)
df_log["Dif_1"].plot(rot=80, figsize=(14, 8))
adf_check(df_log["Dif_1"].dropna())
# #### 3.2 Subtracting the mean
df_log["Dif_mean"] = df_log["IPI"] - df_log["IPI"].rolling(12).mean()
df_log["Dif_mean"].plot(rot=80, figsize=(14, 8))
adf_check(df_log["Dif_mean"].dropna())
# #### 3.3 Exponential Decay
df_log["Exp_decay"] = (
df_log["IPI"] - df_log["IPI"].ewm(halflife=12, min_periods=0, adjust=True).mean()
)
df_log["Exp_decay"].plot(rot=80, figsize=(14, 8))
adf_check(df_log["Exp_decay"].dropna())
df_log["Dif_1"] = df_log["IPI"] - df_log["IPI"].shift(1)
df_log["Dif_1"].plot(rot=80, figsize=(14, 8))
adf_check(df_log["Dif_1"].dropna())
# ### Seasonal Decomposition of Time Series Components
# #### Trend:
# Upward or downward movement of the data over time
# #### Seasonality:
# Seasonal variance
# #### Noise:
# Spikes and drops at random intervals
# freq (called `period` in newer statsmodels versions) is the number of observations per seasonal cycle, e.g. 12 for monthly data
decomp = seasonal_decompose(time_series, freq=12)
fig = decomp.plot()
fig.set_size_inches(15, 8)
# ### Autocorrelation Function (ACF) and Partial Autocorrelation Function (PACF)
auto_cor_f = plot_acf(df_new["Dif_1"].dropna())
acf_seasonal = plot_acf(df_new["Dif_Season_1"].dropna())
pacf_plot = plot_pacf(df_new["Dif_Season_1"].dropna())
# to delete vlines:
acf_seasonal = plot_acf(df_new["Dif_Season_1"].dropna(), use_vlines=False)
# ### ARIMA Model
# create additional future dates:
# from pandas.tseries.offsets import DateOffset
# forecast_dates = [df_new.index[-1] + DateOffset(months=x) for x in range(1,24)]
# df_future = pd.DataFrame(index=forecast_dates, columns = df_new.columns)
# df_final = pd.concat([df_new, df_future])
# for a seasonal ARIMA model:
# model = sm.tsa.statespace.SARIMAX(df_new['IPI'], order=(0, 1, 0), seasonal_order=(1, 1, 1, 12))
# results = model.fit()
# print(results.summary())
# to plot residuals:
# results.resid.plot()
# KDE plot:
# results.resid.plot(kind='kde')
# df_new['Forecast'] = results.predict(start=150, end=250)
# df_new[['IPI', 'Forecast']].plot(figsize=(10, 6))
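# A minimal runnable sketch of the seasonal ARIMA workflow outlined in the
# comments above; the (0, 1, 0)x(1, 1, 1, 12) order comes from those comments
# and is not a tuned choice. The index is converted to a monthly DatetimeIndex
# so the forecast aligns with the data.
ipi = df_new["IPI"].copy()
ipi.index = pd.DatetimeIndex(ipi.index)
ipi = ipi.asfreq("MS")
sarima = sm.tsa.statespace.SARIMAX(ipi, order=(0, 1, 0), seasonal_order=(1, 1, 1, 12))
sarima_results = sarima.fit(disp=False)
print(sarima_results.summary())
forecast = sarima_results.get_forecast(steps=24).predicted_mean
pd.concat([ipi, forecast.rename("Forecast")], axis=1).plot(figsize=(10, 6))
plt.show()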
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
gender_submission = pd.read_csv("../input/titanic/gender_submission.csv")
test = pd.read_csv("../input/titanic/test.csv")
train = pd.read_csv("../input/titanic/train.csv")
# PassengerId (sorted in any order)
# Survived (contains your binary predictions: 1 for survived, 0 for deceased)
train.head()
num_cols = [col for col in train.columns if train[col].dtype in ["int64", "float64"]]
train[num_cols].describe()
cat_cols = [col for col in train.columns if train[col].dtype in ["O"]]
train[cat_cols].describe()
for col in cat_cols[1:]:
unq = np.unique(train[col].astype("str"))
print("-" * 50)
print("column: {}, # of col: {}".format(col, len(unq)))
print("contents: {}".format(unq))
# EDA idea
# plot (survived ~ pclass + age + sex)
# pclass is ordinal, age is continuous, survived & sex is categorical
# correlation ?? https://rfriend.tistory.com/405
train[["Pclass", "Survived"]].groupby(["Pclass"], as_index=False).mean().sort_values(
by="Survived", ascending=False
)
train[train["Age"].isnull()]
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.tree import export_graphviz
from sklearn.datasets import load_iris
iris_data = load_iris()
# iris_data.data; iris_data.target
X_train, X_test, y_train, y_test = train_test_split(
iris_data.data, iris_data.target, test_size=0.2, random_state=11
)
dt_clf = DecisionTreeClassifier(random_state=156)
# dt_clf.fit(X_train, y_train)
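# A minimal sketch that fits the tree on the iris split above and reports its
# accuracy on the held-out part (not tied to the Titanic data yet).
from sklearn.metrics import accuracy_score
dt_clf.fit(X_train, y_train)
print("Iris test accuracy:", accuracy_score(y_test, dt_clf.predict(X_test)))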
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
stud_data = pd.read_csv("../input/stud_data.csv")
stud_data
stud_data.sort_values("Stud_Roll")
stud_data[stud_data.isna().any(axis=1)]
stud_data.fillna(0, inplace=True)
q1 = np.percentile(stud_data["Marks_English"], 25)
q3 = np.percentile(stud_data["Marks_English"], 75)
IQR = q3 - q1
least = q1 - 1.5 * IQR
maxi = q3 + 1.5 * IQR
stud_data.loc[stud_data["Marks_English"] < least]
stud_data.loc[stud_data["Marks_English"] > maxi]
plt.hist(stud_data["Marks_English"])
plt.show()
plt.scatter(stud_data["Marks_English"], stud_data["Marks_Maths"])
plt.show()
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
pd.options.display.max_rows = 50
path = "/kaggle/input/student-performance-in-mathematics/exams.csv"
df_student = pd.read_csv(path)
# ## Memory Usage
# This cell shows how to calculate the memory usage of a table
mem = df_student.memory_usage().sum() / 1024**2
print("Memory usage is {:.2f} MB".format(mem))
# ## EDA Phase
# Table Information
print("The Dataset Is {} Rows Long".format(len(df_student)))
print("The Dataset Has {} Columns".format(len(df_student.columns)))
print()
# Checking For Any Missing Values
missing_values = df_student.isna().sum().sum()
if missing_values == 0:
print("The Dataset has {} Missing Values".format(missing_values))
else:
print("The Dataset has {} Missing Values".format(missing_values))
df_student.info()
# format the numeric score columns
score_cols = df_student.select_dtypes(include="int64").columns
df_student.head(10).style.format(
    {col: "{:.2f} %" for col in score_cols}
).background_gradient(cmap="inferno", low=1, axis=1)
df_student.describe()
df_student_copy = df_student.copy()
df_student.columns
def dist_graphs(data: pd.DataFrame):
"""Get the scores distributions
data : The copied Dataset"""
colors = ["#FF6D60", "#98D8AA", "#F9D949"]
# This is gonna select only the score cols
n_cols = data.select_dtypes(include="int64")
uni = np.unique(n_cols.columns)
fig, axs = plt.subplots(1, 3, figsize=(14, 4))
axs = axs.flat
for colm, ax, color in zip(uni, axs, colors):
sns.histplot(ax=ax, data=n_cols, x=colm, kde=True, color=color)
ax.set_xlabel(str(colm).capitalize())
ax.set_ylabel("Occurence Count")
plt.tight_layout(pad=0.8)
plt.show()
dist_graphs(df_student_copy)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
r = pd.read_csv(
"/kaggle/input/web-page-phishing-detection-dataset/dataset_phishing.csv"
)
print(r)
import pandas as pd
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import balanced_accuracy_score
from xgboost import XGBRegressor
# import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
r.columns
from sklearn.model_selection import train_test_split
y = r.status
X = r.drop(["status"], axis=1)
X_train_full, X_valid_full, y_train, y_valid = train_test_split(
X, y, train_size=0.8, test_size=0.2, random_state=0
)
categorical_cols = [
cname
for cname in X_train_full.columns
if X_train_full[cname].nunique() < 10 and X_train_full[cname].dtype == "object"
]
numerical_cols = [
cname
for cname in X_train_full.columns
if X_train_full[cname].dtype in ["int64", "float64"]
]
my_cols = categorical_cols + numerical_cols
X_train = X_train_full[my_cols].copy()
X_valid = X_valid_full[my_cols].copy()
numerical_transformer = SimpleImputer(strategy="constant")
categorical_transformer = Pipeline(
steps=[
("imputer", SimpleImputer(strategy="most_frequent")),
("onehot", OneHotEncoder(handle_unknown="ignore")),
]
)
preprocessor = ColumnTransformer(
transformers=[
("num", numerical_transformer, numerical_cols),
("cat", categorical_transformer, categorical_cols),
]
)
# print(r.describe)
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y_train = le.fit_transform(y_train)
y_valid = le.fit_transform(y_valid)
# print(y_train)
import xgboost as xgb
from sklearn.metrics import accuracy_score
model = xgb.XGBClassifier()
my_pipeline = Pipeline(steps=[("preprocessor", preprocessor), ("model", model)])
my_pipeline.fit(X_train, y_train)
preds = my_pipeline.predict(X_valid)
print(accuracy_score(y_valid, preds))
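# balanced_accuracy_score is imported above but never used; a minimal sketch
# applying it to the same validation predictions.
print(balanced_accuracy_score(y_valid, preds))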
|
# In this notebook, I'll try to predict the price of Microsoft shares using the simplest code I can!!
# First, **import the necessary models**.
import pandas as pd
import matplotlib.pyplot as plt
plt.style.use("ggplot")
# Suppress warnings
import warnings
warnings.filterwarnings("ignore", category=UserWarning)
# ## Quick EDA
df = pd.read_csv("/kaggle/input/stock/Microsoft.csv", index_col="Date")
df.head()
df.describe()
df.info()
df.index = pd.to_datetime(df.index)
df["Volume"].plot(figsize=(10, 7), title="Volume Of Microsoft Stock Prices", c="green")
plt.ylabel("Volume")
plt.show()
def plot(col, color, i):
    # plt.figure(figsize=(10,7))
    plt.subplot(2, 2, i)
    df[col].plot(c=color)
    plt.ylabel(col)
    # plt.xlabel('Year')
    # plt.title(col + " price of Microsoft")
    plt.tight_layout()
plot("Open", "black", 1)
plot("High", "green", 2)
plot("Low", "blue", 3)
plot("Close", "orange", 4)
# With the help of the other predictors, the goal is to predict the closing price of the stock.
predictor = ["Open", "High", "Low", "Close", "Adj Close", "Volume"]
# ## Analyzing our Data
plt.subplot(2, 1, 1)
plt.plot(df.index, df.Open, color="g")
plt.subplot(2, 1, 2)
plt.plot(df.index, df.Close)
# **In time series analysis, the target is to use the information of one day to predict the closing stock price of next day.**
# So, the target can be set as follows:
df["target"] = df.shift(-1)["Close"]
df = df.iloc[:-1, :].copy()
df
# Also, the data must not be split randomly; because this is a time series, we split it chronologically.
train = df.loc[:"2018-03-16"]
test = df.loc["2018-03-16":]
# ## Creating a Model
from sklearn.linear_model import LinearRegression
reg = LinearRegression()
reg.fit(train[predictor], train["target"])
predictions = reg.predict(test[predictor])
# ## Evaluating the result
combined = pd.concat([test["target"], pd.Series(predictions, index=test.index)], axis=1)
combined.columns = ["actual", "prediction"]
combined.plot()
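# A minimal sketch to quantify the fit with a root-mean-squared error on the
# actual versus predicted closing prices.
import numpy as np
from sklearn.metrics import mean_squared_error
rmse = np.sqrt(mean_squared_error(combined["actual"], combined["prediction"]))
print("Test RMSE:", rmse)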
|
# Published on April 11, 2023. By Marília Prata, mpwolke
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objs as go
import plotly
plotly.offline.init_notebook_mode(connected=True)
import warnings
warnings.simplefilter(action="ignore", category=Warning)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# #Determining whether a number is an integer is not easy for me. What if I teach an AI to do it?
# "Correctly determining whether a number is an integer.It is easy for humans,but is it easy for AI? Let's give it a try.
# Note:This is not a programming competition. The purpose of this competition is to build a model for AI to determine what is an integer on its own."
# "This dataset randomly generates numbers ranging from 1 to 10000 and divides them by 10. Most of the data is non integer numbers."
# https://www.kaggle.com/competitions/teach-ai-what-is-integer/overview/description
# https://www.forbes.com/sites/tomvanderark/2020/02/12/how-to-teach-artificial-intelligence/?sh=2f9300145eac
# AI-focused learning objectives for K-12 schools.AI4K12
# #Competition Citation
# @misc{teach-ai-what-is-integer,
# author = {yunsuxiaozi},
#
# title = {Teach AI what is integer},
#
# publisher = {Kaggle},
#
# year = {2023},
#
# url = {https://kaggle.com/competitions/teach-ai-what-is-integer}
#
# }
train = pd.read_csv(
"/kaggle/input/teach-ai-what-is-integer/teach AI Integer/train.csv",
delimiter=",",
encoding="ISO-8859-2",
)
train.tail()
# #What is an Integer?
# "Integers are the collection of whole numbers and their corresponding negative numbers. Similar to whole numbers, integers also do not include fractional numbers. Thus, we can say that integers are numbers that can be positive, negative, or zero, but cannot be a fraction."
# Note: The notation Z for the set of integers comes from the German word ‘Zahlen’, which means ‘numbers’.
# https://codinghero.ai/what-is-an-integer/
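# A minimal sketch: the target can also be derived with a simple rule, which is
# useful as a sanity check. This assumes label 1 marks an integer, which is an
# assumption about this dataset's encoding.
rule_based = (train["number"] % 1 == 0).astype(int)
print("Rule-based agreement with the labels:", (rule_based == train["label"]).mean())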
test = pd.read_csv(
"/kaggle/input/teach-ai-what-is-integer/teach AI Integer/test.csv",
delimiter=",",
encoding="ISO-8859-2",
)
test.tail()
sub = pd.read_csv(
"/kaggle/input/teach-ai-what-is-integer/teach AI Integer/sample_submission.csv",
delimiter=",",
encoding="ISO-8859-2",
)
sub.head()
# #No Missing values
train.isnull().sum()
# #That 3D is cool though I don't know how to interpret it
# By Satoshi S https://www.kaggle.com/code/satoshiss/titanic-binary-classification
from mpl_toolkits import mplot3d
fig = plt.figure(figsize=(10, 10))
ax = plt.axes(projection="3d")
sc = ax.scatter(
train.id, train.number, train.label, c=train.label, cmap="viridis", linewidth=0.5
)
# legend
plt.legend(*sc.legend_elements(), bbox_to_anchor=(1.05, 1), loc=2)
# Code by Lucas Abrahão https://www.kaggle.com/lucasabrahao/trabalho-manufatura-an-lise-de-dados-no-brasil
train["label"].value_counts().plot.barh(
color=["blue", "#f5005a"], title="What is Integer AI?"
)
# #I don't know why Fares wrote the snippet below
# Fares Sayah https://www.kaggle.com/code/faressayah/logistic-regression-for-binary-classification-task
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
def print_score(clf, X_train, y_train, X_test, y_test, train=True):
if train:
pred = clf.predict(X_train)
clf_report = pd.DataFrame(
classification_report(y_train, pred, output_dict=True)
)
print("Train Result:\n================================================")
print(f"Accuracy Score: {accuracy_score(y_train, pred) * 100:.2f}%")
print("_______________________________________________")
print(f"CLASSIFICATION REPORT:\n{clf_report}")
print("_______________________________________________")
print(f"Confusion Matrix: \n {confusion_matrix(y_train, pred)}\n")
else:
pred = clf.predict(X_test)
clf_report = pd.DataFrame(classification_report(y_test, pred, output_dict=True))
print("Test Result:\n================================================")
print(f"Accuracy Score: {accuracy_score(y_test, pred) * 100:.2f}%")
print("_______________________________________________")
print(f"CLASSIFICATION REPORT:\n{clf_report}")
print("_______________________________________________")
print(f"Confusion Matrix: \n {confusion_matrix(y_test, pred)}\n")
# Fares Sayah https://www.kaggle.com/code/faressayah/logistic-regression-for-binary-classification-task
from sklearn.preprocessing import StandardScaler, MinMaxScaler, OrdinalEncoder
from sklearn.compose import make_column_transformer
from sklearn.model_selection import train_test_split
X = train.drop(["id", "label"], axis=1)
y = train["label"]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=42
)
# cat_columns = []
num_columns = ["number"]
ct = make_column_transformer(
(MinMaxScaler(), num_columns),
(StandardScaler(), num_columns),
remainder="passthrough",
)
X_train = ct.fit_transform(X_train)
X_test = ct.transform(X_test)
# #Implementing Logistic Regression in Scikit-Learn
# Fares Sayah https://www.kaggle.com/code/faressayah/logistic-regression-for-binary-classification-task
from sklearn.linear_model import LogisticRegression
lr_clf = LogisticRegression(solver="liblinear")
lr_clf.fit(X_train, y_train)
print_score(lr_clf, X_train, y_train, X_test, y_test, train=True)
print_score(lr_clf, X_train, y_train, X_test, y_test, train=False)
# Fares Sayah https://www.kaggle.com/code/faressayah/logistic-regression-for-binary-classification-task
from sklearn.ensemble import RandomForestClassifier
rf_clf = RandomForestClassifier(n_estimators=1000)
rf_clf.fit(X_train, y_train)
print_score(rf_clf, X_train, y_train, X_test, y_test, train=True)
print_score(rf_clf, X_train, y_train, X_test, y_test, train=False)
# Fares Sayah https://www.kaggle.com/code/faressayah/logistic-regression-for-binary-classification-task
from sklearn.metrics import precision_recall_curve
def plot_precision_recall_vs_threshold(precisions, recalls, thresholds):
plt.plot(thresholds, precisions[:-1], "b--", label="Precision")
plt.plot(thresholds, recalls[:-1], "g--", label="Recall")
plt.xlabel("Threshold")
plt.legend(loc="upper left")
plt.title("Precisions/recalls tradeoff")
precisions, recalls, thresholds = precision_recall_curve(y_test, lr_clf.predict(X_test))
plt.figure(figsize=(15, 10))
plt.subplot(2, 2, 1)
plot_precision_recall_vs_threshold(precisions, recalls, thresholds)
plt.subplot(2, 2, 2)
plt.plot(precisions, recalls)
plt.xlabel("Precision")
plt.ylabel("Recall")
plt.title("PR Curve: precisions/recalls tradeoff")
# #The precisions above are awful and the threshold axis is almost empty: precision_recall_curve was given the hard 0/1 outputs of predict() rather than probabilities from predict_proba(), so only a couple of distinct thresholds exist.
# #The Receiver Operating Characteristics (ROC) Curve
# Instead of plotting precision versus recall, the ROC curve plots the true positive rate (another name for recall) against the false positive rate. The false positive rate (FPR) is the ratio of negative instances that are incorrectly classified as positive. It is equal to one minus the true negative rate, which is the ratio of negative instances that are correctly classified as negative.
# The TNR is also called specificity. Hence the ROC curve plots sensitivity (recall) versus 1 - specificity.
# Fares Sayah https://www.kaggle.com/code/faressayah/logistic-regression-for-binary-classification-task
from sklearn.metrics import roc_curve
def plot_roc_curve(fpr, tpr, label=None):
plt.plot(fpr, tpr, linewidth=2, label=label)
plt.plot([0, 1], [0, 1], "k--")
plt.axis([0, 1, 0, 1])
plt.xlabel("False Positive Rate")
plt.ylabel("True Positive Rate")
plt.title("ROC Curve")
fpr, tpr, thresholds = roc_curve(y_test, lr_clf.predict(X_test))
plt.figure(figsize=(9, 6))
plot_roc_curve(fpr, tpr)
plt.show()
# Fares Sayah https://www.kaggle.com/code/faressayah/logistic-regression-for-binary-classification-task
from sklearn.metrics import roc_auc_score
roc_auc_score(y_test, lr_clf.predict(X_test))
# #Another bad result above (note that roc_auc_score was also given hard predictions here rather than predicted probabilities).
# #Logistic Regression Hyperparameter tuning
# Fares Sayah https://www.kaggle.com/code/faressayah/logistic-regression-for-binary-classification-task
from sklearn.model_selection import GridSearchCV
lr_clf = LogisticRegression()
penalty = ["l1", "l2"]
C = [0.5, 0.6, 0.7, 0.8]
class_weight = [{1: 0.5, 0: 0.5}, {1: 0.4, 0: 0.6}, {1: 0.6, 0: 0.4}, {1: 0.7, 0: 0.3}]
solver = ["liblinear", "saga"]
param_grid = dict(penalty=penalty, C=C, class_weight=class_weight, solver=solver)
lr_cv = GridSearchCV(
estimator=lr_clf, param_grid=param_grid, scoring="f1", verbose=1, n_jobs=-1, cv=10
)
lr_cv.fit(X_train, y_train)
best_params = lr_cv.best_params_
print(f"Best parameters: {best_params}")
lr_clf = LogisticRegression(**best_params)
lr_clf.fit(X_train, y_train)
print_score(lr_clf, X_train, y_train, X_test, y_test, train=True)
print_score(lr_clf, X_train, y_train, X_test, y_test, train=False)
|
# ## Import library
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow_hub as hub
import tensorflow as tf
import tensorflow.keras.backend as K
import gc
import sys
import os
import glob
import torch
import re
import math
import pickle
import datetime
import string
import nltk
import spacy
from scipy.stats import spearmanr
from math import floor, ceil
from tqdm.notebook import tqdm
from sklearn.model_selection import GroupKFold
from scipy import spatial
from nltk.tokenize import sent_tokenize
from nltk import wordpunct_tokenize
from sklearn.linear_model import MultiTaskElasticNet
from scipy.stats import spearmanr, rankdata
sys.path.insert(0, "../input/transformers/transformers-master/")
import transformers as ppb
# ## Read data
DEVICE = torch.device("cuda")
root_path = "../input/google-quest-challenge/"
ss = pd.read_csv(root_path + "/sample_submission.csv")
train = pd.read_csv(root_path + "/train.csv")
test = pd.read_csv(root_path + "/test.csv")
# ### Concat sentences
# train.columns
# train['full_text']
# technology=train[train.category == "TECHNOLOGY"]
# ## Exploratory
train[["question_title", "question_body", "answer"]]
train["question_title"] = train["question_title"] + "?"
train["question_body"] = train["question_body"] + "?"
train["answer"] = train["answer"] + "."
train["full_question"] = train["question_title"] + " [SEP] " + train["question_body"]
test["full_question"] = test["question_title"] + " [SEP] " + test["question_body"]
# count = 0
# for i in train.answer:
# print(count)
# print(i)
# print("-"*100)
# count += 1
# if count == 10:
# break
# ## Preprocessing
DEVICE = torch.device("cuda")
bert_model_config = (
"../input/pretrained-bert-models-for-pytorch/bert-base-uncased/bert_config.json"
)
bert_config = ppb.BertConfig.from_json_file(bert_model_config)
tokenizer = ppb.BertTokenizer.from_pretrained(
"../input/pretrained-bert-models-for-pytorch/bert-base-uncased-vocab.txt"
)
bert_model = ppb.BertModel.from_pretrained(
"../input/pretrained-bert-models-for-pytorch/bert-base-uncased/", config=bert_config
)
bert_model.to(DEVICE)
# text = 'i love you embedding'
# print(tokenizer.tokenize(text))
# print(tokenizer.vocab)
# -*- coding: utf-8 -*-
import re
alphabets = "([A-Za-z])"
prefixes = "(Mr|St|Mrs|Ms|Dr)[.]"
suffixes = "(Inc|Ltd|Jr|Sr|Co)"
starters = "(Mr|Mrs|Ms|Dr|He\s|She\s|It\s|They\s|Their\s|Our\s|We\s|But\s|However\s|That\s|This\s|Wherever)"
acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"
websites = "[.](com|net|org|io|gov)"
def split_into_sentences(text):
text = " " + text + " "
text = text.replace("\n", " ")
text = re.sub(prefixes, "\\1<prd>", text)
text = re.sub(websites, "<prd>\\1", text)
if "Ph.D" in text:
text = text.replace("Ph.D.", "Ph<prd>D<prd>")
text = re.sub("\s" + alphabets + "[.] ", " \\1<prd> ", text)
text = re.sub(acronyms + " " + starters, "\\1<stop> \\2", text)
text = re.sub(
alphabets + "[.]" + alphabets + "[.]" + alphabets + "[.]",
"\\1<prd>\\2<prd>\\3<prd>",
text,
)
text = re.sub(alphabets + "[.]" + alphabets + "[.]", "\\1<prd>\\2<prd>", text)
text = re.sub(" " + suffixes + "[.] " + starters, " \\1<stop> \\2", text)
text = re.sub(" " + suffixes + "[.]", " \\1<prd>", text)
text = re.sub(" " + alphabets + "[.]", " \\1<prd>", text)
if "”" in text:
text = text.replace(".”", "”.")
if '"' in text:
text = text.replace('."', '".')
if "!" in text:
text = text.replace('!"', '"!')
if "?" in text:
text = text.replace('?"', '"?')
text = text.replace(".", ".<stop>")
text = text.replace("?", "?<stop>")
text = text.replace("!", "!<stop>")
text = text.replace("<prd>", ".")
sentences = text.split("<stop>")
sentences = sentences[:-1]
sentences = [s.strip() for s in sentences]
return sentences
import itertools
words = set(nltk.corpus.words.words())
def remove_non_english(text):
return " ".join(
w
for w in nltk.wordpunct_tokenize(text)
if w.lower() in words or not w.isalpha()
)
# doc = spacy_nlp(x)
# tokens = [token.text for token in doc]
# preprocessed_doc = " ".join(w for w in tokens if w.lower() in words)
# return preprocessed_doc
# simple URL pattern (an approximation) used to replace links with a <URL> token
URL_REGEX = r"https?://\S+"
def add_token_url(text):
urls = re.findall(URL_REGEX, text)
count = 0
for url in urls:
text = text.replace(url, "<URL>")
text = sent_tokenize(text)
text = [x for x in text if x not in string.punctuation]
result = []
text = [x.splitlines() for x in text]
text = list(itertools.chain.from_iterable(text))
text = list(filter(None, text))
text = [remove_non_english(x) for x in text]
text = [x for x in text if x not in string.punctuation]
text = [re.sub(r"[^\w\s]", "", x) for x in text]
text = [re.sub(" +", " ", x) for x in text]
text = [x.strip() for x in text]
text = list(filter(None, text))
return " [SEP] ".join(text)
train["preprocessed_full_question"] = [add_token_url(x) for x in train["full_question"]]
train["preprocessed_answer"] = [add_token_url(x) for x in train["answer"]]
test["preprocessed_full_question"] = [add_token_url(x) for x in test["full_question"]]
test["preprocessed_answer"] = [add_token_url(x) for x in test["answer"]]
# ## Load Bert model
print(len(tokenizer))  # vocabulary size before adding the <URL> token
tokenizer.add_tokens(["<URL>"])
print(len(tokenizer))  # one larger after adding the <URL> token
bert_model.resize_token_embeddings(len(tokenizer))
# ## Convert to Bert inputs
def convert_text_to_vector(df, col, tokenizer, model):
df[col] = [x[:512] for x in df[col]]
tokenized = df[col].apply(lambda x: tokenizer.encode(x, add_special_tokens=True))
max_len = 512
for i in tqdm(range(len(tokenized))):
tokenized[i].extend([0] * (max_len - len(tokenized[i])))
tokenized = [np.array(x) for x in tokenized]
tokenized = np.array(tokenized)
attention_mask = np.where(tokenized != 0, 1, 0)
input_ids = torch.tensor(tokenized).to(DEVICE)
attention_mask = torch.tensor(attention_mask).to(DEVICE)
segments = []
for tokens in tqdm(tokenized):
segment = []
current_segment_id = 0
for token in tokens:
segment.append(current_segment_id)
if token == 102:
current_segment_id += 1
segment = segment + [current_segment_id + 1] * (512 - len(tokens))
segments.append(segment)
segments = torch.tensor(segments).to(DEVICE)
return input_ids, attention_mask, segments
batch_size = 64
targets = [
"question_asker_intent_understanding",
"question_body_critical",
"question_conversational",
"question_expect_short_answer",
"question_fact_seeking",
"question_has_commonly_accepted_answer",
"question_interestingness_others",
"question_interestingness_self",
"question_multi_intent",
"question_not_really_a_question",
"question_opinion_seeking",
"question_type_choice",
"question_type_compare",
"question_type_consequence",
"question_type_definition",
"question_type_entity",
"question_type_instructions",
"question_type_procedure",
"question_type_reason_explanation",
"question_type_spelling",
"question_well_written",
"answer_helpful",
"answer_level_of_information",
"answer_plausible",
"answer_relevance",
"answer_satisfaction",
"answer_type_instructions",
"answer_type_procedure",
"answer_type_reason_explanation",
"answer_well_written",
]
y = train[targets].values
# ----
# ## Features Engineering
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i : i + n]
def splitDataFrameIntoSmaller(df, chunkSize=10000):
listOfDf = list()
numberChunks = len(df) // chunkSize + 1
for i in range(numberChunks):
listOfDf.append(df[i * chunkSize : (i + 1) * chunkSize])
return listOfDf
# ### Category features
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
# fit on the train categories and reuse the same encoding for the test set
category_features_train = le.fit_transform(train["category"])
category_features_test = le.transform(test["category"])
# ### Universal Google Encoding
module_url = "../input/universalsentenceencoderlarge4/"
embed = hub.load(module_url)
def encoding_sentence(df, col, batch_size, model):
all_features = []
for tokenized_batch in tqdm(
splitDataFrameIntoSmaller(df[col].values, chunkSize=batch_size)
):
all_features.append(model(tokenized_batch)["outputs"].numpy())
all_features = np.vstack(all_features)
return all_features
def calculate_text_distance(
question_title_features, question_body_features, answer_features
):
dist1 = list(
map(
lambda x, y: np.linalg.norm(x - y),
question_title_features,
question_body_features,
)
)
dist2 = list(
map(lambda x, y: np.linalg.norm(x - y), question_body_features, answer_features)
)
dist3 = list(
map(
lambda x, y: np.linalg.norm(x - y), answer_features, question_title_features
)
)
cosdist = np.array([dist1, dist2, dist3])
cosdist = cosdist.T
dist1 = list(
map(
lambda x, y: spatial.distance.cosine(x, y),
question_title_features,
question_body_features,
)
)
dist2 = list(
map(
lambda x, y: spatial.distance.cosine(x, y),
question_body_features,
answer_features,
)
)
dist3 = list(
map(
lambda x, y: spatial.distance.cosine(x, y),
answer_features,
question_title_features,
)
)
l2dist = np.array([dist1, dist2, dist3])
l2dist = l2dist.T
distance = np.hstack([cosdist, l2dist])
return distance
question_title_encoding = encoding_sentence(train, "question_title", 32, embed)
question_body_encoding = encoding_sentence(train, "question_body", 32, embed)
answer_encoding = encoding_sentence(train, "answer", 32, embed)
question_title_encoding_test = encoding_sentence(test, "question_title", 32, embed)
question_body_encoding_test = encoding_sentence(test, "question_body", 32, embed)
answer_encoding_test = encoding_sentence(test, "answer", 32, embed)
train_distance = calculate_text_distance(
question_title_encoding, question_body_encoding, answer_encoding
)
test_distance = calculate_text_distance(
question_title_encoding_test, question_body_encoding_test, answer_encoding_test
)
# ## Build model
def compute_spearmanr(trues, preds):
rhos = []
for col_trues, col_pred in zip(trues.T, preds.T):
rhos.append(
spearmanr(
col_trues, col_pred + np.random.normal(0, 1e-7, col_pred.shape[0])
).correlation
)
return np.mean(rhos)
class CustomCallback(tf.keras.callbacks.Callback):
def __init__(self, valid_data, test_data, batch_size=16, fold=None):
self.valid_inputs = valid_data[0]
self.valid_outputs = valid_data[1]
self.test_inputs = test_data
self.batch_size = batch_size
self.fold = fold
def on_train_begin(self, logs={}):
self.valid_predictions = []
self.test_predictions = []
def on_epoch_end(self, epoch, logs={}):
self.valid_predictions = self.model.predict(self.valid_inputs)
rho_val = compute_spearmanr(self.valid_outputs, self.valid_predictions)
print("\n Epoch {}, Validation score {}".format(epoch, rho_val))
validation_file = open(
"/content/drive/My Drive/Colab/kaggle_competition/validation.txt", "a"
)
validation_file.write("\n Epoch {}, Validation score {}".format(epoch, rho_val))
validation_file.close()
if self.fold is not None:
self.model.save_weights(f"bert-base-{self.fold}-{epoch}.h5py")
self.test_predictions = self.model.predict(self.test_inputs)
# BERT_PATH = '../input/bert-base-uncased-huggingface-transformer/'
# from transformers import *
class BertClassification(tf.keras.Model):
def __init__(
self, flag_distance=False, flag_cat=False, flag_lstm=False, trainable=True
):
super().__init__(name="BertClassification")
self.bert_layer = hub.KerasLayer(
"../input/bert-base-from-tfhub/bert_en_uncased_L-12_H-768_A-12",
trainable=trainable,
)
# config = BertConfig() # print(config) to see settings
# config.output_hidden_states = False # Set to True to obtain hidden states
# config.trainable = True
# self.bert_layer = TFBertModel.from_pretrained(BERT_PATH+'bert-base-uncased-tf_model.h5', config=config)
self.global_avarage = tf.keras.layers.GlobalAveragePooling1D()
self.dense_out = tf.keras.layers.Dense(
30, activation="sigmoid", name="dense_output"
)
self.embed = tf.keras.layers.Embedding(500, 64, input_length=1)
self.dropout = tf.keras.layers.Dropout(0.1)
self.flag_distance = flag_distance
self.flag_cat = flag_cat
self.flag_lstm = flag_lstm
def call(self, inputs):
max_len = 512
inputs = [tf.cast(x, tf.int32) for x in inputs]
input_word_ids_title, input_masks_title, input_segments_title = (
inputs[0],
inputs[1],
inputs[2],
)
input_word_ids_answer, input_masks_answer, input_segments_answer = (
inputs[3],
inputs[4],
inputs[5],
)
features_cat = inputs[6]
distance_features = tf.cast(inputs[7], tf.float32)
_, sequence_output_title = self.bert_layer(
[input_word_ids_title, input_masks_title, input_segments_title]
)
global_title = self.global_avarage(sequence_output_title)
_, sequence_output_answer = self.bert_layer(
[input_word_ids_answer, input_masks_answer, input_segments_answer]
)
global_answer = self.global_avarage(sequence_output_answer)
embedding_cat = self.embed(features_cat)
embedding_cat = self.global_avarage(embedding_cat)
concat = tf.keras.layers.concatenate(
[global_title, global_answer, embedding_cat, distance_features]
)
concat = self.dropout(concat)
out = self.dense_out(concat)
return out
# model = BertClassification()
def training(X_train, y_train, X_val, y_val, X_test):
batch_size = 2
custom_callback = CustomCallback(
valid_data=(X_val, y_val), test_data=X_test, batch_size=batch_size
)
learning_rate = 3e-5
epochs = 20
loss_function = "binary_crossentropy"
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
model = BertClassification()
model.compile(loss=loss_function, optimizer=optimizer)
model.fit(
X_train,
y_train,
epochs=epochs,
batch_size=batch_size,
callbacks=[custom_callback],
)
return model
def training_2(X_train, y_train, X_val, y_val, X_test):
batch_size = 2
custom_callback = CustomCallback(
valid_data=(X_val, y_val), test_data=X_test, batch_size=batch_size
)
learning_rate = 3e-5
epochs = 3
loss_function = "binary_crossentropy"
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
model = question_answer_model()
model.compile(loss=loss_function, optimizer=optimizer)
model.fit(
X_train,
y_train,
epochs=epochs,
batch_size=batch_size,
callbacks=[custom_callback],
)
return model
# inputs_ids, attention_masks, segments = convert_text_to_vector(train, 'full_text', tokenizer, bert_model)
# inputs_ids_test, attention_masks_test, segments_test = convert_text_to_vector(test, 'full_text', tokenizer, bert_model)
inputs_ids_title, attention_masks_title, segments_title = convert_text_to_vector(
train, "preprocessed_full_question", tokenizer, bert_model
)
(
inputs_ids_test_title,
attention_masks_test_title,
segments_test_title,
) = convert_text_to_vector(test, "preprocessed_full_question", tokenizer, bert_model)
inputs_ids_answer, attention_masks_answer, segments_answer = convert_text_to_vector(
train, "preprocessed_answer", tokenizer, bert_model
)
(
inputs_ids_test_answer,
attention_masks_test_answer,
segments_test_answer,
) = convert_text_to_vector(test, "preprocessed_answer", tokenizer, bert_model)
X = [
inputs_ids_title.cpu().data.numpy(),
attention_masks_title.cpu().data.numpy(),
segments_title.cpu().data.numpy(),
inputs_ids_answer.cpu().data.numpy(),
attention_masks_answer.cpu().data.numpy(),
segments_answer.cpu().data.numpy(),
category_features_train,
train_distance,
]
X_test = [
inputs_ids_test_title.cpu().data.numpy(),
attention_masks_test_title.cpu().data.numpy(),
segments_test_title.cpu().data.numpy(),
inputs_ids_test_answer.cpu().data.numpy(),
attention_masks_test_answer.cpu().data.numpy(),
segments_test_answer.cpu().data.numpy(),
category_features_test,
test_distance,
]
"""
batch_size = 1
learning_rate = 3e-5
epochs = 7
loss_function = 'binary_crossentropy'
gkf = GroupKFold(n_splits=5).split(X=train.category, groups=train.category)
valid_preds = []
test_preds = []
validation_score = []
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
model = BertClassification()
model.compile(loss=loss_function, optimizer=optimizer)
for fold, (train_idx, valid_idx) in tqdm(enumerate(gkf)):
if fold in [1, 2]:
print("Fold {}".format(fold))
start = timeit.default_timer()
X_train = [X[i][train_idx] for i in range(len(X))]
y_train = y[train_idx]
X_val = [X[i][valid_idx] for i in range(len(X))]
y_val = y[valid_idx]
K.clear_session()
custom_callback = CustomCallback(valid_data=(X_val,y_val),test_data=X_test, batch_size=batch_size)
model.fit(X_train, y_train, epochs=epochs, batch_size=batch_size,callbacks=[custom_callback])
valid_preds.append(model.predict(X_val))
test_preds.append(model.predict(X_test))
rho_val = compute_spearmanr(y_val, valid_preds[-1])
validation_score.append(rho_val)
stop = timeit.default_timer()
training_time = stop - start
validation_file = open('/content/drive/My Drive/Colab/kaggle_competition/validation.txt','a')
validation_file.write('\nFold {}, Validation score {}, Training time {}'.format(fold,rho_val,training_time))
validation_file.close()
# del model
del X_train
del y_train
del X_val
del y_val
# print("Validation score : {}".format(compute_spearmanr(y_val, valid_preds[-1]))
# #Your statements here
"""
"""
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(X, y, random_state=1, test_size=0.25)
print("Validation score {}".format(compute_spearmanr(y_val, model.predict(X_val))))
"""
batch_size = 2
learning_rate = 3e-5
epochs = 10
loss_function = "binary_crossentropy"
X_val = [X[i][:500] for i in range(len(X))]
y_val = y[:500]
optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)
model = BertClassification()
model.compile(loss=loss_function, optimizer=optimizer)
# custom_callback = CustomCallback(valid_data=(X_val,y_val),test_data=X_test, batch_size=batch_size)
model.fit(X, y, epochs=epochs, batch_size=batch_size)
# ## Elastic Model
"""
X = [
category_features_train,
train_distance
]
X_test = [
category_features_test,
test_distance
]
elastic_model = MultiTaskElasticNet(alpha=0.001, random_state=42, l1_ratio=0.5)
elastic_model.fit(X, y)
"""
test_preds = model.predict(X_test)
submission = pd.DataFrame(columns=list(ss.columns))
submission["qa_id"] = test["qa_id"]
submission[targets] = test_preds
submission.to_csv("submission.csv", index=False)
submission
# https://mccormickml.com/2019/05/14/BERT-word-embeddings-tutorial/
|
import pandas as pd
from pathlib import Path
datafolder = Path("/kaggle/input/gse62944-06-01-15-tcga")
cancerType = pd.read_table(
datafolder / "GSE62944_06_01_15_TCGA_24_CancerType_Samples.txt", header=None
)
cancerType
cancerType[1].unique()
luad = cancerType.loc[cancerType[1] == "LUAD"]
luad
lusc = cancerType.loc[cancerType[1] == "LUSC"]
lusc
normalType = pd.read_table(
datafolder / "GSE62944_06_01_15_TCGA_24_Normal_CancerType_Samples.txt", header=None
)
normalType
norm = normalType.loc[(normalType[1] == "LUSC") | (normalType[1] == "LUAD")].copy()
norm
norm.loc[:, 1] = "NORM"
norm
lung_labels = pd.concat([norm, luad, lusc])
lung_labels = lung_labels[
    lung_labels[0] != "TCGA-44-6144-11A-01R-1755-07"
]  # remove this sample: it has no matching cancer tissue sample and no clinical covariates in the dataset
lung_labels
lung_labels = lung_labels.reset_index(drop=True) # reset indexes
lung_labels
ids_ordered = list(lung_labels.loc[:, 0].values)
ids_ordered[:10]
cancerFC = pd.read_table(
datafolder / "GSM1536837_06_01_15_TCGA_24.tumor_Rsubread_FeatureCounts.txt",
sep="\t",
header=0,
index_col=0,
)
cancerFC
normalFC = pd.read_table(
datafolder / "GSM1697009_06_01_15_TCGA_24.normal_Rsubread_FeatureCounts.txt",
sep="\t",
header=0,
index_col=0,
)
normalFC
FC = pd.concat([cancerFC, normalFC], axis=1)
FC
lung_FC = FC.loc[:, ids_ordered] # reorder in same way
lung_FC
cancerClinVar = pd.read_table(
datafolder / "GSE62944_06_01_15_TCGA_24_548_Clinical_Variables_9264_Samples.txt",
sep="\t",
header=0,
index_col=0,
)
cancerClinVar
exploreVar = cancerClinVar.iloc[:, 2:]
exploreVar = exploreVar.T
exploreVar
exploreVar["histological_type"].value_counts()
lung_covariates = cancerClinVar
lung_covariates
lung_covariates.loc["tumor_tissue_site"]
lung_covariates.columns
for (
col_norm
) in (
normalFC.columns
): # adding clinical covariates of normal sample (that correspond to a cancer sample)
for col_cancer in lung_covariates.columns:
if col_norm[:12] == col_cancer[:12]: # it's the same case id / patient
lung_covariates[col_norm] = lung_covariates[col_cancer]
lung_covariates = lung_covariates.loc[:, ids_ordered]
lung_covariates
nan_mask = lung_covariates.isna()
# count the number of NaN values in each row
row_nans = nan_mask.sum(axis=1)
# sort the row-wise counts in ascending order and inspect a slice of them below
top_rows = row_nans.sort_values()
top_rows[20:80]
# now keep the relevant covariates for lung (those with the fewest NAs);
# covariates_list is assumed to be defined earlier in the original notebook
lung_covariates = lung_covariates.loc[covariates_list,]
lung_covariates = lung_covariates.dropna(axis=1)
lung_covariates
# get new order_ids
final_ids = list(lung_covariates.columns)
lung_labels.set_index(0, inplace=True)
lung_labels
lung_FC = lung_FC.loc[:, final_ids]
lung_labels = lung_labels.loc[final_ids, :]
lung_covariates = lung_covariates.loc[:, final_ids]
# write to csv
lung_FC.to_csv("FC_expression.csv")
lung_labels.to_csv("labels.csv")
lung_covariates.to_csv("covariates.csv")
|
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
df = pd.read_csv(
"../input/rossmann-store-sales/train.csv", parse_dates=["Date"], low_memory=False
)
# df = pd.read_csv('../input/rossmann-store-sales/train.csv', parse_dates = True, index_col = 'Date', low_memory = False)
df.head()
df["Date"] = pd.to_datetime(df["Date"], format="%Y-%m-%d")
# df['Hour'] = df.index.hour
# df['Day_of_Month'] = df.index.day
# df['Day_of_Week'] = df.index.dayofweek
# df['Month'] = df.index.month
df["Hour"] = df["Date"].dt.hour
df["Day_of_Month"] = df["Date"].dt.day
df["Day_of_Week"] = df["Date"].dt.dayofweek
df["Month"] = df["Date"].dt.month
print(df["Date"].min())
print(df["Date"].max())
# print(df.index.min())
# print(df.index.max())
test = pd.read_csv(
"../input/rossmann-store-sales/test.csv", parse_dates=True, low_memory=False
)
test.head()
test["Date"] = pd.to_datetime(test["Date"], format="%Y-%m-%d")
test["Hour"] = test["Date"].dt.hour
test["Day_of_Month"] = test["Date"].dt.day
test["Day_of_Week"] = test["Date"].dt.dayofweek
test["Month"] = test["Date"].dt.month
print(test["Date"].min())
print(test["Date"].max())
sns.lineplot(x=df["Date"], y="Sales", data=df)
# resample on the Date column (the DataFrame is not indexed by date here)
resampled = df.resample("M", on="Date").mean(numeric_only=True)
# resampled.head()
sns.lineplot(x=resampled.index, y="Sales", data=resampled)
sns.pointplot(x="Month", y="Sales", data=df)
sns.pointplot(x="Day_of_Week", y="Sales", data=df)
sns.countplot(x="Day_of_Week", hue="Open", data=df)
plt.title("Store Daily Open Countplot")
sns.pointplot(x="Day_of_Month", y="Sales", data=df)
df["SalesPerCustomer"] = df["Sales"] / df["Customers"]
df["SalesPerCustomer"].describe()
df.Open.value_counts()
np.sum([df["Sales"] == 0])
# drop closed stores and stores with zero sales
df = df[(df["Open"] != 0) & (df["Sales"] != 0)]
store = pd.read_csv("../input/rossmann-store-sales/store.csv")
store.head(30)
store.isnull().sum()
store["CompetitionDistance"] = store["CompetitionDistance"].fillna(
store["CompetitionDistance"].mean()
)
store["CompetitionOpenSinceMonth"] = store["CompetitionOpenSinceMonth"].fillna(
0
) # try 0
store["CompetitionOpenSinceYear"] = store["CompetitionOpenSinceYear"].fillna(0) # try 0
store["Promo2SinceWeek"] = store["Promo2SinceWeek"].fillna(0) # try 0
store["Promo2SinceYear"] = store["Promo2SinceYear"].fillna(0) # try 0
store["PromoInterval"] = store["PromoInterval"].fillna(
store["PromoInterval"].mode().iloc[0]
) # try 0
store.head()
# store['Assortment'] = store['Assortment'].map({'a':1 , 'b':2 , 'c':3})
# store['Assortment'] = store['Assortment'].astype(int)
df_store = pd.merge(df, store, how="inner", on="Store")
df_store.head()
df_store.groupby("StoreType")["Sales"].describe()
df_store.groupby("StoreType")["Customers", "Sales"].sum()
# sales trends
sns.catplot(
data=df_store,
x="Month",
y="Sales",
col="StoreType", # per store type in cols
palette="plasma",
hue="StoreType",
row="Promo", # per promo in the store in rows
color="c",
)
# customer trends
sns.catplot(
data=df_store,
x="Month",
y="Customers",
col="StoreType", # per store type in cols
palette="plasma",
hue="StoreType",
row="Promo", # per promo in the store in rows
color="c",
)
# sales per customer
sns.catplot(
data=df_store,
x="Month",
y="SalesPerCustomer",
col="StoreType", # per store type in cols
palette="plasma",
hue="StoreType",
row="Promo", # per promo in the store in rows
color="c",
)
sns.catplot(
data=df_store,
x="Month",
y="Sales",
col="DayOfWeek", # per store type in cols
palette="plasma",
hue="StoreType",
row="StoreType", # per store type in rows
color="c",
)
# stores open on sunday
df_store[(df_store.Open == 1) & (df_store.DayOfWeek == 7)]["Store"].unique()
sns.catplot(
data=df_store,
x="DayOfWeek",
y="Sales",
col="Promo",
row="Promo2",
hue="Promo2",
palette="RdPu",
)
# remove error message below -- TypeError: float() argument must be a string or a number, not 'Period'
pd.plotting.register_matplotlib_converters()
# preparation: input should be float type and indexed by date for resampling/decomposition
df["Sales"] = df["Sales"] * 1.0
df_dated = df.set_index("Date").sort_index()
# one representative store per store type
sales_a = df_dated[df_dated.Store == 2]["Sales"]
sales_b = df_dated[df_dated.Store == 85]["Sales"].sort_index(
    ascending=True
)  # solve the reverse order
sales_c = df_dated[df_dated.Store == 1]["Sales"]
sales_d = df_dated[df_dated.Store == 13]["Sales"]
f, (ax1, ax2, ax3, ax4) = plt.subplots(4, figsize=(12, 13))
# store types
sales_a.resample("W").sum().plot(color="purple", ax=ax1)
sales_b.resample("W").sum().plot(color="orange", ax=ax2)
sales_c.resample("W").sum().plot(color="indigo", ax=ax3)
sales_d.resample("W").sum().plot(color="pink", ax=ax4)
from statsmodels.tsa.seasonal import seasonal_decompose
f, (ax1, ax2, ax3, ax4) = plt.subplots(4, figsize=(12, 13))
# yearly seasonality (period of 365 days); statsmodels renamed `freq` to `period`
decomposition_a = seasonal_decompose(sales_a, model="additive", period=365)
decomposition_a.trend.plot(color="purple", ax=ax1)
decomposition_b = seasonal_decompose(sales_b, model="additive", period=365)
decomposition_b.trend.plot(color="orange", ax=ax2)
decomposition_c = seasonal_decompose(sales_c, model="additive", period=365)
decomposition_c.trend.plot(color="indigo", ax=ax3)
decomposition_d = seasonal_decompose(sales_d, model="additive", period=365)
decomposition_d.trend.plot(color="pink", ax=ax4)
# autocorrelation
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
# figure for subplots
plt.figure(figsize=(12, 8))
# acf and pacf for A
plt.subplot(421)
plot_acf(sales_a, lags=50, ax=plt.gca(), color="purple")
plt.subplot(422)
plot_pacf(sales_a, lags=50, ax=plt.gca(), color="purple")
# acf and pacf for B
plt.subplot(423)
plot_acf(sales_b, lags=50, ax=plt.gca(), color="orange")
plt.subplot(424)
plot_pacf(sales_b, lags=50, ax=plt.gca(), color="orange")
# acf and pacf for C
plt.subplot(425)
plot_acf(sales_c, lags=50, ax=plt.gca(), color="indigo")
plt.subplot(426)
plot_pacf(sales_c, lags=50, ax=plt.gca(), color="indigo")
# acf and pacf for D
plt.subplot(427)
plot_acf(sales_d, lags=50, ax=plt.gca(), color="pink")
plt.subplot(428)
plot_pacf(sales_d, lags=50, ax=plt.gca(), color="pink")
plt.show()
df_store["StateHoliday"] = df_store["StateHoliday"].map(
{"0": 0, 0: 0, "a": 1, "b": 2, "c": 3}
)
df_store["StateHoliday"] = df_store["StateHoliday"].astype(int)
df_store["StoreType"] = df_store["StoreType"].map({"a": 1, "b": 2, "c": 3, "d": 4})
df_store["StoreType"] = df_store["StoreType"].astype(int)
df_store.isnull().sum()
df_store["Assortment"] = df_store["Assortment"].map({"a": 1, "b": 2, "c": 3})
df_store["Assortment"] = df_store["Assortment"].astype(int)
# df_store['PromoInterval'] = df_store['PromoInterval'].replace(np.nan, 'None')
df_store["PromoInterval"] = df_store["PromoInterval"].map(
{"Jan,Apr,Jul,Oct": 1, "Feb,May,Aug,Nov": 2, "Mar,Jun,Sept,Dec": 3}
)
df_store["PromoInterval"] = df_store["PromoInterval"].astype(int)
# df_store['PromoInterval'] = df_store['PromoInterval'].map({'None':0, 'Jan,Apr,Jul,Oct':1 , 'Feb,May,Aug,Nov':2 , 'Mar,Jun,Sept,Dec':3})
# df_store['PromoInterval'] = df_store['PromoInterval'].astype(int)
df_store.isnull().sum()
len(df_store)
test = pd.merge(test, store, how="inner", on="Store")
test.head()
test.isnull().sum()
test.fillna(method="ffill", inplace=True)
test["StateHoliday"] = test["StateHoliday"].map({"0": 0, 0: 0, "a": 1, "b": 2, "c": 3})
test["StateHoliday"] = test["StateHoliday"].astype(int)
test["StoreType"] = test["StoreType"].map({"a": 1, "b": 2, "c": 3, "d": 4})
test["StoreType"] = test["StoreType"].astype(int)
test["Assortment"] = test["Assortment"].map({"a": 1, "b": 2, "c": 3})
test["Assortment"] = test["Assortment"].astype(int)
test["PromoInterval"] = test["PromoInterval"].map(
{"Jan,Apr,Jul,Oct": 1, "Feb,May,Aug,Nov": 2, "Mar,Jun,Sept,Dec": 3}
)
test["PromoInterval"] = test["PromoInterval"].astype(int)
test = test.drop(["Id", "Date"], axis=1)
test_pred.head()
test.isnull().sum()
test_pred = gbrt.predict(test[X.columns])
test_pred_inv = np.exp(test_pred) - 1
# submission_predict = pd.DataFrame({'ID':test['Id'],'Sales':test_pred_inv})
# make submission df
prediction = pd.DataFrame(test_pred_inv)
submission = pd.read_csv("../input/rossmann-store-sales/sample_submission.csv")
prediction_df = pd.concat([submission["Id"], prediction], axis=1)
prediction_df.columns = ["Id", "Sales"]
prediction_df.to_csv("sample_submission.csv", index=False)
prediction_df.head()
# Machine Learning
X = df_store.drop(["Sales", "Date", "Customers"], axis=1)
# Transform Target Variable
y = np.log(df_store["Sales"] + 1)
from sklearn.model_selection import train_test_split
X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.30, random_state=1)
X_train.shape, X_val.shape, y_train.shape, y_val.shape
from sklearn.tree import DecisionTreeRegressor
dt = DecisionTreeRegressor(max_depth=11)
dt.fit(X_train, y_train)
y_pred_dt = dt.predict(X_val)
y_pred_dt = np.exp(y_pred_dt) - 1
y_val = np.exp(y_val) - 1
from sklearn.metrics import r2_score, mean_squared_error
print(r2_score(y_val, y_pred_dt))
print(np.sqrt(mean_squared_error(y_val, y_pred_dt)))
from sklearn.ensemble import GradientBoostingRegressor
gbrt = GradientBoostingRegressor(max_depth=2, n_estimators=120, random_state=42)
gbrt.fit(X_train, y_train)
print(gbrt.score(X_train, y_train))
from sklearn.metrics import r2_score, mean_squared_error


def rmspe(y_true, y_pred):
    # root mean squared percentage error, ignoring zero-sales targets
    y_true, y_pred = np.asarray(y_true), np.asarray(y_pred)
    mask = y_true != 0
    return np.sqrt(np.mean(((y_true[mask] - y_pred[mask]) / y_true[mask]) ** 2))


y_pred = gbrt.predict(X_val)
y_pred[y_pred < 0] = 0
y_pred_exp = np.expm1(y_pred)
y_val_exp = y_val  # y_val was already transformed back from log scale above
print("R-squared:", r2_score(y_val_exp, y_pred_exp))
print("RMSE:", np.sqrt(mean_squared_error(y_val_exp, y_pred_exp)))
print("RMSPE:", rmspe(y_val_exp, y_pred_exp))
# LSTM
import tensorflow as tf
from tensorflow import keras

# LSTM layers expect 3D input (samples, timesteps, features); add a single timestep axis
X_train_lstm = np.asarray(X_train, dtype="float32").reshape(
    X_train.shape[0], 1, X_train.shape[1]
)
X_val_lstm = np.asarray(X_val, dtype="float32").reshape(X_val.shape[0], 1, X_val.shape[1])
model = keras.Sequential()
model.add(
    keras.layers.Bidirectional(
        keras.layers.LSTM(
            units=128, input_shape=(X_train_lstm.shape[1], X_train_lstm.shape[2])
        )
    )
)
model.add(keras.layers.Dropout(rate=0.2))
model.add(keras.layers.Dense(units=1))
model.compile(loss="mean_squared_error", optimizer="adam")
history = model.fit(
    X_train_lstm, y_train, epochs=30, batch_size=32, validation_split=0.1, shuffle=False
)
plt.plot(history.history["loss"], label="train")
plt.plot(history.history["val_loss"], label="validation")
plt.legend()
# predictions are on the log scale, so transform them back before comparing with y_val
y_pred_lstm = model.predict(X_val_lstm)
plt.plot(np.asarray(y_val).flatten(), marker=".", label="true")
plt.plot(np.expm1(y_pred_lstm).flatten(), "r", label="prediction")
plt.ylabel("Sales")
plt.xlabel("Sample")
plt.legend()
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
#
# # Table of content
#
# * [1. Introduction](#1)
# - [Problem statement](#1.1)
# - [Data description](#1.2)
#
# * [2. Import libraries](#2)
#
# * [3. Basic Exploration](#3)
# - [Read dataset](#3.1)
# - [Some information](#3.2)
# - [Data transformation](#3.3)
# - [Data visualization](#3.4)
# * [4. Machine Learning model](#4)
#
# * [5 Conclusion](#5)
# * [6 Author Message](#6)
# # Import libraries
import pandas as pd
import numpy as np
pd.plotting.register_matplotlib_converters()
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
from datetime import datetime
from dateutil.parser import parse
print("Setup Complete")
#
# # Basic Exploration
# Read dataset
def read_dataset():
file_path = "/kaggle/input/popular-video-games-1980-2023/games.csv"
data = pd.read_csv(file_path, index_col=0)
return data
data = read_dataset()
#
# Some information
data.head()
data.shape
data.info()
data.nunique()
data.duplicated().any()
#
# Data transformation
# >
# Missing data treatment
total_null = data.isnull().sum().sort_values(ascending=False)
percent = ((data.isnull().sum() / data.isnull().count()) * 100).sort_values(
ascending=False
)
print("Total records = ", data.shape[0])
missing_data = pd.concat(
[total_null, percent.round(2)], axis=1, keys=["Total Missing", "In Percent"]
)
missing_data
data[["Rating", "Team", "Summary"]].info()
data["Rating"] = data["Rating"].replace(np.nan, 0.0)
data["Team"] = data["Team"].replace(np.nan, "Unknown Team")
data["Summary"] = data["Summary"].replace(np.nan, "Unknown Summary")
total_null = data.isnull().sum().sort_values(ascending=False)
percent = ((data.isnull().sum() / data.isnull().count()) * 100).sort_values(
ascending=False
)
print("Total records = ", data.shape[0])
missing_data = pd.concat(
[total_null, percent.round(2)], axis=1, keys=["Total Missing", "In Percent"]
)
missing_data
# >
# Clean the data
data.head()
count_cols = ["Times Listed", "Number of Reviews", "Plays", "Playing", "Backlogs", "Wishlist"]
# strip the "K" suffix (a plain .replace only matches whole cells, so use regex=True)
data[count_cols] = data[count_cols].astype(str).replace("K", "", regex=True)
data[["Times Listed", "Number of Reviews", "Plays", "Playing", "Backlogs", "Wishlist"]]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv(
"/kaggle/input/the-holy-quran-verses-of-fasting-and-pilgrimage/Quran Verses of Fasting and Pilgrimage.csv",
sep=",",
encoding="latin-1",
)
df.sample(20)
df["Class"].value_counts()
from wordcloud import WordCloud, STOPWORDS
import matplotlib.pyplot as plt
import re
surah_text = {}
custom_stopwords = ["ye", "verily", "will", "said", "say", "us", "thy", "thee"]
for sw in custom_stopwords:
STOPWORDS.add(sw)
for i, row in df.iterrows():
surah_text[i] = row["Sura Baqara"]
print(surah_text)
Rest_verses = df.query("Class=='Rest_verses'")["Sura Baqara"]
Pilgrimage = df.query("Class=='Pilgrimage'")["Sura Baqara"]
Fasting = df.query("Class=='Fasting'")["Sura Baqara"]
def wc(data, bgcolor):
plt.figure(figsize=(10, 10))
wc = WordCloud(background_color=bgcolor, max_words=100, stopwords=STOPWORDS)
wc.generate(" ".join(data))
plt.imshow(wc, interpolation="bilinear")
plt.axis("off")
plt.show()
wc(Rest_verses, "white")
wc(Pilgrimage, "yellow")
wc(Fasting, "blue")
|
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# # Import Dependecies
import pandas as pd
import numpy as np
from sklearn.model_selection import TimeSeriesSplit, ParameterGrid
from sklearn.metrics import mean_absolute_percentage_error
import matplotlib.pyplot as plt
import seaborn as sns
import os
import xgboost
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
color_pal = sns.color_palette()
plt.style.use("fivethirtyeight")
def get_date_range(df):
data_range_df = pd.DataFrame()
for i in df.reset_index().Tarih.tolist():
data_range = pd.date_range(start=i, end=i + pd.Timedelta("23 hours"), freq="H")
data_range_df = pd.concat([data_range_df, pd.DataFrame(data_range)], axis=0)
data_range_df.rename(columns={0: "Tarih"}, inplace=True)
data_range_df.reset_index(drop=True, inplace=True)
data_range_df["downtime"] = 1
data_range_df = data_range_df.set_index("Tarih")
return data_range_df
def get_calendar_range(df):
calendar_range_df = pd.DataFrame()
for _, i in df.iterrows():
calendar_range = pd.date_range(
start=i.CALENDAR_DATE,
end=i.CALENDAR_DATE + pd.Timedelta("23 hours"),
freq="H",
)
df_cal = pd.DataFrame(calendar_range)
df_cal["RAMADAN_FLAG"] = i.RAMADAN_FLAG
df_cal["WEEKEND_FLAG"] = i.WEEKEND_FLAG
df_cal["RELIGIOUS_DAY_FLAG_SK"] = i.RELIGIOUS_DAY_FLAG_SK
df_cal["NATIONAL_DAY_FLAG_SK"] = i.NATIONAL_DAY_FLAG_SK
df_cal["PUBLIC_HOLIDAY_FLAG"] = i.PUBLIC_HOLIDAY_FLAG
calendar_range_df = pd.concat([calendar_range_df, df_cal], axis=0)
calendar_range_df.rename(columns={0: "CALENDAR_DATE"}, inplace=True)
calendar_range_df["WEEKEND_FLAG"] = calendar_range_df["WEEKEND_FLAG"].replace(
{"N": 0, "Y": 1}
)
calendar_range_df["RAMADAN_FLAG"] = calendar_range_df["RAMADAN_FLAG"].replace(
{"N": 0, "Y": 1}
)
calendar_range_df["PUBLIC_HOLIDAY_FLAG"] = calendar_range_df[
"PUBLIC_HOLIDAY_FLAG"
].replace({"N": 0, "Y": 1})
calendar_range_df = calendar_range_df.set_index("CALENDAR_DATE")
return calendar_range_df
df = pd.read_csv("/kaggle/input/gdz-elektrik-datathon-2023/train.csv")
med_df = pd.read_csv("/kaggle/input/gdz-elektrik-datathon-2023/med.csv")
df_sub = pd.read_csv("/kaggle/input/gdz-elektrik-datathon-2023/sample_submission.csv")
calendar = pd.read_csv("/kaggle/input/turkish-calendar/Turkish calendar.csv", sep=";")
calendar["CALENDAR_DATE"] = pd.to_datetime(calendar["CALENDAR_DATE"])
med_df["downtime"] = 1
df = df.set_index("Tarih")
med_df = med_df.set_index("Tarih")
df_sub = df_sub.set_index("Tarih")
df.index = pd.to_datetime(df.index)
med_df.index = pd.to_datetime(med_df.index)
df_sub.index = pd.to_datetime(df_sub.index)
med_df = get_date_range(med_df)
calendar_piece = calendar[
[
"CALENDAR_DATE",
"RAMADAN_FLAG",
"WEEKEND_FLAG",
"RELIGIOUS_DAY_FLAG_SK",
"NATIONAL_DAY_FLAG_SK",
"PUBLIC_HOLIDAY_FLAG",
]
]
calendar_piece = calendar_piece[
(calendar_piece["CALENDAR_DATE"] >= df.index[0])
& (calendar_piece["CALENDAR_DATE"] <= df_sub.index[-1])
]
calendar_piece = get_calendar_range(calendar_piece)
df.plot(style=".", figsize=(15, 5), color=color_pal[0], title="Dağıtılan Enerji (MWh)")
plt.show()
tscv = TimeSeriesSplit(n_splits=5)
fig, ax = plt.subplots(5, 1, figsize=(15, 12))
fig.subplots_adjust(bottom=0.2)
for i, (train_index, test_index) in enumerate(tscv.split(df)):
train_part = df.iloc[train_index]
test_part = df.iloc[test_index]
print(f"Fold {i}:")
print(f" Train: index={train_index}")
print(f" Test: index={test_index}")
train_part.plot(
ax=ax[i], label="Training Set", title="Time Series Train/Test Split"
)
test_part.plot(ax=ax[i], label="Test Set")
plt.tight_layout()
plt.show()
# # Feature Engineering
def get_time_features(df):
df = df.copy()
df["hour"] = df.index.hour
df["dayofweek"] = df.index.dayofweek.astype("int32")
df["quarter"] = df.index.quarter
df["month"] = df.index.month
df["year"] = df.index.year
df["dayofyear"] = df.index.dayofyear
df["dayofmonth"] = df.index.day
df["weekofyear"] = df.index.isocalendar().week.astype("int32")
return df
def add_lags(df):
target_map = df["Dağıtılan Enerji (MWh)"].to_dict()
df["lag1"] = (df.index - pd.Timedelta("30 days")).map(target_map)
df["lag2"] = (df.index - pd.Timedelta("60 days")).map(target_map)
df["lag3"] = (df.index - pd.Timedelta("120 days")).map(target_map)
df["lag4"] = (df.index - pd.Timedelta("360 days")).map(target_map)
df["lag5"] = (df.index - pd.Timedelta("720 days")).map(target_map)
df["lag6"] = (df.index - pd.Timedelta("1080 days")).map(target_map)
return df
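# A small illustration (not from the original notebook) of the lag trick used in add_lags:
# the target series becomes a {timestamp: value} dict, and shifting the index by a Timedelta
# before .map() pulls in the value observed that many days earlier (NaN where no match exists).
_lag_demo = pd.DataFrame(
    {"value": [10.0, 11.0, 12.0]},
    index=pd.date_range("2022-01-01", periods=3, freq="D"),
)
_lag_map = _lag_demo["value"].to_dict()
_lag_demo["lag_1d"] = (_lag_demo.index - pd.Timedelta("1 days")).map(_lag_map)
print(_lag_demo)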
df_all = df.join(calendar_piece)
df_all = df_all.join(med_df)
df_all = get_time_features(df_all)
df_all = add_lags(df_all)
# df_all[["movave_7", "movstd_7"]] = df_all['Dağıtılan Enerji (MWh)'].rolling(7).agg([np.mean, np.std])
# df_all[["movave_30", "movstd_30"]] = df_all['Dağıtılan Enerji (MWh)'].rolling(30).agg([np.mean, np.std])
# df_all[["movave_90", "movstd_90"]] = df_all['Dağıtılan Enerji (MWh)'].rolling(90).agg([np.mean, np.std])
# df_all[["movave_365", "movstd_365"]] = df_all['Dağıtılan Enerji (MWh)'].rolling(365).agg([np.mean, np.std])
# df_all_rolling = df_all['Dağıtılan Enerji (MWh)'].rolling(window=90)
# df_all['q10'] = df_all_rolling.quantile(0.1).to_frame("q10")
# df_all['q50'] = df_all_rolling.quantile(0.5).to_frame("q50")
# df_all['q90'] = df_all_rolling.quantile(0.9).to_frame("q90")
# mean = np.mean(df_all['Dağıtılan Enerji (MWh)'].values)
# std = np.std(df_all['Dağıtılan Enerji (MWh)'].values)
# df_all["target"] = df_all['Dağıtılan Enerji (MWh)'].add(-mean).div(std)
df_all = df_all.fillna(df_all.mean())
df_all.tail()
# features = []
# corr_features=[]
# targets = []
# tau = 30 #forecasting periods
# for t in range(1, tau+1):
# data["target_t" + str(t)] = data.target.shift(-t)
# targets.append("target_t" + str(t))
# for t in range(1,31):
# data["feat_ar" + str(t)] = data.target.shift(t)
# #data["feat_ar" + str(t) + "_lag1y"] = data.target.shift(350)
# features.append("feat_ar" + str(t))
# #corr_features.append("feat_ar" + str(t))
# #features.append("feat_ar" + str(t) + "_lag1y")
# for t in [7, 14, 30]:
# data[["feat_movave" + str(t), "feat_movstd" + str(t), "feat_movmin" + str(t) ,"feat_movmax" + str(t)]] = data.energy.rolling(t).agg([np.mean, np.std, np.max, np.min])
# features.append("feat_movave" + str(t))
# #corr_features.append("feat_movave" + str(t))
# features.append("feat_movstd" + str(t))
# features.append("feat_movmin" + str(t))
# features.append("feat_movmax" + str(t))
# months = pd.get_dummies(data.mon,
# prefix="mon",
# drop_first=True)
# months.index = data.index
# data = pd.concat([data, months], axis=1)
# days = pd.get_dummies(data.day,
# prefix="day",
# drop_first=True)
# days.index = data.index
# data = pd.concat([data, days], axis=1)
# features = features + months.columns.values.tolist() + days.columns.values.tolist()
# corr_features = ["feat_ar1", "feat_ar2", "feat_ar3", "feat_ar4", "feat_ar5", "feat_ar6", "feat_ar7", "feat_movave7", "feat_movave14", "feat_movave30"]
# corr = data[["target_t1"] + corr_features].corr()
# top5_mostCorrFeats = corr["target_t1"].apply(abs).sort_values(ascending=False).index.values[:6]
# # Plot heatmap of correlation matrix
# sns.heatmap(corr, annot=True)
# plt.title("Pearson Correlation with 1 period target")
# plt.yticks(rotation=0); plt.xticks(rotation=90) # fix ticklabel directions
# plt.tight_layout() # fits plot area to the plot, "tightly"
# plt.show() # show the plot
# # Parameters Optimization
# # Model Training
tss = TimeSeriesSplit(n_splits=5)
df_all = df_all.sort_index()
preds = []
scores = []
for train_idx, val_idx in tss.split(df_all):
train = df_all.iloc[train_idx]
test = df_all.iloc[val_idx]
# FEATURES = ['hour', 'dayofweek', 'quarter', 'month',
# 'year', 'dayofyear', 'dayofmonth', 'weekofyear', 'downtime', 'lag1', 'lag2', 'lag3', 'lag4',
# 'RAMADAN_FLAG', 'WEEKEND_FLAG','RELIGIOUS_DAY_FLAG_SK',
# 'NATIONAL_DAY_FLAG_SK', 'PUBLIC_HOLIDAY_FLAG']
FEATURES = [
"RAMADAN_FLAG",
"WEEKEND_FLAG",
"RELIGIOUS_DAY_FLAG_SK",
"NATIONAL_DAY_FLAG_SK",
"PUBLIC_HOLIDAY_FLAG",
"downtime",
"hour",
"dayofweek",
"quarter",
"month",
"year",
"dayofyear",
"dayofmonth",
"weekofyear",
"lag1",
"lag2",
"lag3",
"lag4",
"lag5",
"lag6",
]
# 'q10', 'q50', 'q90', 'movave_90', 'movstd_90', 'movave_365', 'movstd_365',] #'movave_7', 'movstd_7', 'movave_30', 'movstd_30',
TARGET = "Dağıtılan Enerji (MWh)"
X_train = train[FEATURES]
y_train = train[TARGET].tolist()
X_test = test[FEATURES]
y_test = test[TARGET].tolist()
reg = xgboost.XGBRegressor(
base_score=0.5,
booster="gbtree",
n_estimators=600,
early_stopping_rounds=15,
objective="reg:squarederror",
max_depth=6,
colsample_bylevel=0.5,
learning_rate=0.017,
)
reg.fit(
X_train, y_train, eval_set=[(X_train, y_train), (X_test, y_test)], verbose=100
)
# rf_model = RandomForestRegressor(n_estimators=100, random_state=42)
# rf_model.fit(X_train, y_train)
y_pred = reg.predict(X_test).tolist()
preds.append(y_pred)
score = mean_absolute_percentage_error(y_test, y_pred)
scores.append(score)
print(f"Score across folds {np.mean(scores):0.4f}")
print(f"Fold scores:{scores}")
xgboost.plot_importance(reg)
plt.figure(figsize=(16, 12))
plt.show()
df_sub["isFuture"] = True
df["isFuture"] = False
df_and_future = pd.concat([df, df_sub])
df_and_future = df_and_future.join(calendar_piece)
df_and_future["downtime"] = 1.0
df_and_future = get_time_features(df_and_future)
df_and_future = add_lags(df_and_future)
# df_and_future[["movave_7", "movstd_7"]] = df_and_future['Dağıtılan Enerji (MWh)'].rolling(7).agg([np.mean, np.std])
# df_and_future[["movave_30", "movstd_30"]] = df_and_future['Dağıtılan Enerji (MWh)'].rolling(30).agg([np.mean, np.std])
# df_and_future[["movave_90", "movstd_90"]] = df_and_future['Dağıtılan Enerji (MWh)'].rolling(90).agg([np.mean, np.std])
# df_and_future[["movave_365", "movstd_365"]] = df_and_future['Dağıtılan Enerji (MWh)'].rolling(365).agg([np.mean, np.std])
# df_and_future_rolling = df_and_future['Dağıtılan Enerji (MWh)'].rolling(window=90)
# df_and_future['q10'] = df_and_future_rolling.quantile(0.1).to_frame("q10")
# df_and_future['q50'] = df_and_future_rolling.quantile(0.5).to_frame("q50")
# df_and_future['q90'] = df_and_future_rolling.quantile(0.9).to_frame("q90")
# mean = np.mean(df_all['Dağıtılan Enerji (MWh)'].values)
# std = np.std(df_all['Dağıtılan Enerji (MWh)'].values)
# df_and_future["target"] = df_and_future['Dağıtılan Enerji (MWh)'].add(-mean).div(std)
df_and_future = df_and_future.fillna(df_and_future.median())
df_and_future.head()
future_w_features = df_and_future.query("isFuture").copy()
future_w_features["pred"] = reg.predict(future_w_features[FEATURES])
future_w_features["pred"].plot(
figsize=(10, 5), color=color_pal[4], ms=1, lw=1, title="Future Predictions"
)
plt.show()
sample_submission_df = (
future_w_features["pred"]
.reset_index()
.rename(columns={"pred": "Dağıtılan Enerji (MWh)"})
)
sample_submission_df.to_csv("sample_submission.csv", index=False)
sample_submission_df
df_sub.drop(["isFuture"], axis=1).reset_index()
|
# # Exploratory Data Analysis - GDZ Elektrik Datathon 2023
# As part of the datathon, I tried to visualize the analysis of electricity distribution using the train dataset in a way that answers a few questions. You can see which questions are answered in the table of contents below. I would be very glad if you share your suggestions and thoughts about the notebook so that I can improve myself. :)
# - Notebooks that inspired me:
# - [dönemsel olayların ve veri setinin ilişkisi](https://www.kaggle.com/code/ilyassddemir/d-nemsel-olaylar-n-ve-veri-setinin-ili-kisi)
# - [Gdz Elektrik Datathon 2023 - EDA](https://www.kaggle.com/code/gunesevitan/gdz-elektrik-datathon-2023-eda)
# ## Table of Contents
# ##### 1. Libraries
# ##### 2. Data Preparation
# ##### 3. Average Electricity Distribution by Year, Month and Season
# ##### 4. Variation of Electricity Distribution by Weekday/Weekend, Season and Hour
# ##### 5. Effect of Season and Weekday/Weekend on the Hourly Energy Distribution
# ##### 6. Effect of Turkey's Special Days on Electricity Distribution
# ##### 7. Effect of Outages on Electricity Distribution on Days When Outages Exceeded the Acceptable Level
# ### Libraries
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# ### Data Preparation
data = pd.read_csv(
"/kaggle/input/gdz-elektrik-datathon-2023/train.csv",
parse_dates=["DateTime"],
index_col="DateTime",
header=0,
names=["DateTime", "Distributed Energy(MWh)"],
)
data.head()
# Prepared Major Event Day
medData = pd.read_csv(
"/kaggle/input/gdz-elektrik-datathon-2023/med.csv", header=0, names=["StartDate"]
)
medData["EndDate"] = medData["StartDate"]
medData["StartDate"] = medData["StartDate"] + " " + "00:00:00"
medData["EndDate"] = medData["EndDate"] + " " + "23:59:59"
med_dates = medData.values.tolist()
# Add year, month, day and hour column
data["Year"] = data.index.year
data["Month"] = data.index.month
data["Day"] = data.index.day
data["Hour"] = data.index.hour
# Add season column
def addSeasonColumn(month):
    if month in [12, 1, 2]:
        return "Winter"
    elif month in [3, 4, 5]:
        return "Spring"
    elif month in [6, 7, 8]:
        return "Summer"
    elif month in [9, 10, 11]:
        return "Fall"
data["Season"] = data["Month"].apply(addSeasonColumn)
# Add Weekday/Weekend columns
dayofWeek = data.index.day_name()
weekday_weekend = []
for day in dayofWeek:
if day in ["Saturday", "Sunday"]:
weekday_weekend.append("Weekend")
else:
weekday_weekend.append("Weekday")
data["Weekday/Weekend"] = weekday_weekend
# Annual Average of Distributed Energy
yearlyMean = data.groupby([data.index.year])["Distributed Energy(MWh)"].mean()
yearlyMean.index.names = ["Year"]
yearlyMean = yearlyMean.reset_index()
# Distributed Energy Average according to year and month
monthlyMean = data.groupby([data.index.year, data.index.month_name()])[
"Distributed Energy(MWh)"
].mean()
monthlyMean.index.names = ["Year", "Month"]
monthlyMean = monthlyMean.reset_index()
# Seasonally Average of Distributed Energy
seasonMean = data.groupby([data.index.year, data["Season"]])[
"Distributed Energy(MWh)"
].mean()
seasonMean.index.names = ["Year", "Season"]
seasonMean = seasonMean.reset_index()
# Special Dates in Turkey
special_dates = [
("2018-05-16 00:00:00", "2018-06-14 23:59:59"),
("2019-05-06 00:00:00", "2019-06-03 23:59:59"),
("2020-04-24 00:00:00", "2020-05-23 23:59:59"),
("2021-04-13 00:00:00", "2021-05-12 23:59:59"),
("2022-04-02 00:00:00", "2022-05-01 23:59:59"),
("2018-01-01 00:00:00", "2018-01-01 23:59:59"),
("2018-04-23 00:00:00", "2018-04-23 23:59:59"),
("2018-05-01 00:00:00", "2018-05-01 23:59:59"),
("2018-05-19 00:00:00", "2018-05-19 23:59:59"),
("2018-06-15 00:00:00", "2018-06-15 23:59:59"),
("2018-06-16 00:00:00", "2018-06-16 23:59:59"),
("2018-06-17 00:00:00", "2018-06-17 23:59:59"),
("2018-07-15 00:00:00", "2018-07-15 23:59:59"),
("2018-08-21 00:00:00", "2018-08-21 23:59:59"),
("2018-08-22 00:00:00", "2018-08-22 23:59:59"),
("2018-08-23 00:00:00", "2018-08-23 23:59:59"),
("2018-08-24 00:00:00", "2018-08-24 23:59:59"),
("2018-08-30 00:00:00", "2018-08-30 23:59:59"),
("2018-10-29 00:00:00", "2018-10-29 23:59:59"),
("2019-01-01 00:00:00", "2019-01-01 23:59:59"),
("2019-04-23 00:00:00", "2019-04-23 23:59:59"),
("2019-05-01 00:00:00", "2019-05-01 23:59:59"),
("2019-05-19 00:00:00", "2019-05-19 23:59:59"),
("2019-06-04 00:00:00", "2019-06-04 23:59:59"),
("2019-06-05 00:00:00", "2019-06-05 23:59:59"),
("2019-06-06 00:00:00", "2019-06-06 23:59:59"),
("2019-07-15 00:00:00", "2019-07-15 23:59:59"),
("2019-08-11 00:00:00", "2019-08-11 23:59:59"),
("2019-08-12 00:00:00", "2019-08-12 23:59:59"),
("2019-08-13 00:00:00", "2019-08-13 23:59:59"),
("2019-08-14 00:00:00", "2019-08-14 23:59:59"),
("2019-08-30 00:00:00", "2019-08-30 23:59:59"),
("2019-10-29 00:00:00", "2019-10-29 23:59:59"),
("2020-01-01 00:00:00", "2020-01-01 23:59:59"),
("2020-04-23 00:00:00", "2020-04-23 23:59:59"),
("2020-05-01 00:00:00", "2020-05-01 23:59:59"),
("2020-05-19 00:00:00", "2020-05-19 23:59:59"),
("2020-05-24 00:00:00", "2020-05-24 23:59:59"),
("2020-05-25 00:00:00", "2020-05-25 23:59:59"),
("2020-05-26 00:00:00", "2020-05-26 23:59:59"),
("2020-07-15 00:00:00", "2020-07-15 23:59:59"),
("2020-07-31 00:00:00", "2020-07-31 23:59:59"),
("2020-08-01 00:00:00", "2020-08-01 23:59:59"),
("2020-08-02 00:00:00", "2019-08-02 23:59:59"),
("2020-08-03 00:00:00", "2019-08-03 23:59:59"),
("2020-08-30 00:00:00", "2020-08-30 23:59:59"),
("2020-10-29 00:00:00", "2020-10-29 23:59:59"),
("2021-01-01 00:00:00", "2021-01-01 23:59:59"),
("2021-04-23 00:00:00", "2021-04-23 23:59:59"),
("2021-05-01 00:00:00", "2021-05-01 23:59:59"),
("2021-05-13 00:00:00", "2021-05-13 23:59:59"),
("2021-05-14 00:00:00", "2021-05-14 23:59:59"),
("2021-05-15 00:00:00", "2021-05-15 23:59:59"),
("2021-05-19 00:00:00", "2021-05-19 23:59:59"),
("2021-07-15 00:00:00", "2021-07-15 23:59:59"),
("2021-07-20 00:00:00", "2021-07-20 23:59:59"),
("2021-07-21 00:00:00", "2021-07-21 23:59:59"),
("2021-07-22 00:00:00", "2021-07-22 23:59:59"),
("2021-07-23 00:00:00", "2021-07-23 23:59:59"),
("2021-08-30 00:00:00", "2021-08-30 23:59:59"),
("2021-10-29 00:00:00", "2021-10-29 23:59:59"),
("2022-01-01 00:00:00", "2022-01-01 23:59:59"),
("2022-04-23 00:00:00", "2022-04-23 23:59:59"),
("2022-05-01 00:00:00", "2022-05-01 23:59:59"),
("2022-05-02 00:00:00", "2022-05-02 23:59:59"),
("2022-05-03 00:00:00", "2022-05-03 23:59:59"),
("2022-05-04 00:00:00", "2022-05-04 23:59:59"),
("2022-05-19 00:00:00", "2022-05-19 23:59:59"),
("2022-07-09 00:00:00", "2022-07-09 23:59:59"),
("2022-07-10 00:00:00", "2022-07-10 23:59:59"),
("2022-07-11 00:00:00", "2022-07-11 23:59:59"),
("2022-07-12 00:00:00", "2022-07-12 23:59:59"),
("2022-07-15 00:00:00", "2022-07-15 23:59:59"),
("2022-08-30 00:00:00", "2022-08-30 23:59:59"),
("2022-10-29 00:00:00", "2022-10-29 23:59:59"),
]
# Annual Distributed Energy
def plot_annual_energy_distribution(df, dates, year):
df_year = df[df["Year"] == year]
df_dates = pd.DataFrame()
for date_range in dates:
start_date, end_date = pd.to_datetime(date_range[0]), pd.to_datetime(
date_range[1]
)
df_dates = pd.concat(
[
df_dates,
df_year.loc[
(df_year.index >= start_date) & (df_year.index <= end_date)
],
]
)
plt.figure(figsize=(20, 6))
plt.plot(df_year.index, df_year["Distributed Energy(MWh)"], color="orange")
plt.scatter(df_dates.index, df_dates["Distributed Energy(MWh)"], color="red")
plt.xlabel("Datetime")
plt.ylabel("Distributed Energy(MWh)")
plt.title(f"{year} Year Energy Distribution (Special Days are Red)")
plt.show()
# ### Average Electricity Distribution by Year, Month and Season
fig, ax = plt.subplots(figsize=(15, 5))
sns.barplot(x="Year", y="Distributed Energy(MWh)", data=yearlyMean)
plt.title("Change of Electricity Distribution by Years")
plt.show()
fig, ax = plt.subplots(figsize=(25, 10))
sns.barplot(x="Month", y="Distributed Energy(MWh)", hue="Year", data=monthlyMean)
plt.title("Distributed Energy Average according to year and month")
plt.show()
fig, ax = plt.subplots(figsize=(25, 10))
sns.barplot(x="Season", y="Distributed Energy(MWh)", hue="Year", data=seasonMean)
plt.title("Seasonally Average of Distributed Energy")
plt.show()
# ## Variation of Electricity Distribution by Weekday/Weekend, Season and Hour
for i in data.columns[4:]:
fig, ax = plt.subplots(figsize=(25, 10))
sns.scatterplot(data=data, x=data.index, y="Distributed Energy(MWh)", hue=i)
plt.title(i + " - hourly energy flow ")
# ### Effect of Season and Weekday/Weekend on the Hourly Energy Distribution
for i in data.columns[5:]:
data.groupby([i, "Hour"])["Distributed Energy(MWh)"].mean().unstack(level=0).plot(
figsize=(12, 8)
)
plt.title(i + "- hourly energy distrubition")
plt.xlabel("Hour")
plt.ylabel("Average Distributed Energy (MWh)")
plt.show()
# ### Effect of Turkey's Special Days on Electricity Distribution
for i in data.Year.unique():
plot_annual_energy_distribution(data, special_dates, i)
# ### Effect of Outages on Electricity Distribution on Days When Outages Exceeded the Acceptable Level
for i in data.Year.unique():
plot_annual_energy_distribution(data, med_dates, i)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import cv2
import os
from fastai.vision import *
from fastai.metrics import error_rate
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
train = pd.read_csv("../input/digit-recognizer/train.csv", encoding="latin1")
test = pd.read_csv("../input/digit-recognizer/test.csv")
sub = pd.read_csv("../input/digit-recognizer/sample_submission.csv")
train.tail()
class CustomImageList(ImageList):
def open(self, fn):
img = fn.reshape(28, 28)
img = np.stack((img,) * 3, axis=-1)
return Image(pil2tensor(img, dtype=np.float32))
@classmethod
def from_csv_custom(
cls,
path: PathOrStr,
csv_name: str,
imgIdx: int = 1,
header: str = "infer",
**kwargs
) -> "ItemList":
df = pd.read_csv(Path(path) / csv_name, header=header)
res = super().from_df(df, path=path, cols=0, **kwargs)
res.items = df.iloc[:, imgIdx:].apply(lambda x: x.values / 255.0, axis=1).values
return res
tfms = get_transforms(do_flip=False)
data = (
CustomImageList.from_csv_custom(
path="../input/digit-recognizer/", csv_name="train.csv", imgIdx=1
)
.split_by_rand_pct(0.2)
.label_from_df(cols="label")
    .add_test(
        CustomImageList.from_csv_custom(
            # the test CSV has no label column, so pixel columns start at index 0
            path="../input/digit-recognizer/", csv_name="test.csv", imgIdx=0
        ),
        label=0,
    )
.transform(tfms)
.databunch(bs=128, num_workers=0)
.normalize(imagenet_stats)
)
data.show_batch(rows=3, figsize=(5, 5))
learn = cnn_learner(
data, models.resnet50, metrics=accuracy, model_dir="/kaggle/working/models"
)
learn.lr_find()
learn.recorder.plot()
|
# # Playground Series - Season 3, Episode 12
# ## Binary Classification with a Kidney Stone Prediction Dataset
# ### Goal
# For each id in the test set, you must predict the probability of target (likelihood of the presence of a kidney stone).
# ## Import libraries
import pandas as pd
import matplotlib.pyplot as plt
# # Read the Data
# Read the data from CSV files and set the index from `id` column.
train_df = pd.read_csv(
"/kaggle/input/playground-series-s3e12/train.csv", index_col="id"
)
test_df = pd.read_csv("/kaggle/input/playground-series-s3e12/test.csv", index_col="id")
# # EDA (Exploratory Data Analysis)
train_df.sample(10)
train_df.describe()
train_df.info()
test_df.describe()
# ## Available features
# - gravity
# - ph
# - osmo
# - cond
# - urea
# - calc
rows_in_training_data = train_df.shape[0]
rows_in_testing_data = test_df.shape[0]
print(f"Rows in training data: {rows_in_training_data}.")
print(f"Rows in testing data: {rows_in_testing_data}.")
print(f"Total data: {rows_in_training_data + rows_in_testing_data}")
null_columns_in_train_df = train_df.columns[train_df.isnull().any()]
null_columns_in_test_df = test_df.columns[test_df.isnull().any()]
print(f"There are {len(null_columns_in_train_df)} null columns in train data.")
print(f"There are {len(null_columns_in_test_df)} null columns in test data.")
# distributions for all variables (exclude target)
for i in test_df.columns:
plt.hist(train_df[i])
plt.title(i)
plt.show()
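# A minimal hedged baseline sketch (not part of the original notebook): the goal above asks for
# the probability of the target, so a quick scaled logistic regression on the six listed features
# gives a sanity-check submission. It assumes the target column is named `target` and that the
# id is the index, as set up in the read cells above.
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler

features = ["gravity", "ph", "osmo", "cond", "urea", "calc"]
baseline = make_pipeline(StandardScaler(), LogisticRegression(max_iter=1000))
cv_auc = cross_val_score(
    baseline, train_df[features], train_df["target"], cv=5, scoring="roc_auc"
)
print("Baseline CV ROC-AUC:", cv_auc.mean().round(4))
baseline.fit(train_df[features], train_df["target"])
submission = pd.DataFrame(
    {"id": test_df.index, "target": baseline.predict_proba(test_df[features])[:, 1]}
)
# submission.to_csv("submission.csv", index=False)  # uncomment to write the submission file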
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import random
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
from torchvision import datasets, transforms
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import Dense, BatchNormalization
import torch
import torchvision
import torchvision.transforms as transforms
dest = "/kaggle/input/data12"
tr = transforms.Compose([transforms.Resize((224, 224)), transforms.ToTensor()])
classes = ("vehicles", "dogs", "food")
inputs = datasets.ImageFolder(dest, transform=tr)
# 60/40 train/test split (sizes must sum to the dataset length for random_split)
tl = int(0.60 * len(inputs))
el = len(inputs) - tl
train_d, test_d = torch.utils.data.random_split(inputs, [tl, el])
t_loader = torch.utils.data.DataLoader(train_d, batch_size=32, shuffle=True)
e_loader = torch.utils.data.DataLoader(test_d, batch_size=32, shuffle=True)
import torch
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
class AlexNet(nn.Module):
def __init__(self, num_classes=1000):
super(AlexNet, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(64, 192, kernel_size=5, padding=2),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
nn.Conv2d(192, 384, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2),
)
self.avgpool = nn.AdaptiveAvgPool2d((6, 6))
self.classifier = nn.Sequential(
nn.Dropout(),
nn.Linear(256 * 6 * 6, 4096),
nn.ReLU(inplace=True),
nn.Dropout(),
nn.Linear(4096, 4096),
nn.ReLU(inplace=True),
nn.Linear(4096, num_classes),
)
def forward(self, x):
x = self.features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
x = self.classifier(x)
return x
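# A quick shape sanity check (illustrative, not from the original notebook): pushing a random
# 224x224 RGB batch through the network should yield one logit per class.
_sanity_model = AlexNet(num_classes=len(classes))
_sanity_out = _sanity_model(torch.randn(2, 3, 224, 224))
print(_sanity_out.shape)  # expected: torch.Size([2, 3])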
import torch.optim as optim
import time
# Define the number of epochs
num_epochs = 30
batch_size = 32
# Initialize the model
model = AlexNet(num_classes=len(classes))
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model.to(device)
# Define the loss function
criterion = nn.CrossEntropyLoss()
# Define the optimizer
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
# Define the learning rate scheduler
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.1)
# Start the timer
start_time = time.time()
# Train the model
for epoch in range(num_epochs):
print("Epoch:", epoch + 1)
running_loss = 0.0
for i, data in enumerate(t_loader, 0):
inputs, labels = data
inputs, labels = inputs.to(device), labels.to(device)
optimizer.zero_grad()
outputs = model(inputs)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
running_loss += loss.item()
if i % 200 == 199: # print every 200 mini-batches
print("[%d, %5d] loss: %.3f" % (epoch + 1, i + 1, running_loss / 200))
running_loss = 0.0
# Update the learning rate
scheduler.step()
# End the timer
end_time = time.time()
print("Finished Training")
print("Total Training Time:", end_time - start_time, "seconds")
# Save the trained model
PATH = "my_model.pth"
torch.save(model.state_dict(), PATH)
# Load the saved model
model = AlexNet(num_classes=len(classes))
model.load_state_dict(torch.load("my_model.pth", map_location=device))
model.to(device)
model.eval()
correct = 0
total = 0
with torch.no_grad():
    for i, data in enumerate(e_loader, 0):
        images, labels = data
        images, labels = images.to(device), labels.to(device)
        outputs = model(images)
_, predicted = torch.max(outputs.data, 1)
total += labels.size(0)
correct += (predicted == labels).sum().item()
if i % 100 == 99: # print every 100 mini-batches
print("Running accuracy: %d %%" % (100 * correct / total))
print("Accuracy of the network on the test images: %d %%" % (100 * correct / total))
|
import gensim
from gensim.models import Word2Vec
# Preprocess the text
sentences = [
["cat", "sat", "mat"],
["dog", "chased", "cat"],
["cat", "meowed"],
["dog", "barked"],
]
# Train the Word2Vec model (gensim >= 4 uses `vector_size` instead of `size`)
model = Word2Vec(sentences, vector_size=5, window=2, min_count=1, workers=4)
# Get the embedding for the word 'cat'
cat_embedding = model.wv["cat"]
print(cat_embedding)
import gensim
from gensim.models import Word2Vec
# define a small corpus of text data
sentences = [
["the", "cat", "sat", "on", "the", "mat"],
["the", "dog", "chased", "the", "cat"],
]
# train a Word2Vec model on the corpus
model = Word2Vec(sentences, vector_size=100, window=5, min_count=1, workers=4)
# get the vector representation of a word
vector1 = model.wv["cat"]
print(vector1)
vector2 = model.wv["the"]
print(vector2)
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np
similarity_score = cosine_similarity(vector1.reshape(1, -1), vector2.reshape(1, -1))[
0, 0
]
# Print the similarity score
print("Similarity score:", similarity_score)
# find the most similar words to a given word
similar_words = model.wv.most_similar("dog")
print(similar_words)
|
# # Image Clustering
# Image clustering is the process of grouping similar images into clusters or categories based on some similarity criterion. The goal of image clustering is to organize a large set of images into meaningful groups so that they can be easily analyzed or searched. Common methods for image clustering include hierarchical clustering, k-means clustering, and spectral clustering.
# If you find this notebook helpful please upvote.
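# A tiny standalone sketch (illustrative, not part of the original pipeline): k-means assigns each
# point to the nearest of k centroids, which is exactly what happens below, only with InceptionV3
# feature vectors instead of 2-D points.
import numpy as np
from sklearn.cluster import KMeans

_toy_points = np.array([[0.0, 0.0], [0.2, 0.1], [5.0, 5.0], [5.1, 4.9]])
_toy_km = KMeans(n_clusters=2, n_init=10, random_state=0).fit(_toy_points)
print(_toy_km.labels_)  # two well-separated groups, e.g. [0 0 1 1] (label order may vary)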
# # Import necessary libraries
import matplotlib.pyplot as plt
from tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import img_to_array
from sklearn.cluster import KMeans
import pandas as pd
import numpy as np
from tqdm import tqdm
import os
import shutil
# # Extract features using Inception-V3
# This code defines a function called `image_feature` that takes a directory (or a list of filenames) as input. The function uses the `InceptionV3` model to extract features from each image in the directory.
# - The `InceptionV3` model is loaded from the `Keras` library with the weights parameter set to `'imagenet'` and the `include_top` parameter set to False, which means that the fully connected layers at the top of the network (which are responsible for classification) will be excluded.
# - Two empty lists `features` and `img_name` are defined to store the extracted features and image filenames, respectively.
# - The function loops over each file in the directory, and for each file:
# - The image is loaded using the image.load_img() function from the Keras library. The target size of the image is set to (224, 224), which is the input size expected by the InceptionV3 model.
# - The image is converted to a numpy array using the img_to_array() function from the Keras library.
# - The numpy array is expanded to a 4-dimensional array using the np.expand_dims() function from the numpy library.
# - The image is preprocessed using the preprocess_input() function from the Keras library. This function performs mean normalization on the image to prepare it for input to the InceptionV3 model.
# - The InceptionV3 model is used to extract features from the preprocessed image using the model.predict() function.
# - The extracted features are flattened into a 1-dimensional array using the flatten() function from the numpy library.
# - The flattened features are appended to the features list.
# - The filename of the image is appended to the img_name list.
# - After all the images in the directory have been processed, the features and img_name lists are returned by the function.
def image_feature(direc):
model = InceptionV3(weights="imagenet", include_top=False)
features = []
img_name = []
for file in direc:
img = image.load_img(
"/kaggle/input/image-for-clustering/Image-for-Clustering/image/" + file,
target_size=(224, 224),
)
x = img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
feat = model.predict(x)
feat = feat.flatten()
features.append(feat)
img_name.append(file)
return features, img_name
img_path = os.listdir("/kaggle/input/image-for-clustering/Image-for-Clustering/image")
img_features, img_name = image_feature(img_path)
# # Clustering
k = 3
clusters = KMeans(k, random_state=42)
clusters.fit(img_features)
clusters.labels_
image_cluster = pd.DataFrame(img_name, columns=["image"])
image_cluster["labels"] = clusters.labels_
image_cluster.head()
# # Let's visualize the clusters
cl1 = image_cluster[image_cluster["labels"] == 0]["image"].tolist()
len(cl1)
cl2 = image_cluster[image_cluster["labels"] == 1]["image"].tolist()
len(cl2)
cl3 = image_cluster[image_cluster["labels"] == 2]["image"].tolist()
len(cl3)
def plot_clusters(path, row, col, image_list, title="Clusters"):
folder_path = path
image_list = image_list
rows = row
cols = col
fig, axs = plt.subplots(rows, cols, figsize=(12, 8))
fig.suptitle(title)
for i, img_name in enumerate(image_list):
row = i // cols
col = i % cols
img_path = os.path.join(folder_path, img_name)
img = plt.imread(img_path)
axs[row, col].imshow(img)
axs[row, col].axis("off")
plt.show()
plot_clusters(
"/kaggle/input/image-for-clustering/Image-for-Clustering/image/",
5,
9,
cl1,
"Red roses",
)
plot_clusters(
"/kaggle/input/image-for-clustering/Image-for-Clustering/image/",
5,
7,
cl2,
"Hyacinth",
)
plot_clusters(
"/kaggle/input/image-for-clustering/Image-for-Clustering/image/",
5,
7,
cl3,
"Sunflower",
)
|
# # Welcome to import land
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import matplotlib
from sklearn.neighbors import KNeighborsClassifier
import sklearn
from sklearn.model_selection import train_test_split
from sklearn import metrics
import seaborn as sns
import matplotlib.pyplot as pyplt
from sklearn.ensemble import RandomForestClassifier
import os
import glob
from glob import glob
import xgboost as xgb
import keras
from keras.datasets import mnist
from keras.layers import Dense
from keras.models import Sequential
import tensorflow as tf
from matplotlib import pyplot as plt
from sklearn.linear_model import LogisticRegression
from random import randint
from sklearn.linear_model import Perceptron
from keras.layers import Dropout
from sklearn.preprocessing import StandardScaler
# for dirname, _, filenames in os.walk('/kaggle/input'):
# for filename in filenames:
# print(os.path.join(dirname, filename))
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import LabelBinarizer
from keras.layers import Dropout
from sklearn.model_selection import GridSearchCV
lb = LabelEncoder()
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
from sklearn.ensemble import VotingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from mpl_toolkits.mplot3d import Axes3D
import cv2
import tqdm
from sklearn.utils import shuffle
from PIL import Image
# from torchvision import transforms
from keras.preprocessing.image import ImageDataGenerator
from keras.layers import (
Activation,
Conv2D,
MaxPooling2D,
ZeroPadding2D,
GlobalAveragePooling2D,
Flatten,
)
from tensorflow.keras.layers import BatchNormalization
# # Importing files
# there are two csv's :)
# However, the BBox csv only contains bounding-box annotations (Bbox [x y w h]) for a subset of the images, so it will not be used here.
path = "/kaggle/input/data/"
file = path + "Data_Entry_2017.csv"
# print(file)
Data_entry = pd.read_csv(file)
Data_entry = Data_entry.drop(columns=["Unnamed: 11"])
Data_entry
# ## Expanding the diseases
diseases = [
"No Finding",
"Cardiomegaly",
"Emphysema",
"Effusion",
"Hernia",
"Nodule",
"Pneumothorax",
"Atelectasis",
"Pleural_Thickening",
"Mass",
"Edema",
"Consolidation",
"Infiltration",
"Fibrosis",
"Pneumonia",
]
# Create a binary indicator column for each disease
for disease in diseases:
Data_entry[disease] = Data_entry["Finding Labels"].apply(
lambda x: 1 if disease in x else 0
)
# separate df for target, might use this as the y value
target = Data_entry[diseases].to_numpy()
target
Data_entry.rename(columns={"Finding Labels": "Specific Diagnosis"}, inplace=True)
Data_entry.head(5)
Data_entry["Simple Diagnosis"] = Data_entry["Specific Diagnosis"].str.split("|").str[0]
Data_entry
simpleDiseases = [
"(s)No Finding",
"(s)Cardiomegaly",
"(s)Emphysema",
"(s)Effusion",
"(s)Hernia",
"(s)Nodule",
"(s)Pneumothorax",
"(s)Atelectasis",
"(s)Pleural_Thickening",
"(s)Mass",
"(s)Edema",
"(s)Consolidation",
"(s)Infiltration",
"(s)Fibrosis",
"(s)Pneumonia",
]
for disease in diseases:
Data_entry["(s)" + disease] = Data_entry["Simple Diagnosis"].apply(
lambda x: 1 if disease in x else 0
)
Data_entry.head(10)
simpleTarget = Data_entry[simpleDiseases].to_numpy()
simpleTarget
simpleTarget.shape
# # EDA (slightly)
sns.pairplot(pd.read_csv("/kaggle/input/data/Data_Entry_2017.csv"))
sns.violinplot(x="Patient Gender", y="Patient Age", data=Data_entry)
# i smell outliers
Data_entry[Data_entry["Patient Age"] > 120]
age_fixes = {
    148: 14, 149: 14, 150: 15, 151: 15, 152: 15, 153: 15, 154: 15, 155: 15,
    411: 41, 412: 41, 413: 41, 414: 41,
}
Data_entry["Patient Age"] = Data_entry["Patient Age"].replace(age_fixes)
Data_entry[Data_entry["Patient Age"] > 120]
sns.violinplot(
x="Cardiomegaly", y="Patient Age", data=Data_entry, hue="Patient Gender", split=True
)
sns.violinplot(
x="Emphysema", y="Patient Age", hue="Patient Gender", data=Data_entry, split=True
)
sns.violinplot(
x="Effusion", y="Patient Age", data=Data_entry, hue="Patient Gender", split=True
)
sns.violinplot(x="Patient Gender", y="Patient Age", data=Data_entry)
sns.violinplot(
x="Hernia", y="Patient Age", data=Data_entry, hue="Patient Gender", split=True
)
sns.violinplot(
x="Nodule", y="Patient Age", data=Data_entry, hue="Patient Gender", split=True
)
sns.violinplot(
x="Pneumothorax", y="Patient Age", data=Data_entry, hue="Patient Gender", split=True
)
sns.violinplot(
x="Atelectasis", y="Patient Age", data=Data_entry, hue="Patient Gender", split=True
)
sns.violinplot(
x="Pleural_Thickening",
y="Patient Age",
data=Data_entry,
hue="Patient Gender",
split=True,
)
sns.violinplot(
x="Mass", y="Patient Age", data=Data_entry, hue="Patient Gender", split=True
)
sns.violinplot(
x="Edema", y="Patient Age", data=Data_entry, hue="Patient Gender", split=True
)
sns.violinplot(
x="Consolidation",
y="Patient Age",
data=Data_entry,
hue="Patient Gender",
split=True,
)
sns.violinplot(
x="Infiltration", y="Patient Age", data=Data_entry, hue="Patient Gender", split=True
)
sns.violinplot(
x="Fibrosis", y="Patient Age", data=Data_entry, hue="Patient Gender", split=True
)
sns.violinplot(
x="Pneumonia", y="Patient Age", data=Data_entry, hue="Patient Gender", split=True
)
plt.figure(figsize=(10, 10))
plt.xlabel("Disease")
plt.ylabel("No. of Patients")
ax = Data_entry[diseases].sum().sort_values(ascending=False).plot(kind="bar")
# # Image stuff
all_image_paths = {
os.path.basename(x): x
for x in glob(os.path.join("..", "input", "data", "images*", "images", "*.png"))
}
Data_entry["Path"] = Data_entry["Image Index"].map(all_image_paths.get)
files_list = Data_entry["Path"].tolist()
Data_entry
def show_image(img_path):
f = plt.figure(figsize=(20, 8))
s1 = f.add_subplot(1, 2, 1)
row = Data_entry[Data_entry["Path"] == img_path]
ID = int(row["Patient ID"])
age = int(row["Patient Age"])
gender = str(row["Patient Gender"].item())
diagnosis = str(row["Specific Diagnosis"].item())
    s1.set_title(
        f"Patient's Image\nPatient ID: {ID}\nPatient Age: {age}\nPatient Gender: {gender}\nSpecific Diagnosis: {diagnosis}"
    )
img = cv2.imread(img_path)
plt.imshow(img, cmap="gray")
plt.axis("off")
plt.show()
show_image("../input/data/images_001/images/00000010_000.png")
# yay
# ## Starting to mess with images
images = list(Data_entry["Path"])
train_img, val_img, train_label, val_label = train_test_split(
images,
simpleTarget,
test_size=10000,
random_state=42,
stratify=simpleTarget,
)
Data_entry["Path"]
train_img[0]
# ## Model !??
new = Data_entry[
    (Data_entry["Height]"] == 2500) & (Data_entry["OriginalImage[Width"] == 2048)
]
new
pathArr = new["Path"].to_numpy()
pathArr
targetArr = new[simpleDiseases].to_numpy()
targetArr
# NOTE: img_height, img_width and batch_size are defined further down in the "Attempt #1" section; run that cell first.
train_ds = tf.keras.utils.image_dataset_from_directory(
"/kaggle/input/data",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size,
)
train_ds.class_names
for image_batch, labels_batch in train_ds:
print(image_batch.shape)
print(labels_batch.shape)
break
normalization_layer = tf.keras.layers.Rescaling(1.0 / 255)
normalized_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))
image_batch, labels_batch = next(iter(normalized_ds))
first_image = image_batch[0]
print(np.min(first_image), np.max(first_image))
test_ds = tf.keras.utils.image_dataset_from_directory(
"/kaggle/input/data/",
validation_split=0.2,
subset="validation",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size,
)
targetArr = new[simpleDiseases].iloc[:4998].to_numpy()
targetArr.shape
val_ds.class_names
X_train, X_test, y_train, y_test = train_test_split(
pathArr, targetArr, test_size=0.25, random_state=42
)
train_ds
y_train
y_train.shape
# # Attempt #1 starts here
img_width = 2048
img_height = 2500
batch_size = 32
num_classes = 15
image_size = (img_height, img_width)
flat_image_size = 5120000
import pathlib
data_dir = pathlib.Path("/kaggle/input/data").with_suffix("")
list_ds = tf.data.Dataset.list_files(str(data_dir / "*/*/*"), shuffle=False)
list_ds = list_ds.shuffle(112120, reshuffle_each_iteration=False)
for f in list_ds.take(5):
print(f.numpy())
class_names = diseases
val_size = int(112120 * 0.25)
X_train = list_ds.skip(val_size)
X_test = list_ds.take(val_size)
for f in X_train.take(2):
print(f.numpy())
print(tf.data.experimental.cardinality(X_train).numpy())
print(tf.data.experimental.cardinality(X_test).numpy())
def get_label(file_path):
# image = file_path.str.split("images/").str[0]
file_name = tf.strings.split(file_path, os.path.sep)[-1]
row = Data_entry.loc[Data_entry["Image Index"] == file_name]
s_label = row["Simple Diagnosis"]
e_label = row[simpleDiseases].to_numpy()
o_label = row[diseases].to_numpy()
# print(s_label)
return s_label
def decode_img(img):
# Convert the compressed string to a 3D uint8 tensor
img = tf.io.decode_jpeg(img, channels=3)
# Resize the image to the desired size
return tf.image.resize(img, [img_height, img_width])
def process_path(file_path):
label = get_label(file_path)
# Load the raw data from the file as a string
img = tf.io.read_file(file_path)
img = decode_img(img)
return img, label
process_path("/kaggle/input/data/images_001/images/00000001_002.png")
AUTOTUNE = tf.data.AUTOTUNE
train_ds = X_train.cache().prefetch(buffer_size=AUTOTUNE)
val_ds = X_test.cache().prefetch(buffer_size=AUTOTUNE)
# Set `num_parallel_calls` so multiple images are loaded/processed in parallel.
train_ds = X_train.map(process_path, num_parallel_calls=AUTOTUNE)
val_ds = X_test.map(process_path, num_parallel_calls=AUTOTUNE)
for image, label in train_ds.take(1):
print("Image shape: ", image.numpy().shape)
print("Label: ", label.numpy())
# to-do:
# -get label:
#     should take in a path and return a multi-hot vector like {0,0,0,0,0,0,1,0,0,0,...} of length 15
# -decode_img
# -process_img
# -dataset.map
# -configure_for_performance
# **label is not being stored; one possible fix is sketched below**
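# A hedged sketch of one way to make get_label return a usable multi-hot vector inside
# a tf.data pipeline (an assumption on my part, not the author's final solution).
# Pandas lookups do not work on the symbolic string tensors produced by dataset.map, so
# the labels are materialized into a constant tensor and the filename -> row mapping is
# done with a tf.lookup.StaticHashTable.
keys = tf.constant(Data_entry["Image Index"].values)
vals = tf.constant(np.arange(len(Data_entry)), dtype=tf.int64)
file_to_row = tf.lookup.StaticHashTable(
    tf.lookup.KeyValueTensorInitializer(keys, vals), default_value=-1
)
label_matrix = tf.constant(Data_entry[diseases].values, dtype=tf.float32)


def get_label_v2(file_path):
    # Unknown filenames map to -1, which would gather the last row; filter those upstream.
    file_name = tf.strings.split(file_path, os.path.sep)[-1]
    row_idx = file_to_row.lookup(file_name)
    return tf.gather(label_matrix, row_idx)  # length-15 multi-hot vector


def process_path_v2(file_path):
    img = decode_img(tf.io.read_file(file_path))
    return img, get_label_v2(file_path)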
# # Attempt #1 ends here
# # Attempt #2 starts here
data = pd.read_csv("/kaggle/input/data/Data_Entry_2017.csv")
data = data[
data["Patient Age"] < 100
] # removing datapoints which having age greater than 100
data_image_paths = {
    os.path.basename(x): x
    for x in glob(os.path.join("..", "input", "data", "images*", "images", "*.png"))
}
print("Scans found:", len(data_image_paths), ", Total Headers", data.shape[0])
all_image_paths = {
os.path.basename(x): x
for x in glob(os.path.join("..", "input", "data", "images*", "images", "*.png"))
}
data["path"] = data["Image Index"].map(all_image_paths.get)
files_list = data["path"].tolist()
data["Patient Age"] = data["Patient Age"].map(lambda x: int(x))
data.sample(3)
data
data["Finding Labels"] = data["Finding Labels"].map(
lambda x: x.replace("No Finding", "")
)
from itertools import chain
all_labels = np.unique(
list(chain(*data["Finding Labels"].map(lambda x: x.split("|")).tolist()))
)
all_labels = [x for x in all_labels if len(x) > 0]
print("All Labels ({}): {}".format(len(all_labels), all_labels))
for c_label in all_labels:
if len(c_label) > 1: # leave out empty labels
data[c_label] = data["Finding Labels"].map(
lambda finding: 1.0 if c_label in finding else 0
)
data.sample(3)
# keep at least 1000 cases
MIN_CASES = 1000
all_labels = [c_label for c_label in all_labels if data[c_label].sum() > MIN_CASES]
print(
"Clean Labels ({})".format(len(all_labels)),
[(c_label, int(data[c_label].sum())) for c_label in all_labels],
)
# since the dataset is very unbiased, we can resample it to be a more reasonable collection
# weight is 0.04 + number of findings
sample_weights = (
data["Finding Labels"].map(lambda x: len(x.split("|")) if len(x) > 0 else 0).values
+ 4e-2
)
sample_weights /= sample_weights.sum()
data = data.sample(40000, weights=sample_weights)
label_counts = data["Finding Labels"].value_counts()[:15]
fig, ax1 = plt.subplots(1, 1, figsize=(12, 8))
ax1.bar(np.arange(len(label_counts)) + 0.5, label_counts)
ax1.set_xticks(np.arange(len(label_counts)) + 0.5)
_ = ax1.set_xticklabels(label_counts.index, rotation=90)
# creating vector of diseases
data["disease_vec"] = data.apply(lambda x: [x[all_labels].values], 1).map(
lambda x: x[0]
)
data
data.iloc[0]["disease_vec"]
from sklearn.model_selection import train_test_split
train_df, valid_df = train_test_split(
data,
test_size=0.25,
random_state=2018,
stratify=data["Finding Labels"].map(lambda x: x[:4]),
)
print("train", train_df.shape[0], "validation", valid_df.shape[0])
from keras.preprocessing.image import ImageDataGenerator
IMG_SIZE = (128, 128)
core_idg = ImageDataGenerator(
samplewise_center=True,
samplewise_std_normalization=True,
horizontal_flip=True,
vertical_flip=False,
height_shift_range=0.05,
width_shift_range=0.1,
rotation_range=5,
shear_range=0.1,
fill_mode="reflect",
zoom_range=0.15,
)
def flow_from_dataframe(img_data_gen, in_df, path_col, y_col, **dflow_args):
base_dir = os.path.dirname(in_df[path_col].values[0])
print("## Ignore next message from keras, values are replaced anyways")
df_gen = img_data_gen.flow_from_directory(
base_dir, class_mode="sparse", **dflow_args
)
df_gen.filenames = in_df[path_col].values
df_gen.classes = np.stack(in_df[y_col].values)
df_gen.samples = in_df.shape[0]
df_gen.n = in_df.shape[0]
df_gen._set_index_array()
df_gen.directory = "" # since we have the full path
df_gen.filepaths.extend(df_gen.filenames)
print("Reinserting dataframe: {} images".format(in_df.shape[0]))
return df_gen
train_gen = flow_from_dataframe(
core_idg,
train_df,
path_col="path",
y_col="disease_vec",
target_size=IMG_SIZE,
color_mode="grayscale",
batch_size=32,
)
valid_gen = flow_from_dataframe(
core_idg,
valid_df,
path_col="path",
y_col="disease_vec",
target_size=IMG_SIZE,
color_mode="grayscale",
batch_size=256,
) # we can use much larger batches for evaluation
# used a fixed dataset for evaluating the algorithm
test_X, test_Y = next(
flow_from_dataframe(
core_idg,
valid_df,
path_col="path",
y_col="disease_vec",
target_size=IMG_SIZE,
color_mode="grayscale",
batch_size=2048,
)
) # one big batch
# # Attempt #2 ends here
model = Sequential()
model.add(Dense(units=512, activation="sigmoid", input_shape=(img_height, img_width)))
model.add(Dense(units=512, activation="sigmoid"))
model.add(Dense(units=num_classes, activation="softmax"))
model.build()
model.summary()
model.compile(optimizer="sgd", loss="categorical_crossentropy", metrics=["accuracy"])
history = model.fit(train_ds, validation_data=val_ds, epochs=5, verbose=True)
loss, accuracy = model.evaluate(val_ds, verbose=False)
# Again, do some formatting
# Except we do not flatten each image into a 784-length vector because we want to perform convolutions first
X_train = X_train.reshape(
6512, 1, 1, 1
) # add an additional dimension to represent the single-channel
X_test = X_test.reshape(2171, 1, 1, 1)
X_train = X_train.astype("str")  # cast entries to strings (they are file paths, not pixel values)
X_test = X_test.astype("str")
model = Sequential() # Linear stacking of layers
# Convolution Layer 1
model.add(
Conv2D(32, (3, 3), input_shape=(2048, 2500, 1))
) # 32 different 3x3 kernels -- so 32 feature maps
model.add(BatchNormalization(axis=-1)) # normalize each feature map before activation
convLayer01 = Activation("relu") # activation
model.add(convLayer01)
# Convolution Layer 2
model.add(Conv2D(32, (3, 3))) # 32 different 3x3 kernels -- so 32 feature maps
model.add(BatchNormalization(axis=-1)) # normalize each feature map before activation
model.add(Activation("relu")) # activation
convLayer02 = MaxPooling2D(pool_size=(2, 2)) # Pool the max values over a 2x2 kernel
model.add(convLayer02)
# Convolution Layer 3
model.add(Conv2D(64, (3, 3))) # 64 different 3x3 kernels -- so 64 feature maps
model.add(BatchNormalization(axis=-1)) # normalize each feature map before activation
convLayer03 = Activation("relu") # activation
model.add(convLayer03)
# Convolution Layer 4
model.add(Conv2D(64, (3, 3))) # 64 different 3x3 kernels -- so 64 feature maps
model.add(BatchNormalization(axis=-1)) # normalize each feature map before activation
model.add(Activation("relu")) # activation
convLayer04 = MaxPooling2D(pool_size=(2, 2)) # Pool the max values over a 2x2 kernel
model.add(convLayer04)
model.add(Flatten()) # Flatten final 4x4x64 output matrix into a 1024-length vector
# Fully Connected Layer 5
model.add(Dense(512)) # 512 FCN nodes
model.add(BatchNormalization()) # normalization
model.add(Activation("relu")) # activation
# Fully Connected Layer 6
model.add(Dropout(0.2)) # 20% dropout of randomly selected nodes
model.add(Dense(10)) # final 10 FCN nodes
model.add(Activation("softmax"))
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(train_ds, validation_data=val_ds, epochs=3)
# NOTE: no generators were defined above, so plain ImageDataGenerator instances are assumed here
gen = ImageDataGenerator()
test_gen = ImageDataGenerator()
train_generator = gen.flow(X_train, y_train, batch_size=128)
test_generator = test_gen.flow(X_test, y_test, batch_size=128)
model.fit(
train_generator,
steps_per_epoch=60000 // 128,
epochs=5,
verbose=1,
validation_data=test_generator,
validation_steps=10000 // 128,
)
|
# ## Exploratory Data Analysis | Reinforcing the Skills
# Let's load the libraries we will need below.
import numpy as np  # Scientific computing
import seaborn as sns  # Data visualization
import pandas as pd  # Data analysis
import matplotlib.pyplot as plt  # Contains plotting functions.
# Let's load our data from the current directory, turn it into a DataFrame and assign it to the df variable. (pd.read_csv(...csv))
df = pd.read_csv("/kaggle/input/irisss/iris.csv")
# Let's display the first 5 observations of the DataFrame.
df.head()
# By default 5 rows are shown; a different number can also be passed in.
# Let's see how many features and observations the DataFrame consists of.
df.shape
# Let's display the data types of the variables in the DataFrame and the memory usage.
df.info()
# Let's display the basic statistics of the numerical variables in the DataFrame.
# Using the standard deviation and the mean, let's reason about how much variance each variable has.
df.describe().T
# Let's see how many missing values each feature in the DataFrame has.
df.isnull().sum()
# # What is Correlation?
# ****
# Correlation is a statistical term that measures the relationship between two variables. It tells us whether one variable increases or decreases as the other one increases. The correlation between two variables is usually expressed with the Pearson correlation coefficient, r. Here are the key points to know about correlation:
# Positive correlation: if two variables are positively correlated, one increases as the other increases, or one decreases as the other decreases.
# Negative correlation: if two variables are negatively correlated, one decreases as the other increases, and vice versa.
# Correlation coefficient: the correlation coefficient measures the degree of the relationship between two variables and takes values between -1 and +1. +1 indicates a perfect positive correlation, -1 a perfect negative correlation, and 0 means there is no linear relationship between the two variables.
# Interpreting correlation: the coefficient quantifies the strength of the relationship. Correlations below 0.3 are usually considered weak, those between 0.3 and 0.7 moderate, and those above 0.7 strong. Keep in mind that correlation does not imply causation; it only indicates that a relationship between two variables exists.
# Let's draw a correlation matrix to show whether the numerical variables are correlated, and reason about the coefficients.
# Which two variables have the strongest positive relationship?
df.corr()
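# A minimal side-check (not required by the exercise): the Pearson coefficient reported
# by df.corr() can be reproduced by hand for a pair of columns.
px = df["petal.length"]
py = df["petal.width"]
r_manual = ((px - px.mean()) * (py - py.mean())).sum() / (
    ((px - px.mean()) ** 2).sum() * ((py - py.mean()) ** 2).sum()
) ** 0.5
print(r_manual, px.corr(py))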
# # Why do we draw a heatmap?
# Heatmaps are a technique for visualizing data and are often also called density maps. They encode the different levels of the data with color tones or intensity, which makes them very useful for visualizing large datasets.
# Let's draw a heatmap to read the correlation coefficients more easily.
sns.heatmap(df.corr())
# Let's display the unique values of our target variable "variety".
df["variety"].unique()
# Let's display how many unique values our target variable "variety" contains.
df["variety"].nunique()
# We can see that the sepal.width and sepal.length variables in the DataFrame are continuous. To visualize these two continuous variables, let's first use a scatterplot.
sns.scatterplot(data=df, x="sepal.length", y="sepal.width", color="purple")
# # Jointplot
# A jointplot visualizes the relationship between two variables using both histograms and a scatterplot. It is a commonly used plot type for analyzing the relationship between two variables.
# A jointplot is used to assess the correlation between two variables. If there is a positive relationship, the scatterplot is expected to trend upward; similarly, if there is a negative relationship, it is expected to trend downward. The histograms show the distribution of each variable and give an idea of their densities.
# Let's visualize the same two variables from a different angle, together with their frequencies, using a jointplot.
sns.jointplot(x="sepal.length", y="sepal.width", data=df, color="purple")
# Let's visualize the same two variables with a scatterplot again, but this time break them down by the target variable using the "variety" parameter.
# Could the three colors be clustered using the sepal variables? Let's think about how separable they are.
sns.scatterplot(data=df, x="sepal.length", y="sepal.width", hue="variety")
plt.show()
# Let's use the value_counts() function to check how evenly our DataFrame is distributed.
df["variety"].value_counts()
# # Violin Plot
# A violin plot is a combination of a box plot and a density plot. The box plot shows the quartiles and the median of the data, while the density plot shows the density of the data. Because the violin plot combines both, it brings together the advantages of the two plot types and allows the dataset to be analyzed in more detail.
# Violin plots are used in particular to determine whether the distribution in a dataset is symmetric or skewed, and also to spot outliers. By showing both the density and the center of the data, they help reveal patterns in the dataset.
# Draw a violin plot and inspect the distribution of the sepal.width variable.
# What does this distribution tell us? Can we say it is a normal distribution?
sns.violinplot(data=df, y="sepal.width")
# To understand it better, let's draw a distplot of sepal.width.
sns.distplot(df["sepal.width"], color="orange")
# This visualizes the density of sepal.width.
# Let's visualize three different violin plots of the sepal.length distribution, one for each of the three flower species, in a single line.
sns.violinplot(data=df, x="variety", y="sepal.length")
# How many observations of each flower species does our DataFrame contain?
# We already saw with value_counts that it is a balanced 50 x 3, but to express this visually let's pass the variety parameter to the sns.countplot() function.
sns.countplot(data=df, x="variety")
# Let's visualize sepal.length and sepal.width with sns.jointplot and inspect the distribution and the regions where its frequency is high.
sns.jointplot(data=df, x="sepal.width", y="sepal.length", kind="hist", color="green")
# Let's add the kind = "kde" parameter to the visualization from the previous cell. This way we will see the plot turn from a dotted representation into a density-oriented visualization.
sns.jointplot(data=df, x="sepal.width", y="sepal.length", kind="kde", color="purple")
# Let's draw the distributions of the petal.length and petal.width variables with a scatterplot.
sns.scatterplot(data=df, x="petal.width", y="petal.length")
# Let's add a third dimension to the same visualization with the hue = "variety" parameter.
sns.scatterplot(data=df, x="petal.width", y="petal.length", hue="variety")
# Let's apply the sns.lmplot() visualization to the petal.length and petal.width variables, and answer the question: what kind of relationship is there between petal length and petal width, and is it strong?
sns.lmplot(data=df, x="petal.length", y="petal.width")
# There is a strong linear relationship.
# To reinforce this answer, let's print the correlation coefficient between the two variables.
df.corr()["petal.length"]["petal.width"]
# Let's create a new total length feature by adding the Petal Length and Sepal Length values.
df["total.length"] = df["sepal.length"] + df["petal.length"]
df["total.length"]
# Let's print the mean value of total.length.
df["total.length"].mean()
# Let's print the standard deviation of total.length.
df["total.length"].std()
# Let's print the maximum value of sepal.length.
df["sepal.length"].max()
# Let's print the observations whose sepal.length is greater than 5.5 and whose species is Setosa.
df[(df["sepal.length"] > 5.5) & (df["variety"] == "Setosa")]
# For the observations whose petal.length is less than 5 and whose species is Virginica, let's print only the sepal.length and sepal.width variables and their values.
df[(df["petal.length"] < 5) & (df["variety"] == "Virginica")][
["sepal.length", "sepal.width"]
]
# Let's group by our target variable variety and display the mean values of our variables.
df.groupby(["variety"]).mean()
# Let's group by our target variable variety and print the standard deviation of only the petal.length variable.
df.groupby(["variety"])["petal.length"].std()
|
import pandas as pd
df = pd.read_csv("/kaggle/input/customer-shopping-dataset/customer_shopping_data.csv")
df.head()
df.tail()
df.shape
df.describe()
df.columns
df.columns = df.columns.str.replace(" ", "_")
df.dtypes
df.isnull().sum()
df.info()
print(len(df["age"].unique()))
df["age"].unique()
df["invoice_date"].head()
df["invoice_date"] = pd.to_datetime(df["invoice_date"])
# df["invoice_date"].head(1).year()
df["year"] = df["invoice_date"].dt.year
df["month"] = df["invoice_date"].dt.month
print(df.year)
df["year"].describe()
df.head()
df.dtypes
df["gender"].describe()
print(df.duplicated())
df["category"].unique()
import pandas as pd
pd.plotting.register_matplotlib_converters()
import matplotlib.pyplot as plt
import seaborn as sns
print("Setup Complete")
sns.boxplot(df["age"])
cat_num = df["category"].value_counts()
sns.barplot(x=cat_num, y=cat_num.index, data=df)
plt.title("The number of categories", size=20)
gen_num = df["gender"].value_counts()
sns.barplot(y=gen_num, x=gen_num.index, data=df)
plt.title("male vs female", size=10)
sns.scatterplot(data=df, y="category", x="price")
plt.title("Category & Price", size=20)
sns.histplot(df["payment_method"], kde=False, stat="frequency")
plt.title(
    "Payment methods",
    size=20,
)
sns.histplot(df["month"], kde=False)
plt.title(
    "Histogram of the month column",
    size=20,
)
sns.histplot(df["year"], kde=False)
plt.title(
    "Histogram of the year column",
    size=20,
)
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from fbprophet import Prophet
from fbprophet.diagnostics import cross_validation, performance_metrics
from fbprophet.plot import plot_cross_validation_metric
# load the data
# YYYY-MM-DD
df = pd.read_csv("/kaggle/input/airbnb-nyc-data/nyc_data.csv")
df.head()
# prophet rules - target : y, date: ds
df = df.rename(columns={"Demand": "y", "Date": "ds"})
df.head(0)
# date variable reformatting
df.ds
df.ds = pd.to_datetime(df.ds, format="%m/%d/%Y")
df.ds
# prepping inputs for the model - Easter, Thanksgiving
# Easter
dates = pd.to_datetime(df[df.Easter == 1].ds)
easter = pd.DataFrame(
{"holiday": "easter", "ds": dates, "lower_window": -5, "upper_window": 2}
)
easter
# Thanksgiving
dates = pd.to_datetime(df[df.Thanksgiving == 1].ds)
thanksgiving = pd.DataFrame(
{"holiday": "thanksgiving", "ds": dates, "lower_window": -3, "upper_window": 6}
)
thanksgiving
# combining events
holidays = pd.concat([easter, thanksgiving])
holidays
df = df.drop(columns=["Easter", "Thanksgiving"])
df.head(0)
"""
component                     description
1. holidays                   dataframe prepared with the Easter and Thanksgiving dates
2. seasonality_mode           multiplicative or additive
3. seasonality_prior_scale    strength of the seasonality
4. holidays_prior_scale       larger values allow the model to fit larger holiday effects
5. changepoint_prior_scale    flexibility of the trend at its changepoints
"""
# create fbprophet model with default params
m = Prophet(
holidays=holidays,
seasonality_mode="multiplicative",
seasonality_prior_scale=10,
holidays_prior_scale=10,
changepoint_prior_scale=0.05,
)
# add regressors
m.add_regressor("Christmas")
m.add_regressor("Temperature")
m.add_regressor("Marketing")
m.fit(df)
df.shape[0] - 180  # rough check of how many days remain for the initial training window after a ~180-day holdout
# cross validation
df_crossval = cross_validation(
model=m,
horizon="31 days",
period="16 days",
initial="2012 days",
parallel="processes",
)
df_crossval.head()
# plot model performance
performance_metrics(df_crossval).head()
print(
f"RMSE: {round(performance_metrics(df_crossval)['rmse'].mean(), 1)}",
"\n"
f"MAPE: {format(100 * round(performance_metrics(df_crossval)['mape'].mean(), 3), '.1f')}%",
"\n" f"MAE: {round(performance_metrics(df_crossval)['mae'].mean(), 1)}",
)
# plotting cross val metrics
import warnings
warnings.filterwarnings("ignore")
print(plot_cross_validation_metric(df_crossval, metric="rmse"))
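# As a follow-up to the component table above, here is a hedged sketch (not executed in
# this notebook) of how the priors could be grid-searched with the same cross-validation
# helpers; the candidate values below are illustrative, not tuned.
import itertools

param_grid = {
    "changepoint_prior_scale": [0.01, 0.05, 0.5],
    "seasonality_prior_scale": [1, 10],
}
cv_results = []
for cps, sps in itertools.product(*param_grid.values()):
    m_cand = Prophet(
        holidays=holidays,
        seasonality_mode="multiplicative",
        changepoint_prior_scale=cps,
        seasonality_prior_scale=sps,
    )
    for reg in ["Christmas", "Temperature", "Marketing"]:
        m_cand.add_regressor(reg)
    m_cand.fit(df)
    cv_cand = cross_validation(
        model=m_cand, horizon="31 days", period="16 days", initial="2012 days"
    )
    cv_results.append((cps, sps, performance_metrics(cv_cand)["rmse"].mean()))
print(sorted(cv_results, key=lambda t: t[2])[0])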
|
# ## 1\. Context
# This project was developed for **Loggi**, one of the main logistics companies in Brazil. The goal of the project was to analyze delivery data from the company in the Distrito Federal region in order to identify patterns and trends that may be useful for optimizing the logistics process. To do so, data visualization techniques such as heatmaps and bar charts were used, allowing a clearer and more objective analysis of the data.
# The charts and heatmaps developed in this project will be important for the company's data analysis, since they provide a more intuitive and easier-to-understand view of the data, making it easier to identify patterns and trends. With this, the company will be able to make more accurate decisions about the delivery process, which may result in greater efficiency and lower operating costs.
# ## 2\. Packages and libraries
import json
import math
import os
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import folium
from folium.plugins import HeatMap
# ## 3\. Data exploration
# ### 3\.1\. Data collection
# Source
# Data collection
with open("deliveries.json", mode="r", encoding="utf8") as file:
data = json.load(file)
# ### 3.2\. Structure Wrangling
# Create the base DataFrame
deliveries_df = pd.DataFrame(data)
deliveries_df.head()
# Split 'origin' into 'lng' and 'lat'
hub_origin_df = pd.json_normalize(deliveries_df["origin"])
# Merge the two DataFrames
deliveries_df = pd.merge(
left=deliveries_df,
right=hub_origin_df,
how="inner",
left_index=True,
right_index=True,
)
# Drop the 'origin' column and reorganize the DataFrame:
deliveries_df = deliveries_df.drop("origin", axis=1)
deliveries_df = deliveries_df[
["name", "region", "lng", "lat", "vehicle_capacity", "deliveries"]
]
# Rename the 'lat' and 'lng' columns to identify them as belonging to the hub
deliveries_df.rename(columns={"lng": "hub_lng", "lat": "hub_lat"}, inplace=True)
deliveries_df.head()
# Create a DataFrame for the data in the 'deliveries' column, keeping the index
deliveries_exploded_df = deliveries_df[["deliveries"]].explode("deliveries")
# Create one more DataFrame with the 'size', 'lat' and 'lng' fields
deliveries_normalized_df = pd.concat(
[
pd.DataFrame(
deliveries_exploded_df["deliveries"].apply(lambda record: record["size"])
).rename(columns={"deliveries": "delivery_size"}),
pd.DataFrame(
deliveries_exploded_df["deliveries"].apply(
lambda record: record["point"]["lng"]
)
).rename(columns={"deliveries": "delivery_lng"}),
pd.DataFrame(
deliveries_exploded_df["deliveries"].apply(
lambda record: record["point"]["lat"]
)
).rename(columns={"deliveries": "delivery_lat"}),
],
axis=1,
)
# Drop the 'deliveries' column from the initial DataFrame
deliveries_df = deliveries_df.drop("deliveries", axis=1)
# Merge both DataFrames so that there is one row per delivery
deliveries_df = pd.merge(
left=deliveries_df,
right=deliveries_normalized_df,
how="right",
left_index=True,
right_index=True,
)
deliveries_df.reset_index(inplace=True, drop=True)
deliveries_df
# ### 3.3\. Schema Exploration
# #### Packages per delivery
sns.histplot(data=deliveries_df, x="delivery_size")
# * The number of packages is evenly distributed; there is no large difference in the number of packages per order.
# #### Deliveries per hub and the estimated number of trips needed to deliver them
# Group the deliveries by hub and region
grouped_df = deliveries_df.groupby(["name", "region"]).agg(
{"delivery_size": "sum", "vehicle_capacity": "mean", "name": "count"}
)
grouped_df.rename(
columns={"delivery_size": "total_delivery_size", "name": "name_count"}, inplace=True
)
# Compute the minimum number of trips needed to fulfill all orders (see the worked example below)
grouped_df["trips"] = grouped_df["total_delivery_size"] / grouped_df["vehicle_capacity"]
grouped_df["trips"] = grouped_df["trips"].apply(lambda x: math.ceil(x))
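# Quick worked example of the trips rule above (hypothetical numbers, not from the data):
# a hub with 1,000 packages and a vehicle capacity of 180 needs ceil(1000 / 180) = 6 trips.
assert math.ceil(1000 / 180) == 6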
# Add the "region" column
grouped_df = grouped_df.reset_index()
grouped_df = grouped_df.merge(
deliveries_df[["name", "region"]].drop_duplicates(), on="name", how="left"
)
grouped_df = grouped_df.drop("region_y", axis=1).rename(columns={"region_x": "region"})
grouped_df.head()
# grouped_df.select_dtypes('int64').describe().transpose()
# Create a new DataFrame with the number of deliveries per region
deliveries_by_region = grouped_df.groupby("region").agg({"name_count": "sum"})
# Sort the regions by total number of deliveries
deliveries_by_region = deliveries_by_region.sort_values(
by="name_count", ascending=False
)
# Create a pie chart
deliveries_by_region.plot(kind="pie", y="name_count", figsize=(6, 6), autopct="%1.1f%%")
# Configure the chart
plt.title("Share of Deliveries by Region")
plt.ylabel("")
plt.show()
# * On average, each hub must deliver 17,620 packages; the hub with the fewest packages has 4,308 and the one with the most has 31,145.
# * On average, each hub will make 99 trips; the hub with the fewest deliveries can complete its task with only 24 trips, while the hub with the most deliveries will need 174 trips.
# #### Deliveries per region and the estimated number of trips needed to deliver them
grouped_df_region = grouped_df.groupby("region").agg(
{"total_delivery_size": "sum", "name_count": "sum", "trips": "sum"}
)
grouped_df_region
sns.scatterplot(
data=grouped_df_region, x="total_delivery_size", y="trips", hue="region"
)
plt.xlabel("Number of deliveries per region (x100,000)")
plt.ylabel("Number of trips required")
plt.title("Relationship between deliveries and trips by region")
plt.show()
# * Here it becomes clear that region 0 is less densely populated.
# * Although the orders-to-trips ratio is almost linear, region 2 shows better cargo utilization than the other regions (a quick check is sketched below).
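# Hedged check of the cargo-utilization claim above (not in the original notebook):
# average number of packages carried per trip, by region.
avg_load_per_trip = (
    grouped_df_region["total_delivery_size"] / grouped_df_region["trips"]
)
print(avg_load_per_trip.sort_values(ascending=False))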
# #### Delivery density on the map
# Create a map centered on Brazil
mapa = folium.Map(location=[-15.788497, -47.879873], zoom_start=10)
# Create a heatmap based on the deliveries per region
heat_data = deliveries_df[
["delivery_lat", "delivery_lng", "delivery_size"]
].values.tolist()
HeatMap(heat_data).add_to(mapa)
# Show the map
mapa
# #### Deliveries per region
# Get the list of unique regions
regions = deliveries_df["region"].unique()
# Iterate over the regions and create a heatmap for each one
for region in regions:
    # Filter the data by region
region_data = deliveries_df[deliveries_df["region"] == region]
    # Create a map centered on the region
center_lat = region_data["delivery_lat"].mean()
center_lng = region_data["delivery_lng"].mean()
mapa = folium.Map(location=[center_lat, center_lng], zoom_start=10)
    # Create a heatmap based on the deliveries in the region
heat_data = region_data[
["delivery_lat", "delivery_lng", "delivery_size"]
].values.tolist()
HeatMap(heat_data).add_to(mapa)
    # Add the region name to the map
folium.Marker([center_lat, center_lng], tooltip=region).add_to(mapa)
    # Show the map
display(mapa)
|
# # **Imports**
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import pandas_profiling as pp
import warnings
warnings.filterwarnings("ignore")
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
from sklearn.metrics import accuracy_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import *
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# # **Read the data**
orig = pd.read_csv("/kaggle/input/playground-series-s3e12/sample_submission.csv")
train = pd.read_csv("/kaggle/input/playground-series-s3e12/train.csv")
test = pd.read_csv("/kaggle/input/playground-series-s3e12/test.csv")
# # **EDA**
print("Shape of original data is: ", orig.shape)
print("---" * 30)
print("Shape of train data is: ", train.shape)
print("---" * 30)
print("Shape of test data is: ", test.shape)
print("---" * 30)
orig.info()
train.info()
test.info()
orig.isnull().sum()
train.isnull().sum()
train.duplicated().sum()
test.isnull().sum()
test.duplicated().sum()
train = train.drop(columns=["id"], axis=1)
pp.ProfileReport(train)
pp.ProfileReport(test)
# # **Data Visualization**
fig, ax = plt.subplots(1, 2, figsize=(10, 5))
ax = ax.flatten()
labels = ["0", "1"]
colors = ["red", "orange"]
ax[0].pie(
train["target"].value_counts(),
shadow=True,
labels=labels,
autopct="%1.f%%",
textprops={"size": 20},
startangle=90,
colors=colors,
)
sns.countplot(data=train, y="target", ax=ax[1])
ax[1].yaxis.label.set_size(20)
plt.yticks(fontsize=15)
ax[1].set_xlabel("Count", fontsize=15)
plt.xticks(fontsize=15)
fig.suptitle("Target", fontsize=20, fontweight="bold")
plt.tight_layout()
plt.subplots(figsize=(10, 10))
sns.heatmap(train.corr(), cmap="PiYG", annot=True)
plt.show()
plt.figure(figsize=[16, 12])
plt.subplot(231)
sns.boxplot(y=train["gravity"])
plt.title("gravity")
plt.subplot(232)
sns.boxplot(y=train["ph"])
plt.title("ph")
plt.subplot(233)
sns.boxplot(y=train["osmo"])
plt.title("osmo")
plt.subplot(234)
sns.boxplot(y=train["cond"])
plt.title("cond")
plt.subplot(235)
sns.boxplot(y=train["urea"])
plt.title("urea")
plt.subplot(236)
sns.boxplot(y=train["calc"])
plt.title("calc")
plt.show()
plt.figure(figsize=[16, 10])
plt.subplot(231)
sns.distplot(train["gravity"])
plt.title("gravity")
plt.subplot(232)
sns.distplot(train["ph"])
plt.title("ph")
plt.subplot(233)
sns.distplot(train["osmo"])
plt.title("osmo")
plt.subplot(234)
sns.distplot(train["cond"])
plt.title("cond")
plt.subplot(235)
sns.distplot(train["urea"])
plt.title("urea")
plt.show()
plt.figure(figsize=(14, 10))
sns.pairplot(train, hue="target")
plt.title("Distribution of target data")
plt.legend("target")
plt.tight_layout()
plt.plot()
plt.show()
# # **Model Building**
x = train.drop(columns=["target"])
y = train.target
x_train, x_val, y_train, y_val = train_test_split(x, y, train_size=0.8, test_size=0.2)
# # **XGBoost**
xg = XGBClassifier(n_estimators=200, random_state=0, learning_rate=0.05)
xg.fit(x_train, y_train)
pred_xg = xg.predict(x_val)
print("Accuracy score: ", round(accuracy_score(pred_xg, y_val) * 100, 2))
# # **Random Forest Classifier**
rf = RandomForestClassifier(n_estimators=200, random_state=1)
rf.fit(x_train, y_train)
pred_rf = rf.predict(x_val)
print("Accuracy score:", round(accuracy_score(pred_rf, y_val) * 100, 2))
# # **Decision Tree Classifier**
from sklearn.tree import DecisionTreeClassifier
dt = DecisionTreeClassifier(random_state=0)
dt.fit(x_train, y_train)
pred_dt = dt.predict(x_val)
print("Accuracy score:", round(accuracy_score(pred_dt, y_val) * 100, 2))
# # **Logistic Regression**
log = LogisticRegression()
log.fit(x_train, y_train)
pred_log = log.predict(x_val)
print("Accuracy score:", round(accuracy_score(pred_log, y_val) * 100, 2))
# # **Test Data Predictions**
test1 = test.drop(columns=["id"], axis=1)
test1.head()
plt.subplots(figsize=(10, 10))
sns.heatmap(
test1.corr(),
cmap="YlOrRd",
annot=True,
)
plt.show()
fig, ax = plt.subplots(3, 2, figsize=(15, 15))
sns.distplot(x=test1["gravity"], ax=ax[0, 0], color="orange")
sns.distplot(x=test1["ph"], ax=ax[0, 1], color="pink")
sns.distplot(x=test1["osmo"], ax=ax[1, 0], color="pink")
sns.distplot(x=test1["cond"], ax=ax[1, 1], color="orange")
sns.distplot(x=test1["urea"], ax=ax[2, 0], color="orange")
sns.distplot(x=test1["calc"], ax=ax[2, 1], color="pink")
plt.plot()
final_prediction = log.predict(test1)
final_prediction_prob = log.predict_proba(test1)[::, 1]
final_prediction_prob
final_test = pd.DataFrame({"id": test["id"], "target": final_prediction_prob})
final_test
# # **Submission**
final_test.to_csv("submission.csv", index=False)
|
# Hey guys!
# After the competition closed, I want to share my idea for filling in missing values with you.
# (The original train data has too much missing data...)
# The main idea is "time-series similarity between meters within a single site".
# (for example, meter_A's pattern is very similar to meter_B's in site1, and I believe some of you have noticed that)
# By making good use of this similarity, we can fill in that missing data!
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import lightgbm as lgb
from pathlib import Path
import matplotlib.pyplot as plt
import plotly.graph_objs as go
import plotly
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
import cufflinks as cf
cf.set_config_file(offline=True)
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from statsmodels.tsa.seasonal import seasonal_decompose
from kneed import KneeLocator
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
def MinMaxScaler(data):
return (data - np.min(data)) / (np.max(data) - np.min(data))
def Kmeans_clustering(df, clusterNum, max_iter, n_jobs):
scaler = StandardScaler()
scaler.fit(df)
df_std = pd.DataFrame(data=scaler.transform(df), columns=df.columns, index=df.index)
km_model = KMeans(
n_clusters=clusterNum, max_iter=max_iter, n_jobs=n_jobs, random_state=666
)
km_model = km_model.fit(df_std)
clusterdf = pd.DataFrame(data=km_model.labels_, columns=["ClusterNo"])
clusterdf.index = df.index
return clusterdf
def Kmeans_bestClusterNum(df, range_min, range_max, max_iter, n_jobs):
silhouette_avgs = []
sum_of_squared_distances = []
ks = range(range_min, range_max + 1)
for k in ks:
kmeans_fit = KMeans(
n_clusters=k, n_jobs=n_jobs, max_iter=max_iter, random_state=666
).fit(df)
cluster_labels = kmeans_fit.labels_
sum_of_squared_distances.append(kmeans_fit.inertia_)
kn = KneeLocator(
list(ks),
sum_of_squared_distances,
S=1.0,
curve="convex",
direction="decreasing",
)
plt.xlabel("k")
plt.ylabel("sum_of_squared_distances")
plt.title("The Elbow Method showing the optimal k")
plt.plot(ks, sum_of_squared_distances, "bx-")
plt.vlines(kn.knee, plt.ylim()[0], plt.ylim()[1], linestyles="dashed")
print("Optimal clustering number:" + str(kn.knee))
print("----------------------------")
return kn.knee
root = Path("../input/ashrae-feather-format-for-fast-loading")
train_df = pd.read_feather(root / "train.feather")
test_df = pd.read_feather(root / "test.feather")
weather_train_df = pd.read_feather(root / "weather_train.feather")
weather_test_df = pd.read_feather(root / "weather_test.feather")
building_meta_df = pd.read_feather(root / "building_metadata.feather")
# Let's just take site1 as example
train_df = train_df.merge(building_meta_df, on="building_id")
train_df = train_df[train_df["site_id"] == 1]
train_df["merged_id"] = (
"site"
+ train_df["site_id"].astype(str)
+ "_"
+ "bldg"
+ train_df["building_id"].astype(str)
+ "_"
+ "meter"
+ train_df["meter"].astype(str)
)
train_df = train_df[
["timestamp", "site_id", "building_id", "meter", "meter_reading", "merged_id"]
]
train_df = train_df.set_index("timestamp")
train_df = train_df.sort_values(["merged_id", "timestamp"])
train_df
train_df_pivot = train_df.pivot_table(
values="meter_reading", index="timestamp", columns="merged_id"
)
train_df_pivot
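# To quantify the "similarity between meters" mentioned at the top, one simple hedged
# side-check (not part of the filling pipeline) is the pairwise correlation of the
# pivoted readings; highly correlated meters are good candidates for borrowing patterns.
meter_corr = train_df_pivot.corr()
print(meter_corr["site1_bldg105_meter0"].sort_values(ascending=False).head())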
df_PM_temp = train_df_pivot.copy()
df_PM_temp = (df_PM_temp - df_PM_temp.mean()) / df_PM_temp.std()
STL_decomp = seasonal_decompose(
df_PM_temp.fillna(0), model="additive", freq=24 * 7, extrapolate_trend=1
)
df_seasonal_temp = STL_decomp.seasonal.iloc[24 * 10 : 24 * 17, :]
df_seasonal_temp = df_seasonal_temp.T
df_seasonal_temp["ClusterNo"] = Kmeans_clustering(
df=df_seasonal_temp, clusterNum=5, max_iter=100000, n_jobs=-1
)
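# Hedged sketch: instead of hard-coding clusterNum=5, the elbow helper defined above
# could suggest a value (this refits KMeans for every candidate k, so it can be slow).
suggested_k = Kmeans_bestClusterNum(
    df=df_seasonal_temp.drop("ClusterNo", axis=1),
    range_min=2,
    range_max=10,
    max_iter=100000,
    n_jobs=-1,
)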
for ClusterNo in df_seasonal_temp["ClusterNo"].unique():
df_plot = df_seasonal_temp[df_seasonal_temp["ClusterNo"] == ClusterNo].T.drop(
"ClusterNo"
)
print("ClusterNo: " + str(ClusterNo))
print("Amount of meters: " + str(len(df_plot.T)))
df_plot.plot(figsize=(15, 5), color="black", alpha=0.1, legend=False)
plt.show()
print(
"---------------------------------------------------------------------------------------------------"
)
df_PM_temp = train_df_pivot.copy()
df_PM_temp = (df_PM_temp - df_PM_temp.mean()) / df_PM_temp.std()
STL_decomp = seasonal_decompose(
df_PM_temp.fillna(0), model="additive", freq=24 * 7, extrapolate_trend=1
)
df_trend_temp = STL_decomp.trend
# df_trend_temp = df_trend_temp.loc['2016']
df_trend_temp = df_trend_temp.T
df_trend_temp["ClusterNo"] = Kmeans_clustering(
df=df_trend_temp, clusterNum=5, max_iter=100000, n_jobs=-1
)
for ClusterNo in df_trend_temp["ClusterNo"].unique():
df_plot = df_trend_temp[df_trend_temp["ClusterNo"] == ClusterNo].T.drop("ClusterNo")
print("ClusterNo: " + str(ClusterNo))
print("Amount of meters: " + str(len(df_plot.T)))
df_plot.plot(figsize=(15, 5), color="black", alpha=0.1, legend=False, ylim=(-3, 3))
plt.show()
print(
"---------------------------------------------------------------------------------------------------"
)
# Take a look at "site1_bldg105_meter0": we can observe abnormal constant values on some days.
# Graph of the original data: "site1_bldg105_meter0"
train_df_pivot["site1_bldg105_meter0"].iplot(kind="scatter")
# Let's clean these abnormal days by checking for std_daily == 0
list_powerMeter = list(train_df_pivot.columns)
for name_meter in list_powerMeter:
df_daily = train_df_pivot[name_meter].copy()
df_daily = df_daily.resample("D").std()
list_abnormalDate = list(df_daily[df_daily == 0].index.strftime("%Y-%m-%d"))
train_df_pivot["Date"] = pd.to_datetime(train_df_pivot.index.date)
train_df_pivot.loc[
train_df_pivot["Date"].isin(list_abnormalDate), name_meter
] = np.nan
train_df_pivot = train_df_pivot.drop("Date", axis=1)
# Successfully cleaned these abnormal days!
# Graph of the cleaned data: "site1_bldg105_meter0"
train_df_pivot["site1_bldg105_meter0"].iplot(kind="scatter")
# Now, let's take "site1_bldg105_meter0" as an example for filling in the missing values
# Prepare data: "site1_bldg105_meter0"
example_df = train_df_pivot.copy()
example_df = example_df.dropna(axis=1) # Only take non-missing meter readings as inputs
example_df["elec_meas"] = train_df_pivot[
"site1_bldg105_meter0"
].copy() # Take "site1_bldg105_meter0" as target
traindata = example_df[~example_df["elec_meas"].isna()]
testdata = example_df[example_df["elec_meas"].isna()]
train_labels = traindata["elec_meas"]
train_features = traindata.drop("elec_meas", axis=1)
test_features = testdata.drop("elec_meas", axis=1)
LGB_model = lgb.LGBMRegressor()
LGB_model.fit(train_features, train_labels)
testdata["elec_pred"] = LGB_model.predict(test_features)
# Use the trained LightGBM model to predict over the full date range
example_df["elec_pred"] = LGB_model.predict(example_df.drop("elec_meas", axis=1))
# Boom! Here's the meter reading after filling in the missing values😎
example_df[["elec_meas", "elec_pred"]].iplot(kind="scatter")
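# Hedged sketch of the final write-back step, which the notebook stops short of:
# put the predictions for the missing timestamps back into the pivot table.
missing_idx = train_df_pivot["site1_bldg105_meter0"].isna()
train_df_pivot.loc[missing_idx, "site1_bldg105_meter0"] = testdata["elec_pred"].values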
|
import os
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.neural_network import BernoulliRBM
from sklearn.base import clone
from sklearn.pipeline import Pipeline
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
trainXraw = pd.read_csv(
"/kaggle/input/preprocessed-csv/pre_processed_train.csv",
usecols=[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
)
trainYraw = pd.read_csv(
"/kaggle/input/preprocessed-csv/pre_processed_train.csv", usecols=[13]
)
train_X, test_X, train_y, test_y = train_test_split(
trainXraw, trainYraw, test_size=0.2, random_state=123
)
# train_X=train_X.values
# train_y=train_y.values
# test_X=test_X.values
# test_y=test_y.values
print(np.shape(train_X))
print(np.shape(train_y))
print(np.shape(test_X))
print(np.shape(test_y))
import torch
from torch import nn

train_X = torch.from_numpy(train_X.to_numpy()).float()
train_y = torch.from_numpy(train_y.to_numpy()).float()
test_X = torch.from_numpy(test_X.to_numpy()).float()
test_y = torch.from_numpy(test_y.to_numpy()).float()
class DBN(nn.Module):
def __init__(self, input_dim, hidden_dims, output_dim):
super(DBN, self).__init__()
self.input_layer = nn.Linear(input_dim, hidden_dims[0])
self.hidden_layers = nn.ModuleList(
[
nn.Linear(hidden_dims[i], hidden_dims[i + 1])
for i in range(len(hidden_dims) - 1)
]
)
        self.output_layer = nn.Linear(hidden_dims[-1], output_dim)
        # NOTE: input_layer also maps input_dim -> hidden_dims[0], so stacking it after
        # initial_layer only works because hidden_dims[0] is set equal to input_dim below.
        self.initial_layer = nn.Linear(input_dim, hidden_dims[0])
def forward(self, x):
x = torch.relu(self.initial_layer(x))
x = torch.relu(self.input_layer(x))
for layer in self.hidden_layers:
x = torch.relu(layer(x))
x = self.output_layer(x)
return x
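# Note that, as written, the DBN class above is a plain feed-forward network with no
# unsupervised pre-training. Below is a hedged sketch of what a greedy layer-wise
# pre-training step could look like with the BernoulliRBM imported earlier; the layer
# sizes and hyperparameters are illustrative assumptions, and wiring the learned
# rbm.components_ into the torch layers would still be a separate step.
from sklearn.preprocessing import MinMaxScaler


def pretrain_rbm_stack(X, layer_sizes, n_iter=10):
    # Greedy layer-wise training: each RBM learns on the hidden activations of the previous one.
    rbms, layer_input = [], X
    for n_hidden in layer_sizes:
        rbm = BernoulliRBM(
            n_components=n_hidden, learning_rate=0.05, n_iter=n_iter, random_state=123
        )
        layer_input = rbm.fit_transform(layer_input)
        rbms.append(rbm)
    return rbms


# Example (BernoulliRBM expects inputs scaled to [0, 1]):
# rbm_stack = pretrain_rbm_stack(MinMaxScaler().fit_transform(trainXraw), [12, 30, 40])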
# Define input/output dimensions and hidden layer dimensions
input_dim = train_X.shape[1]
hidden_dims = [input_dim, 30, 40]
output_dim = 1
# Instantiate the DBN
dbn = DBN(input_dim, hidden_dims, output_dim)
# dbn = DBN([500, 300, 200, 100], GaussianBinaryRBMs, rbm_learning_rate=0.1, rbm_epochs=50, finetune_epochs=100, finetune_learning_rate=0.1)
# Define loss function and optimizer
criterion = nn.MSELoss()
optimizer = torch.optim.Adam(dbn.parameters(), lr=0.01)
# Train the DBN
for epoch in range(100):
optimizer.zero_grad()
outputs = dbn(train_X)
loss = criterion(outputs, train_y)
loss.backward()
optimizer.step()
# Make predictions using the trained DBN
predicted = dbn(test_X)
torch.set_printoptions(threshold=torch.inf)
predicted = predicted.detach().numpy()
predicted = np.squeeze(predicted)
def rmse(testY, predY):
return np.sqrt(np.mean((testY - predY) ** 2))
np.set_printoptions(threshold=np.inf)
test_y_np = test_y.numpy().ravel()  # back to a flat numpy array for metric computation
# print(predicted)
# print(test_y_np)
print(rmse(test_y_np, predicted))
errors = test_y_np - predicted
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pandas as pd
User = pd.read_csv("../input/user-data/User_Data.csv")
User.head()
x = User.iloc[:, [2, 3]].values
y = User.iloc[:, 4].values
from sklearn.model_selection import train_test_split
xtrain, xtest, ytrain, ytest = train_test_split(x, y, test_size=0.25)
from sklearn.preprocessing import StandardScaler
Haha = StandardScaler()
xtrain = Haha.fit_transform(xtrain)
xtest = Haha.transform(xtest)
print(xtrain[0:10, :])
print(xtest[0:10, :])
from sklearn.linear_model import LogisticRegression
cl = LogisticRegression()
cl.fit(xtrain, ytrain)
print("The coefficient of the model are", cl.coef_)
inter = cl.intercept_
print("The intercept of the model is : \n", inter)
y_pred = cl.predict(xtest)
print("The predictions of the Logistic trained model are:\n", y_pred)
from sklearn.metrics import confusion_matrix
cm = confusion_matrix(ytest, y_pred)
print("Confusion Matrix : \n", cm)
from sklearn.metrics import accuracy_score
print("The Accuracy of the Model : ", accuracy_score(ytest, y_pred))
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# This creates a pandas dataframe and assigns it to the titanic variable.
titanic_test = pd.read_csv("../input/titanic/test.csv")
titanic = pd.read_csv("../input/titanic/train.csv")
# Print the first 5 rows of the dataframe.
titanic.head()
titanic_test.head().T
# note there is no Survived column here, which is the target variable we are trying to predict
# the info method provides information about the dataset such as
# the number of values in each column, null/not-null counts, datatypes, memory usage, etc.
titanic.info()
titanic.describe()
# let's see if there are any more columns with missing values
null_columns = titanic.columns[titanic.isnull().any()]
titanic.isnull().sum()
# how about the test set?
titanic_test.isnull().sum()
import matplotlib.pyplot as plt
import seaborn as sns
sns.set(font_scale=1)
labels = []
counts = []
for col in null_columns:
labels.append(col)
counts.append(titanic[col].isnull().sum())
ind = np.arange(len(labels))
fig, ax = plt.subplots(figsize=(10, 2))
ax.barh(ind, counts, color="purple")
ax.set_yticks(ind)
ax.set_yticklabels(labels)
ax.set_xlabel("Missing values")
ax.set_ylabel("Columns")
ax.set_title("Variables with missing values")
titanic.hist(bins=10, figsize=(9, 7), grid=False)
g = sns.FacetGrid(titanic, col="Sex", row="Survived", margin_titles=True)
g.map(plt.hist, "Age", color="purple")
g = sns.FacetGrid(
titanic,
hue="Survived",
col="Pclass",
margin_titles=True,
palette={1: "seagreen", 0: "gray"},
)
g = g.map(plt.scatter, "Fare", "Age", edgecolor="w").add_legend()
|
# # Students Test Score: Extended Dataset Starter Notebook
# ---
# This is a starter notebook to get you started with the dataset ([Student Test Scores: Extended Dataset](https://www.kaggle.com/datasets/desalegngeb/students-exam-scores)). Within this dataset there are two csv files (`Original_data_with_more_rows.csv` and `Expanded_data_with_more_features.csv`)
# - `Original_data_with_more_rows.csv`: This file has 30641 rows and 8 columns, and has no missing values.
# - `Expanded_data_with_more_features.csv`: This file, just like the one above, has 30641 rows, but with more features (14 columns), and it contains missing values, which makes it ideal for data preprocessing tasks such as missing-value imputation.
# **Note**: There exists a similar (in part, identical) kaggle dataset which is a smaller (1k rows instead of >30641) version of the `Original_data_with_more_rows.csv` data on which I have already done extensive EDA and predictions. If you are interested you may check it here, [Student's Test Performance: EDA and Prediction](https://www.kaggle.com/code/desalegngeb/student-s-test-performance-eda-and-prediction)
# ---
# ### Data dictionary
# The following are the column descriptions. Columns 0 to 5 and 11 to 13 are common to both files. The rest are present in the extended file only.
# 0. **Gender**: gender of the student (male/female)
# 1. **EthnicGroup**: ethnic group of the student (group A to E)
# 2. **ParentEduc**: parent(s) education background
# 3. **LunchType**: school lunch type (standard or free/reduced)
# 4. **TestPrep**: test preparation course (completed or none)
# 5. **ParentMaritalStatus**: parent(s) marital status
# 6. **PracticeSport**: how often the student practices sport
# 7. **IsFirstChild**: whether the student is the first child in the family or not
# 8. **NrSiblings**: number of siblings the student has
# 9. **TransportMeans**: means of transport to school
# 10. **WklyStudyHours**: weekly self-study hours
# 11. **MathScore**: math test score
# 12. **ReadingScore**: reading test score
# 13. **WritingScore**: writing test score
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
import seaborn as sns
import missingno as msno
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
data_org = pd.read_csv(
"/kaggle/input/students-exam-scores/Original_data_with_more_rows.csv"
)
data_extended = pd.read_csv(
"/kaggle/input/students-exam-scores/Expanded_data_with_more_features.csv"
)
# ## 1. Original dataset
data_org = data_org.drop("Unnamed: 0", axis=1)
data_org.head(3)
data_org.info()
# ### Check for duplicates
data_org.duplicated().sum()
# ### Test score distributions
sns.histplot(data_org[["MathScore", "ReadingScore", "WritingScore"]])
plt.title("Distribution of the test scores")
data_org.groupby(["Gender"])[["MathScore", "ReadingScore", "WritingScore"]].agg(np.mean)
# ## 2. Extended Dataset
data_extended = data_extended.drop("Unnamed: 0", axis=1)
data_extended.head(3)
data_extended.info()
# ### Check for duplicates
data_extended.duplicated().sum()
# ### Check for null values
msno.matrix(data_extended, figsize=(12, 6), color=(0.1, 0.9, 0.6), fontsize=10)
for col in data_extended.columns:
print(
"Missing values in column {} is {} %".format(
col,
np.round(
data_extended[col].isnull().sum() * 100 / (len(data_extended[col])), 1
),
)
)
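# The extended file lends itself to practising missing-value handling. Below is a minimal
# imputation sketch (median for numeric columns, most frequent value for categorical ones;
# the strategy is chosen purely for illustration):
data_imputed = data_extended.copy()
for col in data_imputed.columns:
    if data_imputed[col].isnull().any():
        if data_imputed[col].dtype.kind in "if":  # numeric columns
            data_imputed[col] = data_imputed[col].fillna(data_imputed[col].median())
        else:  # categorical / object columns
            data_imputed[col] = data_imputed[col].fillna(data_imputed[col].mode()[0])
print(data_imputed.isnull().sum().sum())  # should print 0 after imputation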
# ### Test score distributions
sns.histplot(data_extended[["MathScore", "ReadingScore", "WritingScore"]])
plt.title("Distribution of the test scores")
data_extended[["MathScore", "ReadingScore", "WritingScore"]].agg(
[np.mean, np.std, np.max, np.min]
)
# ### Gender
sns.countplot(x=data_extended["Gender"])
plt.title("Gender")
data_extended.groupby(["Gender"])[["MathScore", "ReadingScore", "WritingScore"]].agg(
np.mean
)
# ### Parent(s) educational background
sns.countplot(x=data_extended["ParentEduc"])
plt.title("Parents educational background")
plt.xticks(rotation=45)
data_extended.groupby(["ParentEduc"])[
["MathScore", "ReadingScore", "WritingScore"]
].agg(np.mean)
# ### Ethnic group
sns.countplot(x=data_extended["EthnicGroup"])
plt.title("Which ethnic group do the students belong to?")
# ### Lunch type
sns.countplot(x=data_extended["LunchType"])
plt.title("School lunch type")
data_extended.groupby(["LunchType"])[["MathScore", "ReadingScore", "WritingScore"]].agg(
np.mean
)
# ### Weekly study hours
sns.countplot(x=data_extended["WklyStudyHours"])
plt.title("How long do students study at home [hrs]?")
data_extended.groupby(["WklyStudyHours"])[
["MathScore", "ReadingScore", "WritingScore"]
].agg(np.mean)
# ### Practice sport
sns.countplot(x=data_extended["PracticeSport"])
plt.title("How often do students practice sports?")
data_extended.groupby(["PracticeSport"])[
["MathScore", "ReadingScore", "WritingScore"]
].agg(np.mean)
# ### Interacting features
data_extended.groupby(["Gender", "ParentEduc"])[
["MathScore", "ReadingScore", "WritingScore"]
].agg(np.mean)
data_extended.groupby(["EthnicGroup", "Gender"])[
["MathScore", "ReadingScore", "WritingScore"]
].agg(np.mean)
data_extended.groupby(["PracticeSport", "TestPrep"])[
["MathScore", "ReadingScore", "WritingScore"]
].agg(np.mean)
data_extended.groupby(["TestPrep"])[["MathScore", "ReadingScore", "WritingScore"]].agg(
np.mean
)
data_extended.groupby(["WklyStudyHours", "ParentEduc"])[
["MathScore", "ReadingScore", "WritingScore"]
].agg(np.mean)
data_extended.groupby(["Gender", "PracticeSport"])[
["MathScore", "ReadingScore", "WritingScore"]
].agg(np.mean)
|
# # Sentiment Analysis in Tweets
# Our sentiment analysis system is based on a hybrid approach, which employs supervised learning with a Support Vector Machines Sequential Minimal Optimization (Platt, 1998) linear kernel, on unigram and bigram features, but exploiting as features sentiment dictionaries, emoticon lists, slang lists and other social media-specific features.
# # Tweet Pre-processing
import pandas as pd
import numpy as np
np.random.seed(42)
df = pd.read_csv(
"../input/sentiment140/training.1600000.processed.noemoticon.csv",
encoding="latin",
header=None,
)
df = df.sample(20000)
df = df.iloc[:, [5, 0]]
df.columns = ["text", "class"]
# df['class'][df['class']==4] = 1
df = df.reset_index(drop=True)
df.head(10)
# # Tweet Pre-processing
# The language employed in Social Media sites is different from the one found in mainstream media and the form of the words employed is sometimes not the one we may find in a dictionary. Further on, users of Social Media platforms employ a special “slang” (i.e. informal language, with special expressions, such as “lol”, “omg”), emoticons, and often emphasize words by repeating some of their letters.
# Additionally, the language employed in Twitter has specific characteristics, such as the markup of tweets that were reposted by other users with “RT”, the markup of topics using the “#” (hash sign) and of the users using the “@” sign.
# All these aspects must be considered at the time of processing tweets. As such, before applying supervised learning to classify the sentiment of the tweets, we preprocess them, to normalize the language they contain. The pre-processing stage contains the following steps:
# ## Repeated Punctuation sign normalization
# In the first step of the pre-processing, we detect repetitions of punctuation signs (“.”, “!” and “?”). Multiple consecutive punctuation signs are replaced with the labels “multistop”, for the fullstops, “multiexclamation” in the case of exclamation sign and “multiquestion” for the question mark and spaces before and after.
import re
def preprocess(text):
# Replace repeated punctuation signs with labels and add spaces
text = re.sub(r"(\.{2,})", r" multistop ", text)
text = re.sub(r"(\!{2,})", r" multiexclamation ", text)
text = re.sub(r"(\?{2,})", r" multiquestion ", text)
# Add spaces before and after single punctuation signs
text = re.sub(r"(\.)", r" stop ", text)
text = re.sub(r"(\!)", r" exclamation ", text)
text = re.sub(r"(\?)", r" question ", text)
text = re.sub(r"(\,)", r" comma ", text)
# Lower case the text
text = text.lower()
return text
df["text"] = df["text"].apply(preprocess)
df.head()
# ## Emoticon replacement
# In the second step of the pre-processing, we employ the annotated list of emoticons from [SentiStrength](http://sentistrength.wlv.ac.uk/) and match the content of the tweets against this list. The emoticons found are replaced with their polarity (“positive” or “negative”) and the “neutral” ones are deleted.
def load_emoticons(emo_filename):
# Load emoticons and their polarity from a file
emoticon_dict = {}
with open(emo_filename, "r", encoding="latin-1") as file:
for line in file:
emoticon, polarity = line.strip().split("\t")
emoticon_dict[emoticon] = polarity
return emoticon_dict
# Load emoticons and their polarity from a file
emoticon_dict = load_emoticons("/kaggle/input/sentistrength/EmoticonLookupTable.txt")
def replace_emoticons(text, emoticon_dict=emoticon_dict):
# Replace emoticons with their polarity and delete neutral ones
for emoticon, polarity in emoticon_dict.items():
pattern = re.compile(re.escape(emoticon), re.IGNORECASE)
if polarity == "1":
text = pattern.sub("positive", text)
elif polarity == "-1":
text = pattern.sub("negative", text)
else:
text = pattern.sub("", text)
return text.split()
# Example usage
text = "I'm so Happy :) but :0->-<|: also a bit sad :("
processed_text = preprocess(text)
processed_text = replace_emoticons(processed_text, emoticon_dict)
print(processed_text)
df["text"] = df["text"].apply(replace_emoticons)
df.head()
# ## Slang replacement
# The next step involves the normalization of the language employed. In order to be able to include the semantics of the expressions frequently used in Social Media, we employed the list of slang from a specialized site.
def load_slang(slang_filename):
    # Load slang expressions and their meanings from a file
slang_dict = {}
with open(slang_filename, "r", encoding="latin-1") as file:
for line in file:
slang, meaning = line.strip().split("\t")
slang_dict[slang] = meaning
return slang_dict
# Load the slang lookup table from a file
slang_dict = load_slang("/kaggle/input/sentistrength/SlangLookupTable.txt")
def replace_slang(tokens, slang_dict=slang_dict):
    # Replace slang tokens with their expanded meanings
for i, token in enumerate(tokens):
if token in slang_dict:
tokens[i] = slang_dict[token]
return tokens
# Example usage
text = "I'm so happy :) lol, but :0->-<|: also a bit sad :("
processed_text = preprocess(text)
processed_text = replace_emoticons(processed_text, emoticon_dict)
processed_text = replace_slang(processed_text, slang_dict)
print(processed_text)
df["text"] = df["text"].apply(replace_slang)
df.head()
# ## User and topic labeling
# Finally, the users mentioned in the tweet, which are marked with “@”, are replaced with “PERSON” and the topics which the tweet refers to (marked with “#”) are replaced with “TOPIC”.
def label_user_topic(tokens):
labeled_tokens = []
for token in tokens:
if token.startswith("@"):
labeled_tokens.append("PERSON")
elif token.startswith("#"):
labeled_tokens.append("TOPIC")
elif token.startswith("http"):
labeled_tokens.append("URL")
else:
labeled_tokens.append(token)
return labeled_tokens
text = "Hey @neo I'm so happy :) lol, but :0->-<|: #together also a bit sad :("
processed_text = preprocess(text)
processed_text = replace_emoticons(processed_text, emoticon_dict)
processed_text = replace_slang(processed_text, slang_dict)
label_user_topic(processed_text)
df["text"] = df["text"].apply(label_user_topic)
df.head()
# ## Word normalization
# At this stage, the tokens are compared to entries in Roget's Thesaurus. If no match is found, repeated letters are sequentially reduced to two or one until a match is found in the dictionary (e.g. “perrrrrrrrrrrrrrrrrrfeeect” becomes “perrfeect”, “perfeect”, “perrfect” and subsequently “perfect”). Words used in this form are marked as “stressed”. (The implementation below uses WordNet as a freely available stand-in for Roget's Thesaurus.)
import nltk
nltk.download("wordnet")
# you can also unzip the wordnet corpus from the console if needed
from nltk.corpus import wordnet as wn
def reduce_word(word):
# Check if the word is in Roget's Thesaurus
synsets = wn.synsets(word)
if synsets:
return word
# Iterate over the letters in the word, starting from the end
for i in range(len(word) - 1, 1, -1):
# If the current letter is the same as the previous one,
# remove the current letter and check if the resulting word
# is in Roget's Thesaurus
if word[i] == word[i - 1]:
word = word[:i] + word[i + 1 :]
synsets = wn.synsets(word)
if synsets:
return "STRESSED"
# If the current and previous letters are the same as the one before them,
# remove the current letter and check if the resulting word
# is in Roget's Thesaurus
elif i > 2 and word[i] == word[i - 2]:
word = word[: i - 1] + word[i:]
synsets = wn.synsets(word)
if synsets:
return "STRESSED"
# If no match is found, return the original word
return word
def normalize_words(tokens):
normalized_tokens = []
for token in tokens:
# Check if the token is a word
if re.match(r"\b\w+\b", token):
# Normalize the word
normalized_word = reduce_word(token.lower())
# If the normalized word is different from the original word,
# add both versions to the list of tokens
if normalized_word != token.lower():
normalized_tokens.append(normalized_word)
else:
normalized_tokens.append(token)
else:
normalized_tokens.append(token)
# normalized_tokens = [token.split() if 'STRESSED' in token else token for token in normalized_tokens]
# normalized_tokens = [item if not isinstance(item, list) else item for sublist in normalized_tokens for item in sublist]
return normalized_tokens
# Example usage
tokens = ["That's", "perrrrrrfeeect", "multiexclamtion", "HASHTAG"]
normalized_tokens = normalize_words(tokens)
print(normalized_tokens)
df["text"] = df["text"].apply(normalize_words)
df.tail()
# ## Affect word matching
# Further on, the tokens in the tweet are matched against three different sentiment lexicons: GI, LIWC and MicroWNOp, which were previously split into four different categories (“positive”, “high positive”, “negative” and “high negative”). Matched words are replaced with their sentiment label - i.e. “positive”, “negative”, “hpositive” and “hnegative”. A version of the data without these replacements is also maintained, for comparison purposes. (In the implementation below, the NLTK opinion lexicon and SentiWordNet serve as freely available stand-ins for these lexicons.)
from nltk.sentiment import SentimentIntensityAnalyzer
from nltk.corpus import opinion_lexicon, sentiwordnet
nltk.download("opinion_lexicon")
nltk.download("sentiwordnet")
nltk.download("vader_lexicon")
def match_affect_words(tokens):
sia = SentimentIntensityAnalyzer()
positive_words = set(opinion_lexicon.positive())
negative_words = set(opinion_lexicon.negative())
    # Strongly positive / negative adjectives from SentiWordNet (score > 0.75)
    hpositive_words = set(
        lemma.name()
        for synset in sentiwordnet.all_senti_synsets()
        if synset.synset.pos() == "a" and synset.pos_score() > 0.75
        for lemma in synset.synset.lemmas()
        if lemma.name() not in positive_words
    )
    hnegative_words = set(
        lemma.name()
        for synset in sentiwordnet.all_senti_synsets()
        if synset.synset.pos() == "a" and synset.neg_score() > 0.75
        for lemma in synset.synset.lemmas()
        if lemma.name() not in negative_words
    )
affect_labels = {
"positive": positive_words,
"negative": negative_words,
"hpositive": hpositive_words,
"hnegative": hnegative_words,
}
# create a mapping from affect words to labels
affect_words = set(
word for label_words in affect_labels.values() for word in label_words
)
word_to_label = {}
for word in affect_words:
scores = sia.polarity_scores(word)
if scores["compound"] >= 0.5:
word_to_label[word] = "hpositive"
elif scores["compound"] > -0.5 and scores["compound"] < 0.5:
word_to_label[word] = "positive" if word in positive_words else "negative"
else:
word_to_label[word] = "hnegative"
# match tokens to affect labels
affect_set = set(affect_words)
matched_tokens = []
for token in tokens:
if token in affect_set:
label = word_to_label[token]
matched_tokens.append(label)
else:
matched_tokens.append(token)
return matched_tokens
tokens = ["That's", "excited"]
matched_tokens = match_affect_words(tokens)
print(matched_tokens)
df["text"] = df["text"].apply(match_affect_words)
df.tail()
# ## Modifier word matching
# Similar to the previous step, we employ a list of expressions that negate, intensify or diminish the intensity of the sentiment expressed to detect such words in the tweets. If such a word is matched, it is replaced with “negator”, “intensifier” or “diminisher”, respectively. As in the case of affective words, a version of the data without these replacements is also maintained, for comparison purposes.
# define the lists of negation, intensification and diminishment expressions
negation_list = [
"no",
"not",
"never",
"none",
"nobody",
"nowhere",
"nothing",
"neither",
"nor",
"cannot",
"can't",
"don't",
"doesn't",
"didn't",
"won't",
"wouldn't",
"shouldn't",
"couldn't",
"isn't",
"aren't",
"ain't",
"hate",
"dislike",
"disapprove",
"disapprove of",
"disagree",
"disagree with",
"reject",
"rejects",
"rejected",
"refuse",
"refuses",
"refused",
"never",
"rarely",
"seldom",
"hardly",
"scarcely",
"barely",
]
# intensification expressions (the exact words chosen here are illustrative; a list of
# this name is required by match_modifier_words below)
intensification_list = [
    "very",
    "really",
    "extremely",
    "so",
    "too",
    "absolutely",
    "completely",
    "totally",
    "highly",
    "incredibly",
]
diminishment_list = [
"little",
"slightly",
"somewhat",
"kind",
"sort",
"bit",
"little",
"moderately",
"marginally",
"fairly",
"reasonably",
"comparatively",
"relatively",
"tad",
"touch",
"extent",
]
self_list = ["i", "I'm", "i'm", "I", "my", "mine", "myself", "me"]
opponent_list = ["you", "u", "your", "ur", "your's", "urs"]
point_list = ["this", "that", "it", "its", "it's", "that's"]
def match_modifier_words(tokens):
matched_tokens = []
for token in tokens:
if token in negation_list:
matched_tokens.append("negator")
elif token in intensification_list:
matched_tokens.append("intensifier")
elif token in diminishment_list:
matched_tokens.append("diminisher")
elif token in self_list:
matched_tokens.append("self")
elif token in opponent_list:
matched_tokens.append("opponent")
elif token in point_list:
matched_tokens.append("pointOut")
else:
matched_tokens.append(token)
return matched_tokens
tokens = [
"I'm",
"not",
"very",
"happy",
"with",
"the",
"service",
"it's",
"a",
"bit",
"disappointing",
]
matched_tokens = match_modifier_words(tokens)
print(matched_tokens)
# df["text"] = df["text"].apply(match_modifier_words)
# df.tail()
# The next step in the text processing pipeline is to convert the filtered tokens into a numerical representation that can be used as input to a machine learning algorithm. One common way to do this is to use a bag-of-words model, which represents a text as a vector of word frequencies.
try:
import nltk
stemmer = nltk.PorterStemmer()
for word in (
"Computations",
"Computation",
"Computing",
"Computed",
"Compute",
"Compulsive",
):
print(word, "=>", stemmer.stem(word))
except ImportError:
print("Error: stemming requires the NLTK module.")
stemmer = None
from sklearn.base import BaseEstimator, TransformerMixin
from collections import Counter
import numpy as np
class TweetToWordCounterTransformer(BaseEstimator, TransformerMixin):
def __init__(self, stemming=False):
self.stemming = stemming
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
X_transformed = []
for tokens in X:
word_counts = Counter(tokens)
if self.stemming and stemmer is not None:
stemmed_word_counts = Counter()
for word, count in word_counts.items():
stemmed_word = stemmer.stem(word)
stemmed_word_counts[stemmed_word] += count
word_counts = stemmed_word_counts
X_transformed.append(word_counts)
return np.array(X_transformed)
X = df["text"]
y = df["class"]
X_wordcounts = TweetToWordCounterTransformer().fit_transform(X)
X_wordcounts
# Now we have the word counts, and we need to convert them to vectors. For this, we will build another transformer whose fit() method will build the vocabulary (an ordered list of the most common words) and whose transform() method will use the vocabulary to convert word counts to vectors. The output is a sparse matrix.
from scipy.sparse import csr_matrix
class WordCounterToVectorTransformer(BaseEstimator, TransformerMixin):
def __init__(self, vocabulary_size=1000):
self.vocabulary_size = vocabulary_size
def fit(self, X, y=None):
total_count = Counter()
for word_count in X:
for word, count in word_count.items():
total_count[word] += min(count, 10)
most_common = total_count.most_common()[: self.vocabulary_size]
self.most_common_ = most_common
self.vocabulary_ = {
word: index + 1 for index, (word, count) in enumerate(most_common)
}
return self
def transform(self, X, y=None):
rows = []
cols = []
data = []
for row, word_count in enumerate(X):
for word, count in word_count.items():
rows.append(row)
cols.append(self.vocabulary_.get(word, 0))
data.append(count)
return csr_matrix(
(data, (rows, cols)), shape=(len(X), self.vocabulary_size + 1)
).toarray()
vocab_transformer = WordCounterToVectorTransformer(vocabulary_size=40000)
X_vectors = vocab_transformer.fit_transform(X_wordcounts)
X_vectors
# # Sentiment Classification of Tweets
# We employed supervised learning using SVM SMO with a linear kernel, based on boolean features - the presence or absence of n-grams (unigrams, bigrams and unigrams plus bigrams) determined from the training data
from sklearn.feature_extraction.text import CountVectorizer
# create the feature matrix using n-grams
vectorizer = CountVectorizer(ngram_range=(1, 2), binary=True)
X = vectorizer.fit_transform(df["text"].apply(lambda x: " ".join(x)))
# create the target variable
y = df["class"]
# split the data into training and test sets
train_size = int(0.8 * len(df))
X_train, X_test = X[:train_size], X[train_size:]
y_train, y_test = y[:train_size], y[train_size:]
from sklearn.svm import LinearSVC
# train the SVM classifier
clf = LinearSVC()
clf.fit(X_train, y_train)
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
# make predictions on the test set
y_pred = clf.predict(X_test)
# evaluate the performance of the classifier
accuracy = accuracy_score(y_test, y_pred)
precision = precision_score(y_test, y_pred, average="weighted")
recall = recall_score(y_test, y_pred, average="weighted")
f1 = f1_score(y_test, y_pred, average="weighted")
print("Accuracy:", accuracy)
print("Precision:", precision)
print("Recall:", recall)
print("F1-score:", f1)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import torch.nn as nn
import numpy as np
import torch
# Input (temp, rainfall, humidity)
inputs = np.array(
[
[73, 67, 43],
[91, 88, 64],
[87, 134, 58],
[102, 43, 37],
[69, 96, 70],
[74, 66, 43],
[91, 87, 65],
[88, 134, 59],
[101, 44, 37],
[68, 96, 71],
[73, 66, 44],
[92, 87, 64],
[87, 135, 57],
[103, 43, 36],
[68, 97, 70],
],
dtype="float32",
)
# Targets (apples, oranges)
targets = np.array(
[
[56, 70],
[81, 101],
[119, 133],
[22, 37],
[103, 119],
[57, 69],
[80, 102],
[118, 132],
[21, 38],
[104, 118],
[57, 69],
[82, 100],
[118, 134],
[20, 38],
[102, 120],
],
dtype="float32",
)
inputs = torch.from_numpy(inputs)
targets = torch.from_numpy(targets)
inputs
from torch.utils.data import TensorDataset
# Define dataset
train_ds = TensorDataset(inputs, targets)
train_ds[0:3]
from torch.utils.data import DataLoader
# Define data loader
batch_size = 5
train_dl = DataLoader(train_ds, batch_size, shuffle=True)
for xb, yb in train_dl:
print(xb)
print(yb)
break
# Define model
model = nn.Linear(3, 2)
print(model.weight)
print(model.bias)
# Parameters normalizer
def paramsNormalizer(matrix, isBias):
    consolidated_weights = 0
    if isBias:
        return sum(matrix)
    for row in matrix:
        for col in row:
            consolidated_weights += col
    return consolidated_weights
print(paramsNormalizer(model.weight, False))
print(paramsNormalizer(model.bias, True))
# Sub-key generator
def subKeyGenerator(KeyWeights, KeyBias, loss):
KeyWeightsNorm = round(abs(KeyWeights.item() + loss.item()) * 100)
KeyBiasNorm = round(abs(KeyBias.item() - loss.item()) * 100)
return KeyBiasNorm**KeyWeightsNorm
TWO_POW_128 = 340282366920938463463374607431768211456
KnighTourDict = {}
def makeKnighTourDict(knightTourSolution):
lookup = 0
while lookup < 64:
for i in range(0, 8):
for j in range(0, 8):
if knightTourSolution[i][j] == lookup:
KnighTourDict[lookup] = (i + 1) ** (j + 1) * 100
lookup = lookup + 1
print(KnighTourDict)
# One Knight Tour Sample Solution
knightTourSolution = [
[0, 59, 38, 33, 30, 17, 8, 63],
[37, 34, 31, 60, 9, 62, 29, 16],
[58, 1, 36, 39, 32, 27, 18, 7],
[35, 48, 41, 26, 61, 10, 15, 28],
[42, 57, 2, 49, 40, 23, 6, 19],
[47, 50, 45, 54, 25, 20, 11, 14],
[56, 43, 52, 3, 22, 13, 24, 5],
[51, 46, 55, 44, 53, 4, 21, 12],
]
makeKnighTourDict(knightTourSolution)
def xoredSubKey(subKey, counter, KnighTourDict):
return subKey ^ (KnighTourDict[counter])
# Generate predictions
preds = model(inputs)
preds
# Import nn.functional
import torch.nn.functional as F
# Define loss function
loss_fn = F.mse_loss
loss = loss_fn(model(inputs), targets)
print(loss)
# Define optimizer
opt = torch.optim.SGD(model.parameters(), lr=1e-5)
# INT_BITS = 128
# def rightRotate(n, d):
# # In n>>d, first d bits are 0.
# # To put last 3 bits of at
# # first, do bitwise or of n>>d
# # with n <<(INT_BITS - d)
# return (n >> d)|(n << (INT_BITS - d)) & 0xFFFFFFFFFFFFFFFFFFFFFFFF
# # **Helper function generating a subkey**
def helper(weightsTensor, biasTensor, loss, subKeyNum):
# summation of quantities
consolidatedWeights = paramsNormalizer(weightsTensor, False)
consolidatedBias = paramsNormalizer(biasTensor, True)
# subkey
subKey = subKeyGenerator(consolidatedWeights, consolidatedBias, loss)
xorSubKey = xoredSubKey(subKey, subKeyNum, KnighTourDict)
return xorSubKey % TWO_POW_128
keyMatrix = []
# Utility function to train the model
def fit(num_epochs, model, loss_fn, opt, train_dl):
# Repeat for given number of epochs
for epoch in range(num_epochs):
# Train with batches of data
for xb, yb in train_dl:
# 1. Generate predictions
pred = model(xb)
# 2. Calculate loss
loss = loss_fn(pred, yb)
# 3. Compute gradients
loss.backward()
# 4. Update parameters using gradients
opt.step()
# 5. Reset the gradients to zero
opt.zero_grad()
# Print the progress
if (epoch + 1) % 10 == 0:
print(
"Epoch [{}/{}], Loss: {:.4f}".format(epoch + 1, num_epochs, loss.item())
)
print(
"Sub Key {} : {}".format(
int((epoch + 1) / 10),
helper(model.weight, model.bias, loss, int((epoch + 1) / 10) - 1),
),
"\n",
)
keyMatrix.append(
helper(model.weight, model.bias, loss, int((epoch + 1) / 10) - 1)
)
fit(640, model, loss_fn, opt, train_dl)
def decimalToBinary(n):
return "{0:b}".format(int(n))
for keyNum, subKey in enumerate(keyMatrix):
print("Sub Key {} \n {}\n".format(keyNum + 1, decimalToBinary(subKey)))
# **API**
API_KEY = "Q4VK66NA8HFTBWEJ"
import requests
import time
# Define the list of stock symbols to fetch data for
stocks = [
"TATASTEEL.NS",
"RELIANCE.NS",
"HDFCBANK.NS",
"INFY.NS",
"HCLTECH.NS",
"ITC.NS",
"BAJFINANCE.NS",
"TITAN.NS",
"KOTAKBANK.NS",
"HINDUNILVR.NS",
"ASIANPAINT.NS",
"ICICIBANK.NS",
"HDFC.NS",
"WIPRO.NS",
"ONGC.NS",
"POWERGRID.NS",
"MARUTI.NS",
"TCS.NS",
"SBIN.NS",
"NESTLEIND.NS",
"BRITANNIA.NS",
"ULTRACEMCO.NS",
"HEROMOTOCO.NS",
"ADANIPORTS.NS",
"BHARTIARTL.NS",
"NTPC.NS",
"IOC.NS",
"SUNPHARMA.NS",
"CIPLA.NS",
"DRREDDY.NS",
"AXISBANK.NS",
"ONGC.NS",
"HINDALCO.NS",
"VEDL.NS",
"COALINDIA.NS",
"BPCL.NS",
"GAIL.NS",
"HINDPETRO.NS",
"IOC.NS",
"SAIL.NS",
"NMDC.NS",
"INDUSINDBK.NS",
"JSWSTEEL.NS",
"ZEEL.NS",
"ONGC.NS",
"UPL.NS",
"TECHM.NS",
"TATAMOTORS.NS",
"BAJAJ-AUTO.NS",
]
while True:
for symbol in stocks:
# Define the API endpoint and parameters for each stock symbol
API_ENDPOINT = "https://query1.finance.yahoo.com/v8/finance/chart/" + symbol
params = {"interval": "1d", "range": "1d"}
# Send GET request to the API endpoint with the defined parameters
response = requests.get(API_ENDPOINT, params=params)
print(response)
# Check if the request was successful (status code 200)
if response.status_code == 200:
# Extract the stock data from the JSON response
data = response.json()["chart"]["result"][0]["indicators"]["quote"][0]
# Print the stock data
print(f"{symbol} stock data:")
print(f'Open: {data["open"][-1]}')
print(f'High: {data["high"][-1]}')
print(f'Low: {data["low"][-1]}')
print(f'Close: {data["close"][-1]}')
print(f'Volume: {data["volume"][-1]}')
# Wait for 30 seconds before making the next request
time.sleep(30)
import requests
import csv
import time
# Define the list of stock symbols to fetch data for
stocks = [
"TATASTEEL.NS",
"RELIANCE.NS",
"HDFCBANK.NS",
"INFY.NS",
"HCLTECH.NS",
"ITC.NS",
"BAJFINANCE.NS",
"TITAN.NS",
"KOTAKBANK.NS",
"HINDUNILVR.NS",
"ASIANPAINT.NS",
"ICICIBANK.NS",
"HDFC.NS",
"WIPRO.NS",
"ONGC.NS",
"POWERGRID.NS",
"MARUTI.NS",
"TCS.NS",
"SBIN.NS",
"NESTLEIND.NS",
"BRITANNIA.NS",
"ULTRACEMCO.NS",
"HEROMOTOCO.NS",
"ADANIPORTS.NS",
"BHARTIARTL.NS",
"NTPC.NS",
"IOC.NS",
"SUNPHARMA.NS",
"CIPLA.NS",
"DRREDDY.NS",
"AXISBANK.NS",
"ONGC.NS",
"HINDALCO.NS",
"VEDL.NS",
"COALINDIA.NS",
"BPCL.NS",
"GAIL.NS",
"HINDPETRO.NS",
"IOC.NS",
"SAIL.NS",
"NMDC.NS",
"INDUSINDBK.NS",
"JSWSTEEL.NS",
"ZEEL.NS",
"ONGC.NS",
"UPL.NS",
"TECHM.NS",
"TATAMOTORS.NS",
"BAJAJ-AUTO.NS",
]
# Define the headers for the CSV file
headers = ["Symbol", "Open", "High", "Low", "Close", "Volume"]
# Open the CSV file in write mode
with open("stock_data.csv", "w", newline="") as file:
writer = csv.writer(file)
# Write the headers to the CSV file
writer.writerow(headers)
i = 0
while i < 6:
i = i + 1
for symbol in stocks:
# Define the API endpoint and parameters for each stock symbol
API_ENDPOINT = "https://query1.finance.yahoo.com/v8/finance/chart/" + symbol
params = {"interval": "1m", "range": "1d"}
# Send GET request to the API endpoint with the defined parameters
response = requests.get(API_ENDPOINT, params=params)
# Check if the request was successful (status code 200)
if response.status_code == 200:
# Extract the stock data from the JSON response
data = response.json()["chart"]["result"][0]["indicators"]["quote"][0]
# Write the stock data to the CSV file
row = [
symbol,
data["open"][-1],
data["high"][-1],
data["low"][-1],
data["close"][-1],
data["volume"][-1],
]
writer.writerow(row)
# Wait for 30 seconds before making the next request
time.sleep(30)
import requests
import csv
import time
# Define the list of Indian stock symbols to fetch data for
stocks = ["HDFCBANK.NS", "RELIANCE.NS"]
# Define the headers for the CSV file
headers = ["Symbol", "Open", "High", "Low", "Close", "Volume"]
# Open the CSV file in write mode
with open("stock_data.csv", "w", newline="") as file:
writer = csv.writer(file)
# Write the headers to the CSV file
writer.writerow(headers)
i = 0
while True:
i = i + 1
for symbol in stocks:
# Define the API endpoint and parameters for each stock symbol
API_ENDPOINT = "https://query1.finance.yahoo.com/v8/finance/chart/" + symbol
params = {"interval": "1m", "range": "1d"}
# Send GET request to the API endpoint with the defined parameters
response = requests.get(API_ENDPOINT, params=params)
# Check if the request was successful (status code 200)
if response.status_code == 200:
# Extract the stock data from the JSON response
data = response.json()["chart"]["result"][0]["indicators"]["quote"][0]
# Write the stock data to the CSV file
row = [
symbol,
data["open"][-1],
data["high"][-1],
data["low"][-1],
data["close"][-1],
data["volume"][-1],
]
writer.writerow(row)
# Wait for 30 seconds before making the next request
time.sleep(30)
import pandas as pd
# Open the CSV file and convert it to a DataFrame
df = pd.read_csv("stock_data.csv")
# Print the DataFrame
print(df)
import requests
import csv
import time
# Define the list of Indian stock symbols to fetch data for
stocks = [
"HDFCBANK.NS",
"RELIANCE.NS",
"TATASTEEL.NS",
"INFY.NS",
"HCLTECH.NS",
"ITC.NS",
"BAJFINANCE.NS",
"TITAN.NS",
"KOTAKBANK.NS",
"HINDUNILVR.NS",
"ASIANPAINT.NS",
"ICICIBANK.NS",
"HDFC.NS",
"WIPRO.NS",
"ONGC.NS",
"POWERGRID.NS",
"MARUTI.NS",
"TCS.NS",
"SBIN.NS",
"NESTLEIND.NS",
"BRITANNIA.NS",
"ULTRACEMCO.NS",
"HEROMOTOCO.NS",
"ADANIPORTS.NS",
"BHARTIARTL.NS",
"NTPC.NS",
"IOC.NS",
"SUNPHARMA.NS",
"CIPLA.NS",
"DRREDDY.NS",
"AXISBANK.NS",
"HINDALCO.NS",
"VEDL.NS",
"COALINDIA.NS",
"BPCL.NS",
"GAIL.NS",
"HINDPETRO.NS",
"SAIL.NS",
"NMDC.NS",
"INDUSINDBK.NS",
"JSWSTEEL.NS",
"ZEEL.NS",
"UPL.NS",
"TECHM.NS",
"TATAMOTORS.NS",
"BAJAJ-AUTO.NS",
]
# Define the headers for the CSV file
headers = ["Symbol", "Open", "High", "Low", "Close", "Volume"]
# Open the CSV file in write mode
with open("stock_data.csv", "w", newline="") as file:
writer = csv.writer(file)
# Write the headers to the CSV file
writer.writerow(headers)
while True:
for symbol in stocks:
# Define the API endpoint and parameters for each stock symbol
API_ENDPOINT = "https://query1.finance.yahoo.com/v8/finance/chart/" + symbol
# Send GET request to the API endpoint with the defined parameters
response = requests.get(API_ENDPOINT)
# Check if the request was successful (status code 200)
if response.status_code == 200:
# Extract the stock data from the JSON response
data = response.json()["chart"]["result"][0]["indicators"]["quote"][0]
# Write the stock data to the CSV file
row = [
symbol,
data["open"][-1],
data["high"][-1],
data["low"][-1],
data["close"][-1],
data["volume"][-1],
]
writer.writerow(row)
# Print the stock data to the console
print(row)
            # Wait for 5 seconds before making the next request
            time.sleep(5)
import requests
import csv
import time
# Define the list of Indian stock symbols to fetch data for (the Alpha Vantage API key was defined earlier)
stocks = [
"HDFCBANK.NS",
"RELIANCE.NS",
"TATASTEEL.NS",
"INFY.NS",
"HCLTECH.NS",
"ITC.NS",
"BAJFINANCE.NS",
"TITAN.NS",
"KOTAKBANK.NS",
"HINDUNILVR.NS",
"ASIANPAINT.NS",
"ICICIBANK.NS",
"HDFC.NS",
"WIPRO.NS",
"ONGC.NS",
"POWERGRID.NS",
"MARUTI.NS",
"TCS.NS",
"SBIN.NS",
"NESTLEIND.NS",
"BRITANNIA.NS",
"ULTRACEMCO.NS",
"HEROMOTOCO.NS",
"ADANIPORTS.NS",
"BHARTIARTL.NS",
"NTPC.NS",
"IOC.NS",
"SUNPHARMA.NS",
"CIPLA.NS",
"DRREDDY.NS",
"AXISBANK.NS",
"HINDALCO.NS",
"VEDL.NS",
"COALINDIA.NS",
"BPCL.NS",
"GAIL.NS",
"HINDPETRO.NS",
"SAIL.NS",
"NMDC.NS",
"INDUSINDBK.NS",
"JSWSTEEL.NS",
"ZEEL.NS",
"UPL.NS",
"TECHM.NS",
"TATAMOTORS.NS",
"BAJAJ-AUTO.NS",
]
# Define the headers for the CSV file
headers = ["Symbol", "Open", "High", "Low", "Close", "Volume"]
# Open the CSV file in write mode
with open("stock_data.csv", "w", newline="") as file:
writer = csv.writer(file)
# Write the headers to the CSV file
writer.writerow(headers)
while True:
for symbol in stocks:
# Define the API endpoint and parameters for each stock symbol
API_ENDPOINT = f"https://www.alphavantage.co/query?function=GLOBAL_QUOTE&symbol={symbol}&apikey={API_KEY}"
# Send GET request to the API endpoint with the defined parameters
response = requests.get(API_ENDPOINT)
# Check if the request was successful (status code 200)
if response.status_code == 200:
# Extract the stock data from the JSON response
print(response.json())
data = response.json()["Global Quote"]
# Write the stock data to the CSV file
row = [
symbol,
data["02. open"],
data["03. high"],
data["04. low"],
data["05. price"],
data["06. volume"],
]
writer.writerow(row)
# Print the stock data to the console
print(row)
# Wait for 30 seconds before making the next request
time.sleep(30)
import requests
from bs4 import BeautifulSoup
# Define the URL of the Yahoo Finance page
URL = "https://finance.yahoo.com/most-active"
# Send a GET request to the URL
response = requests.get(URL)
# Check if the request was successful (status code 200)
if response.status_code == 200:
# Parse the HTML content using BeautifulSoup
soup = BeautifulSoup(response.content, "html.parser")
# Find the table containing the top 50 stocks
table = soup.find("table", {"class": "W(100%)"})
# Extract the list of stock symbols from the table
stocks = [row.find_all("td")[0].text for row in table.find_all("tr")[1:]]
# Print the list of stock symbols to the console
print(stocks)
else:
# Print an error message if the request was unsuccessful
print(f"Error: {response.status_code}")
import requests
import csv
import time
# Fetch quotes for the stock symbols scraped above (the Alpha Vantage API key was defined earlier)
api_key = "YOUR_API_KEY"  # placeholder; the API_KEY defined above is what is actually used below
# Define the headers for the CSV file
headers = ["Symbol", "Open", "High", "Low", "Close", "Volume"]
# Open the CSV file in write mode
with open("stock_data.csv", "w", newline="") as file:
writer = csv.writer(file)
# Write the headers to the CSV file
writer.writerow(headers)
# while True:
for symbol in stocks:
# Define the API endpoint and parameters for each stock symbol
API_ENDPOINT = f"https://www.alphavantage.co/query?function=GLOBAL_QUOTE&symbol={symbol}&apikey={API_KEY}"
# Send GET request to the API endpoint with the defined parameters
response = requests.get(API_ENDPOINT)
# Check if the request was successful (status code 200)
if response.status_code == 200:
# Extract the stock data from the JSON response
data = response.json()["Global Quote"]
# Write the stock data to the CSV file
row = [
symbol,
data["02. open"],
data["03. high"],
data["04. low"],
data["05. price"],
data["06. volume"],
]
writer.writerow(row)
# Print the stock data to the console
print(row)
time.sleep(15)
import pandas as pd
# Open the CSV file and convert it to a DataFrame
df = pd.read_csv("stock_data.csv")
# Print the DataFrame
print(df)
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import accuracy_score, confusion_matrix
train_df = pd.read_csv("/kaggle/input/nlp-getting-started/train.csv")
test_df = pd.read_csv("/kaggle/input/nlp-getting-started/test.csv")
sub_df = pd.read_csv("/kaggle/input/nlp-getting-started/sample_submission.csv")
train_df.head()
x = train_df["text"]
y = train_df["target"]
X_train, X_test, y_train, y_test = train_test_split(
x, y, test_size=0.2, random_state=42
)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
vect = CountVectorizer(stop_words="english")
x_train_cv = vect.fit_transform(X_train)
x_test_cv = vect.transform(X_test)
clf = MultinomialNB()
clf.fit(x_train_cv, y_train)
pred = clf.predict(x_test_cv)
pred
confusion_matrix(y_test, pred)
accuracy_score(y_test, pred)
test_text = test_df["text"]
test_text_cv = vect.transform(test_text)
preds = clf.predict(test_text_cv)
sub_df["target"] = preds
sub_df.to_csv("submission.csv", index=False)
|
# !nvidia-smi
import tensorflow as tf
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# from google.colab import drive
# drive.mount('/content/drive')
data = tf.keras.utils.image_dataset_from_directory(
"/kaggle/input/guava-thermal-dataset/DAY 1"
)
data_iterator = data.as_numpy_iterator()
batch = data_iterator.next()
fig, ax = plt.subplots(ncols=4, figsize=(20, 20))
for idx, img in enumerate(batch[0][:4]):
ax[idx].imshow(img.astype(int))
ax[idx].title.set_text(batch[1][idx])
data = data.map(lambda x, y: (x / 255, y))
# data.as_numpy_iterator().next()
train_size = int(len(data) * 0.7)
val_size = int(len(data) * 0.2)
test_size = int(len(data) * 0.1)
train = data.take(train_size)
val = data.skip(train_size).take(val_size)
test = data.skip(train_size + val_size).take(test_size)
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout
# model = Sequential()
# preTrained_model = tf.keras.applications.ResNet50(
# include_top=False,
# weights="imagenet",
# input_tensor=None,
# input_shape=(256,256,3),
# pooling='avg',
# classes=3)
# model.add(preTrained_model)
# model.add(Flatten())
# model.add(Dense(512,activation='relu'))
# model.add(Dense(3,activation='softmax'))
# model.compile('adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# model = Sequential()
# preTrained_model = tf.keras.applications.ResNet50(
# include_top=False,
# weights="imagenet",
# input_tensor=None,
# input_shape=(256,256,3),
# pooling='avg',
# classes=3)
# model.add(preTrained_model)
# model.add(Flatten())
# model.add(Dropout(0.3))
# model.add(Dense(256,activation='relu'))
# model.add(Dropout(0.4))
# model.add(Dense(3,activation='softmax'))
# model.compile('adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
# model = Sequential()
# preTrained_model =tf.keras.applications.VGG16(
# include_top=False,
# weights="imagenet",
# input_tensor=None,
# input_shape=(256,256,3),
# pooling=None,
# classes=1000,
# classifier_activation="softmax",
# )
# model.add(preTrained_model)
# model.add(Flatten())
# model.add(Dropout(0.3))
# model.add(Dense(256,activation='relu'))
# model.add(Dropout(0.4))
# model.add(Dense(3,activation='softmax'))
# model.compile('adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
model = Sequential()
preTrained_model = tf.keras.applications.VGG19(
include_top=False,
weights="imagenet",
input_tensor=None,
input_shape=(256, 256, 3),
pooling=None,
classes=1000,
classifier_activation="softmax",
)
model.add(preTrained_model)
model.add(Conv2D(64, (3, 3), 1, activation="relu"))
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dropout(0.3))
model.add(Dense(256, activation="relu"))
# model.add(Dropout(0.5))
model.add(Dense(3, activation="softmax"))
model.compile("adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
model.summary()
logdir = "logs"
tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=logdir)
hist = model.fit(train, epochs=5, validation_data=val, callbacks=[tensorboard_callback])
fig = plt.figure()
plt.plot(hist.history["loss"], color="teal", label="loss")
plt.plot(hist.history["val_loss"], color="orange", label="val_loss")
fig.suptitle("Loss", fontsize=20)
plt.legend(loc="upper left")
plt.show()
fig = plt.figure()
plt.plot(hist.history["accuracy"], color="teal", label="accuracy")
plt.plot(hist.history["val_accuracy"], color="orange", label="val_accuracy")
fig.suptitle("Accuracy", fontsize=20)
plt.legend(loc="upper left")
plt.show()
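# A quick sanity check on the held-out test split created above (a minimal sketch that
# reuses the loss and metric configured in model.compile):
test_loss, test_acc = model.evaluate(test)
print(f"Test loss: {test_loss:.4f}, test accuracy: {test_acc:.4f}")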
|
# # Unemployment Analysis with Python
# created by :- @gautamdewasiofficial
# importing necessary libraries
# for numerical operations
import pandas as pd
import numpy as np
# for date column
import datetime
# for graphical visualization
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
# ## 1) Data Collection
# dataset from kaggle
# link :- https://www.kaggle.com/datasets/gokulrajkmv/unemployment-in-india
# Note: I chose the larger .csv file; you can work with both if you want.
unemp_df = pd.read_csv("dataset/Unemployment in India.csv")
# ## 2) Analysing Dataset :-
# What does our data file actually contain?
# Understanding and exploring the data
# 2.1) name of the columns presented in the dataframe
"""
Note :-
need to rename some columns , becuase they contain <space> as suffix in their names
ex-: { ' Frequency', ' Estimated Unemployment Rate (%)',
' Estimated Employed', ' Estimated Labour Participation Rate (%)'}
"""
# for name of the columns
unemp_df.columns
# 2.2) How big is the data?
# (rows,columns)
unemp_df.shape
# rows*columns
unemp_df.size
# 2.3) How does the data look? (some samples from the dataframe)
# first 5 records
unemp_df.head()
# last 5 records
unemp_df.tail()
# random records
unemp_df.sample()
# 2.4) What are the data types of the columns/features in the dataframe?
"""
Note:- 'Date' column is object, need to convert into 'datetime'
"""
unemp_df.dtypes
# another way to inspect column data types
unemp_df.info()
# 2.5) Mathematical perspective on our data
round(
unemp_df[
[
" Estimated Unemployment Rate (%)",
" Estimated Employed",
" Estimated Labour Participation Rate (%)",
]
]
.describe()
.T,
2,
)
# 2.6) How are the input features correlated with the target feature?
# 'Correlation' is the statistical measure of the relationship between two variables.
unemp_df[
[
" Estimated Unemployment Rate (%)",
" Estimated Employed",
" Estimated Labour Participation Rate (%)",
]
].corr()
# ## 3) Data Preprocessing :-
# It refers to the cleaning, transforming, and integrating of data in order to make it ready for analysis.
# 3.1) Checking NULL Values in our Dataframe
unemp_df.isnull().sum()
# NOTE:
# each column contains 28 null values;
# compared to the size of the dataset these are few records,
# so dropping them is a reasonable option
# before dropping
unemp_df.shape
# Dropping null records
# axis=0 means operation on rows
unemp_df.dropna(axis=0, inplace=True)
# after dropping
unemp_df.isnull().sum()
# after dropping
unemp_df.shape
# 3.2) Renaming the column names.
unemp_df.columns
# removing unwanted space from some columns
unemp_df.rename(
columns={
" Date": "Date",
" Frequency": "Frequency",
" Estimated Unemployment Rate (%)": "Estimated Unemployment Rate (%)",
" Estimated Employed": "Estimated Employed",
" Estimated Labour Participation Rate (%)": "Estimated Labour Participation Rate (%)",
},
inplace=True,
)
# after changes
unemp_df.columns
# 3.3 ) Check for duplicated values ( records ) in our dataframe
# note: the duplicated() function returns boolean values
# False -> the record is not a duplicate
# True  -> the record is a duplicate
unemp_df[unemp_df.duplicated()]
# total number of duplicated records in our dataframe
unemp_df.duplicated().sum()
# NO Duplicate records present in our dataframe
# 3.4) Creating new columns from the 'Date' column
# First, convert the 'Date' column dtype to datetime
unemp_df["Date"] = pd.to_datetime(unemp_df["Date"])
unemp_df.dtypes
# Creating **day** column from unemp_df['Date']
unemp_df["day"] = unemp_df["Date"].dt.day
# Creating **month** column from unemp_df['Date']
unemp_df["month"] = unemp_df["Date"].dt.month_name()
# Creating **year** column from unemp_df['Date']
unemp_df["year"] = unemp_df["Date"].dt.year
# after changes ,
unemp_df.head()
# ## 4) Exploratory Data Analysis ( EDA ) :-
# Analysing Data using Visualisation plots (graphs )
unemp_df.columns
# value count for each category in ['Region'] column
unemp_df["Region"].value_counts()
# #### Count plot
# Show the counts of observations in each categorical bin using bars.
# Value count for each state using count plot
sns.countplot(x=unemp_df["Region"], palette="Set2")
plt.xticks(rotation="vertical")
plt.show()
# value count for each category in ['Area'] column
unemp_df["Area"].value_counts()
# data seems balanced
# palette is for color patterns
# ex-: Set1,Set3, Dark2, BrBG etc
sns.countplot(x=unemp_df["Area"], palette="Set2")
plt.show()
# #### Pie chart
# Pie charts can be used to show percentages of a whole,
# and represent percentages at a set point in time.
# note: explode pulls a slice out of the chart,
# e.g. 'Urban' is separated from the pie by 0.1
plt.pie(
x=unemp_df["Area"].value_counts(),
autopct="%0.2f",
explode=[0, 0.1],
colors=["#f7856d", "#f5a952"],
)
plt.show()
# #### Scatter Plot
# A scatter plot is a diagram where each value
# in the data set is represented by a dot.
unemp_df.plot(kind="scatter", x="Estimated Unemployment Rate (%)", y="year")
# for changing y_axis range from 2018 to 2022
plt.ylim(2018, 2022)
plt.show()
# Note: the unemployment rate in 2019 was much lower than in 2020.
# We know the reason: the lockdown due to the COVID-19 pandemic.
unemp_df.plot(kind="scatter", x="Estimated Labour Participation Rate (%)", y="Region")
# Note: the labour participation rates in 'Meghalaya', 'Tripura', and 'Telangana' were high during 2019-2020
unemp_df.columns
# #### boxplot
# A boxplot divides data into quartiles,
# with the box representing the interquartile range.
# it is used for finding OUTLIERS in a dataset
# it is based on the "5 number summary"
sns.boxplot(unemp_df["Estimated Unemployment Rate (%)"])
# Note: these are not outliers;
# due to the pandemic, a sudden spike in 'Estimated Unemployment Rate (%)' was recorded
# #### Heatmap
# Heatmaps are used to show relationships between
# two variables, one plotted on each axis.
sns.heatmap(
unemp_df[
[
"Estimated Unemployment Rate (%)",
"Estimated Employed",
"Estimated Labour Participation Rate (%)",
]
].corr(),
annot=True,
)
plt.show()
# ### Scatter graph animation using Plotly library
px.scatter(
data_frame=unemp_df,
x="Estimated Unemployment Rate (%)",
y="Region",
animation_frame="year",
)
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from keras.models import Sequential
from sklearn.model_selection import train_test_split
from keras.layers import (
Conv2D,
Dropout,
MaxPooling2D,
Flatten,
Dense,
BatchNormalization,
)
import keras
import shutil
import os
x = []
y = []
datadir = r"/kaggle/input/flowers/flowers"
Categories = []
for file_label in os.listdir(datadir):
Categories.append(file_label)
for file_name in os.listdir(datadir + "/" + file_label):
x.append(datadir + "/" + file_label + "/" + file_name)
y.append(file_label)
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.20, random_state=34
)
print("x_train = ", len(x_train))
print("x_test = ", len(x_test))
os.mkdir("/kaggle/working/train")
os.mkdir("/kaggle/working/test")
datadir = r"/kaggle/working/"
for file_label in Categories:
os.mkdir("/kaggle/working/train/" + file_label)
new = "/kaggle/working/train/" + file_label
for file_name in x_train:
if file_name.find(file_label) != -1:
shutil.copy2(file_name, new)
for file_label in Categories:
os.mkdir("/kaggle/working/test/" + file_label)
new = "/kaggle/working/test/" + file_label
for file_name in x_test:
if (file_name.find(file_label)) > 0:
shutil.copy2(file_name, new)
image = load_img(x_train[45])
plt.imshow(image)
image.size
train_datagen = ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
training_set = train_datagen.flow_from_directory(
"/kaggle/working/train",
color_mode="rgb",
target_size=(256, 256),
batch_size=25,
class_mode="categorical",
)
test_set = test_datagen.flow_from_directory(
    "/kaggle/working/test",
    color_mode="rgb",
    target_size=(256, 256),
    batch_size=10,
    class_mode="categorical",
    shuffle=False,  # keep file order fixed so predictions align with test_set.classes below
)
model = Sequential()
model.add(Conv2D(32, (3, 3), activation="relu", input_shape=(256, 256, 3)))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
model.add(BatchNormalization())
model.add(Conv2D(64, (3, 3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2), strides=(1, 1)))
model.add(BatchNormalization())
model.add(Conv2D(64, (3, 3), activation="relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(BatchNormalization())
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(1, 1)))
model.add(BatchNormalization())
model.add(Conv2D(64, (3, 3), activation="relu"))
model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2)))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Flatten())
model.add(Dense(1024, activation="relu"))
model.add(BatchNormalization())
model.add(Dropout(0.3))
model.add(Dense(512, activation="relu"))
model.add(BatchNormalization())
model.add(Dense(256, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(16, activation="softmax")) # 16 because we have 16 classes
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
model.summary()
from tensorflow.keras.callbacks import EarlyStopping
custom_early_stopping = EarlyStopping(monitor="val_accuracy", patience=5, mode="max")
res = model.fit(
training_set, epochs=32, callbacks=[custom_early_stopping], validation_data=test_set
)
plt.plot(res.history["accuracy"])
plt.plot(res.history["val_accuracy"])
model.evaluate(test_set)
predict = model.predict(test_set)
predict = np.argmax(predict, axis=1)
from sklearn.metrics import accuracy_score
Acc = accuracy_score(test_set.classes, predict)
print("Accuracy = ", Acc)
|
# # Honda Car Dataset Analysis
# ## Context
# Cars.com is now the second-largest automotive classifieds site, with a large collection of vehicles for sale. However, browsing the website can be slow, so to help me choose between economical cars I built a web scraper to collect up to 5,000 Honda vehicles.
# ## Content
# This dataset will be scraped every few months and contains relevant information cars.com provides such as:
# - The car's model
# - Condition
# - Price
# - Ratings
# - Drivetrain
# - Transmission
# Honda represents economical and functional cars from Japan. Through this analysis, we can explore the information below:
#
#
#
library(tidyverse)
df <- read_csv("/kaggle/input/honda-data/honda_sell_data.csv")
glimpse(df)
|
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import DBSCAN
from sklearn.neighbors import NearestNeighbors
import math
import numpy as np
import seaborn as sns
def findkdistances(df, neighbors, label1, label2, line):
    # Plot the sorted k-nearest-neighbour distances to help choose an epsilon for DBSCAN
    neigh = NearestNeighbors(n_neighbors=neighbors)
    nbrs = neigh.fit(df[[label1, label2]])
    distances, indices = nbrs.kneighbors(df[[label1, label2]])
    distances = np.sort(distances, axis=0)
    distances = distances[:, 1]  # distance to the nearest neighbour (column 0 is the point itself)
    print(distances)
plt.plot(distances)
plt.xlabel("Data point sorted by distance")
plt.ylabel("Epsilon")
for i in line:
plt.axhline(i)
plt.show()
def dbscanFitPlot(eps, min_samples, df, label1, label2):
    # Fit DBSCAN with the given parameters and colour the points by cluster label
    dbscan = DBSCAN(eps=eps, min_samples=min_samples)
dbscan.fit(df[[label1, label2]])
df["dbscan_lables"] = dbscan.labels_
plt.scatter(df[label1], df[label2], c=df["dbscan_lables"], s=10)
plt.show()
def PointsInCircum(r, n=100):
return [
(
math.cos(2 * math.pi / n * x) * r + np.random.normal(-30, 30),
math.sin(2 * math.pi / n * x) * r + np.random.normal(-30, 30),
)
for x in range(1, n + 1)
]
data = pd.DataFrame(PointsInCircum(500, 1000))
data = pd.concat([data, pd.DataFrame(PointsInCircum(300, 700))], ignore_index=True)
data = pd.concat([data, pd.DataFrame(PointsInCircum(100, 300))], ignore_index=True)
data.head()
plt.scatter(data[0], data[1], s=10)
plt.show()
# appending some noise
data = pd.concat(
    [
        data,
        pd.DataFrame(
            [(np.random.randint(-600, 600), np.random.randint(-600, 600)) for i in range(300)]
        ),
    ],
    ignore_index=True,
)
plt.scatter(data[0], data[1], s=10)
plt.show()
dbscan = DBSCAN(eps=30, min_samples=6)
dbscan.fit(data[[0, 1]])
data["dbscan_lables"] = dbscan.labels_
plt.scatter(data[0], data[1], c=data["dbscan_lables"], s=10, cmap="rainbow")
plt.show()
findkdistances(data, 2, 0, 1, [30])
data_2 = pd.read_csv(
"/kaggle/input/hierarchical-clustering-for-customer-data/segmented_customers.csv"
)
data_2 = data_2[["Annual Income (k$)", "Spending Score (1-100)"]]
plt.scatter(data_2["Annual Income (k$)"], data_2["Spending Score (1-100)"], s=10)
plt.show()
neigh = NearestNeighbors(n_neighbors=2)
nbrs = neigh.fit(data_2[["Annual Income (k$)", "Spending Score (1-100)"]])
distances, indices = nbrs.kneighbors(
data_2[["Annual Income (k$)", "Spending Score (1-100)"]]
)
distances = np.sort(distances, axis=0)
distances = distances[:, 1]
plt.plot(distances)
plt.xlabel("data point sorted by distance")
plt.ylabel("epsilon")
plt.axhline(2)
plt.axhline(4)
plt.axhline(6)
plt.axhline(8)
plt.axhline(10)
plt.show()
dbscan = DBSCAN(eps=6, min_samples=3)
dbscan.fit(data_2[["Annual Income (k$)", "Spending Score (1-100)"]])
data_2["dbscan_lables"] = dbscan.labels_
plt.scatter(
data_2["Annual Income (k$)"],
data_2["Spending Score (1-100)"],
c=data_2["dbscan_lables"],
s=10,
cmap="rainbow",
)
plt.show()
min_samples = [2, 3, 4, 5, 6, 7]
for sample in min_samples:
dbscan = DBSCAN(eps=6, min_samples=sample)
dbscan.fit(data_2[["Annual Income (k$)", "Spending Score (1-100)"]])
data_2["dbscan_lables"] = dbscan.labels_
plt.title(f"min_samples = {sample}")
plt.scatter(
data_2["Annual Income (k$)"],
data_2["Spending Score (1-100)"],
c=data_2["dbscan_lables"],
s=50,
cmap="rainbow",
)
plt.show()
face_data = pd.read_csv("/kaggle/input/clustering-exercises/face.csv")
sns.pairplot(face_data)
face_data
findkdistances(face_data, 2, "x", "y", [4, 10])
dbscanFitPlot(8, 3, face_data, "x", "y")
wave = pd.read_csv("/kaggle/input/clustering-exercises/wave.csv")
wave.head()
sns.pairplot(wave)
findkdistances(wave, 2, "x", "y", [3, 2])
dbscanFitPlot(10, 3, wave, "x", "y")
|
# ## Theoretical Part
# a) According to the paper, generating the Gaussian pyramid is equivalent to convolving the original image with a set of equivalent weighting functions h. This function h looks more and more like a Gaussian distribution as the parameter a becomes smaller than one, but as a approaches one the shape of the weighting function h becomes more triangular. In addition, the parameter a determines how much the variance and the entropy of the histograms of the images at each pyramid level are reduced.
# b) Entropy is the minimum number of bits per pixel needed to encode an image. Since we use 8 bits to represent each pixel of a grayscale image, there are 2^8 = 256 possible values, so the maximum entropy is:
# $ -\sum\limits_{n=0}^{255} P(n)\log_2 P(n) = -\sum\limits_{n=0}^{255} 2^{-8}\log_2\left(2^{-8}\right) = -\log_2\left(2^{-8}\right) = 8 $
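# As a quick numerical check of this bound (a minimal sketch), the histogram entropy of an
# 8-bit grayscale image can be computed directly:
import numpy as np
def histogram_entropy(image, bins=256):
    hist, _ = np.histogram(image, bins=bins)
    p = hist / hist.sum()  # probability of each intensity bin
    p = p[p > 0]  # drop empty bins (0 * log 0 is taken as 0)
    return -np.sum(p * np.log2(p))  # bits per pixel, at most log2(bins) = 8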
# c)
# d)
# ## Laboratory Part
# ### A. Algorithm Implementation
import numpy as np
from skimage import io
from skimage.transform import resize
from skimage import color
import matplotlib.pyplot as plt
def GKernel(a=0.0):
w_n = np.array(
[(0.25 - a / 2), 0.25, a, 0.25, (0.25 - a / 2)]
) # initializing row vector w(n) with given constraints
w_m = w_n.reshape((5, 1)) # initializing column vector w(m)
w = np.outer(w_m, w_n) # getting the 5x5 kernel
return w
def GReduce(I, h):
if I.ndim < 3: # grayscale image
window = 5
offset = window // 2
row, col = I.shape
        # i indexes rows (bounded by width), j indexes columns (bounded by height)
        if row % 2 == 0:
            width = row - offset
        else:
            width = row - offset - 1
        if col % 2 == 0:
            height = col - offset
        else:
            height = col - offset - 1
nextLevel = np.zeros((width // 2 - 1, height // 2 - 1))
for i in range(2, width):
for j in range(2, height):
if j % 2 == 0 and i % 2 == 0:
patch = I[i - offset : i + offset + 1, j - offset : j + offset + 1]
                    psum = (patch * h).sum()  # elementwise 5x5 weighted sum
nextLevel[(i // 2) - 1, (j // 2) - 1] = psum
return nextLevel
else: # coloured image
window = 5
offset = window // 2
row, col, ch = I.shape
        # splitting rgb channels to process separately
red = I[:, :, 0]
green = I[:, :, 1]
blue = I[:, :, 2]
        if row % 2 == 0:
            height = row - offset  # usable row range (i indexes image rows)
        else:
            height = row - offset - 1
        if col % 2 == 0:
            width = col - offset  # usable column range (j indexes image columns)
        else:
            width = col - offset - 1
        nextRedLevel = np.zeros((height // 2 - 1, width // 2 - 1))
        nextGreenLevel = np.zeros((height // 2 - 1, width // 2 - 1))
        nextBlueLevel = np.zeros((height // 2 - 1, width // 2 - 1))
        # applying filter to each channel
        for i in range(2, height):
            for j in range(2, width):
                if j % 2 == 0 and i % 2 == 0:
                    patch = red[
                        i - offset : i + offset + 1, j - offset : j + offset + 1
                    ]
                    psum = (patch * h).sum()  # element-wise 5x5 weighted average
                    nextRedLevel[(i // 2) - 1, (j // 2) - 1] = psum
                    patch = green[
                        i - offset : i + offset + 1, j - offset : j + offset + 1
                    ]
                    psum = (patch * h).sum()
                    nextGreenLevel[(i // 2) - 1, (j // 2) - 1] = psum
                    patch = blue[
                        i - offset : i + offset + 1, j - offset : j + offset + 1
                    ]
                    psum = (patch * h).sum()
                    nextBlueLevel[(i // 2) - 1, (j // 2) - 1] = psum
        # combining the three channels back into a single 3d array
        nextLevel = np.stack((nextRedLevel, nextGreenLevel, nextBlueLevel), axis=-1)
        return nextLevel
img = io.imread("/kaggle/input/lenapng/lena.png")
img = img / 255
gray_img = color.rgb2gray(img)
print(img[30, 30])
io.imshow(img)
I_out = GReduce(img, GKernel(0.5))
print(I_out[30, 30])
plt.imshow(I_out)
plt.show()
|
a = 4
print(a)
type(a)
b = 3.8
print(b)
type(b)
c = "Nallathai Govindaraj"
print(c)
type(c)
# **Variables with numbers**
# **integer, floating point, complex number**
a = 2 + 3j
print(a)
type(a)
Total_mark = 400
Average = Total_mark / 5
print(Average)
type(Average)
Total_mark = 400
Missed = 24
Total_mark = Total_mark + Missed
print(Total_mark)
# **Variable with string**
Gst = "18AABCU9603R1ZM"
print(Gst)
type(Gst)
# **concatenate string**
First_name = "Nallathai"
Last_name = "Govindaraj"
Name = First_name + " " + Last_name
print(Name)
type(Name)
# **Variable with Boolean True/False**
a = 8
b = 5
c = a < b
print(c)
type(c)
# **Multiple Assignments**
a, b, c = 26, 84.5, "average"
print(a)
print(b)
print(c)
# **Naming conventions for variables**
Name = "Nallathai"  # must start with a letter, not a number or special character
name = "Govindaraj" # case sensitive
print(name)
print(Name)
First_name = "Ramasamy" # only underscore should be used not others and space
# **Reserved keywords - predefined words that cannot be used as variable names**
# e.g.: for, if, is, else, ...
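# For reference, Python can list its own reserved keywords through the standard `keyword` module:
import keyword
print(keyword.kwlist)  # ['False', 'None', 'True', 'and', 'as', ...]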
# **Print statement or function - used to display output**
a = 7
b = 3
c = 29 * 3 + 18
print(c)
print("The value of a,b,c is", a, b, c) # passing multiple variable in print statement
# **Swapping two variables**
a = 8
b = 4
c = a
a = b
b = c
print(a, b)  # swapping variables using a temporary variable
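# **Swapping with tuple unpacking** - Python can also swap two variables in one line, without a temporary variable:
a, b = b, a
print(a, b)  # the values are swapped back again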
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.linear_model import LinearRegression
df = pd.read_csv("/kaggle/input/crop-recommendation/Crop_recommendation.csv")
df.head()
df.info()
df.isnull().sum()
df.drop(["Unnamed: 8", "Unnamed: 9"], axis=1, inplace=True)
df.head()
df.tail()
crops = pd.DataFrame(df["label"].unique(), columns=["crop"])
crop_list = df["label"].unique().tolist()
print(crop_list)
|
# Basic libraries
import numpy as np
import pandas as pd
# Visualization libraries
import matplotlib.pyplot as plt
import seaborn as sns
# General ML libraries
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
# Neural networks libraries
import keras
from keras.utils.np_utils import to_categorical # convert to one-hot-encoding
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPool2D
from keras.optimizers import RMSprop
from keras.preprocessing.image import ImageDataGenerator
# ## Step 1. Load data
sample_submission = pd.read_csv("../input/digit-recognizer/sample_submission.csv")
test = pd.read_csv("../input/digit-recognizer/test.csv")
train = pd.read_csv("../input/digit-recognizer/train.csv")
print("Original data structure:")
display(train.head())
fig = sns.countplot(train["label"], alpha=0.75).set_title("Digit counts")
plt.xlabel("Digits")
plt.ylabel("Counts")
plt.show()
# ## Step 2. Data preparation
img_rows, img_cols = 28, 28
num_classes = 10
def prep_data(raw):
    y = raw["label"]  # keep the integer class labels (0-9) for one-hot encoding later
    x = raw.drop(labels=["label"], axis=1)
    x = x / 255  # scale pixel values to [0, 1]
    x = x.values.reshape(-1, img_rows, img_cols, 1)
    return x, y
X_train, Y_train = prep_data(train)
test = test / 255
test = test.values.reshape(-1, img_rows, img_cols, 1)
Y_train = to_categorical(Y_train, num_classes)
# ## Step 3. Data model
# X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size = 0.1, random_state=21)
batch_size = 16
model = Sequential()
model.add(
Conv2D(
filters=32,
kernel_size=(3, 3),
activation="relu",
input_shape=(img_rows, img_cols, 1),
)
)
model.add(Dense(128, activation="relu"))
model.add(Dropout(0.5))
model.add(Conv2D(filters=16, kernel_size=(3, 3), activation="relu"))
model.add(Dense(128, activation="relu"))
model.add(Dropout(0.5))
model.add(Conv2D(filters=16, kernel_size=(3, 3), activation="relu"))
model.add(Flatten())
model.add(Dense(128, activation="relu"))
model.add(Dense(num_classes, activation="softmax"))
model.compile(
loss=keras.losses.categorical_crossentropy, optimizer="adam", metrics=["accuracy"]
)
model.fit(X_train, Y_train, batch_size=batch_size, epochs=3, validation_split=0.1)
# predict results
results = model.predict(test)
# select the index with the maximum probability
results = np.argmax(results, axis=1)
results = pd.Series(results, name="Label")
submission = pd.concat([pd.Series(range(1, 28001), name="ImageId"), results], axis=1)
submission.to_csv("submit.csv", index=False)
|
import numpy as np
import pandas as pd
from tqdm.notebook import tqdm
import os
import matplotlib.pyplot as plt
import seaborn as sns
import math
from scipy import stats
from sklearn.preprocessing import LabelEncoder
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import train_test_split, KFold, cross_val_score
from sklearn.linear_model import (
LinearRegression,
Ridge,
SGDRegressor,
ElasticNetCV,
Lasso,
)
from sklearn.metrics import mean_squared_error, SCORERS
from sklearn.kernel_ridge import KernelRidge
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import PolynomialFeatures, MinMaxScaler
from sklearn.model_selection import GridSearchCV
from xgboost.sklearn import XGBRegressor
import warnings
from datetime import datetime
warnings.filterwarnings("ignore")
GD = False
ROOT = "/kaggle/input/house-prices-advanced-regression-techniques/"
TEST = os.path.join(ROOT, "test.csv")
TRAIN = os.path.join(ROOT, "train.csv")
df_test = pd.read_csv(TEST)
df_train = pd.read_csv(TRAIN)
# # 1. Context
# Based on the information available, we want to predict the price of a house based on a set of specific features. The answer to this question implies a regression model.
# Since we are looking to use a linear regression model, we'll want to keep features that have a significant correlation with our target value (`SalePrice`). Keeping this in mind, we'll need to evaluate the correlation of 3 types of variables:
# 1. Discrete and continuous variables (we'll be using Pearson's r)
# 2. Binary variables (we'll be using a point-biserial correlation)
# 3. Categorical variables with more than 2 options (we'll use the [correlation ratio](https://towardsdatascience.com/the-search-for-categorical-correlation-a1cf7f1888c9))
# We'll also want to exclude features correlated with each other (collinearity). Another important element we'll want to look at is the type of distribution our features have. Since we'll use a regression model, we need our data to be normally distributed - this may require applying some transformations.
# # 2. Data Exploration
# ## 2.1 High Level Data Structure
# We'll start by looking at our data from a high level. We'll try to understand:
# * shape of data
# * data type
# * missing values
# We'll also separate our dataset into target (y) and features (X). Finally, if we find any missing values we'll work on either dropping the column or replacing the null values.
rows, cols = df_train.shape
print(f"Training Dataset\n-------\ncolumns: {cols}\nrows: {rows}")
cat_cols = (
df_train.loc[:, df_train.columns != "SalePrice"]
.select_dtypes(include=["object"])
.columns
)
num_cols = (
df_train.loc[:, df_train.columns != "SalePrice"]
.select_dtypes(exclude=["object"])
.columns
)
print(
f"categorical columns: {len(cat_cols)}\nnumeric columns: {len(num_cols)}\n\n=================\n"
)
rows, cols = df_test.shape
print(f"Test Dataset\n-------\ncolumns: {cols}\nrows: {rows}")
cat_cols = (
df_test.loc[:, df_test.columns != "SalePrice"]
.select_dtypes(include=["object"])
.columns
)
num_cols = (
df_test.loc[:, df_test.columns != "SalePrice"]
.select_dtypes(exclude=["object"])
.columns
)
print(f"categorical columns: {len(cat_cols)}\nnumeric columns: {len(num_cols)}")
# Our training dataset has 1,460 rows and 81 columns (80 features total). This is a pretty small dataset with a somewhat large amount of features. Out of our 80 columns we have:
# * 37 numeric columns
# * 43 categorical/string columns
# This tells us we'll have to do some encoding on our categorical values to be able to take advantage of the full dataset
nulls = {}
for col in df_train.columns:
nulls[col] = 1 - (len(df_train[df_train[col].isna()][col]) / df_train.shape[0])
labels = []
vals = []
for k, v in nulls.items():
if v < 1.0:
labels.append(k)
vals.append(v)
_, ax = plt.subplots(figsize=(12, 5))
sns.barplot(y=vals, x=labels, color="lightskyblue")
ax.set_xticklabels(labels=labels, rotation=45)
plt.title("% non-null values by columns")
ax.set_xlabel("columns")
ax.set_ylabel("%")
plt.show()
# Most of our columns are fully populated; only 18 of the 81 have missing values. Among those 18, only 4 have a very small share of non-null data (<20%). Based on this information, it is fair to drop those 4 columns from our dataset.
to_drop = []
for k, v in nulls.items():
if v < 0.6:
to_drop.append(k)
# Let's use a copy of our dataframe so that we won't have to reload the entire dataset if we need
# to start over (especially a good idea when working with very large datasets)
df_train_c = df_train.drop(to_drop, axis=1)
rows, cols = df_train_c.shape
print(f"columns: {cols}\nrows: {rows}")
cat_cols = (
df_train_c.loc[:, df_train_c.columns != "SalePrice"]
.select_dtypes(include=["object"])
.columns
)
num_cols = (
df_train_c.loc[:, df_train_c.columns != "SalePrice"]
.select_dtypes(exclude=["object"])
.columns
)
print(f"categorical columns: {len(cat_cols)}\nnumeric columns: {len(num_cols)}")
# Let's now fill in the missing values for our 14 columns with N/A values. We'll use the `SimpleImputer()` method using the most frequent value present in the column to replace null values.
si = SimpleImputer(strategy="most_frequent")
for k, v in nulls.items():
if (v < 1) and (k not in to_drop):
df_train_c[k] = si.fit_transform(df_train_c[k].values.reshape(-1, 1))
df_train_c = df_train_c[df_train_c.GrLivArea < 4000]
# We now have 77 columns. The 4 columns we dropped were all categorical.
# Now that we have a somewhat cleaner set, we can start working on understanding the correlation of variables with `SalePrice`. First we'll separate our dependent (y) and independent variables (X). We'll also remove the `Id` from our dataframe as we do not need this information right now.
X = df_train_c.loc[:, df_train_c.columns != "SalePrice"]
y = df_train_c.loc[:, df_train_c.columns == "SalePrice"]
df_train_ID = df_train.Id
df_test_ID = df_test.Id
X = X.loc[:, X.columns != "Id"]
# ## 2.2 Checking Data Skewness
# As we are going to perform a regression to predict our house sale prices, we should make sure our numerical features are normally distributed. If not, we should apply a transform before moving forward.
# Let's first take a look at our target variable y.
fig, ax = plt.subplots(1, 2, figsize=(15, 5))
sns.distplot(y, fit=stats.norm, ax=ax[0], kde=False)
stats.probplot(y.SalePrice, plot=ax[1])
plt.show()
print(f"Fisher-Pearson coeficient of skewness: {stats.skew(y.SalePrice.values):.2f}")
# Our code gives us 3 outputs:
# * our data distribution with a normal curve fitted. This gives us a way to see the actual distribution of our data
# * a QQ plot. This is used as a visual check of whether our data is normally distributed. It sorts our values in ascending order (y-axis) and plots them against theoretical quantiles (x-axis) from a normal distribution. A normally distributed set will form a straight line
# * the Fisher-Pearson coefficient of skewness. A coefficient of 0 indicates no skewness while a positive coefficient indicates a right-skewed distribution
# We can visually see that our target variable is not normally distributed. This is confirmed when we compute the Fisher-Pearson coefficient. We should apply a transformation.
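# For reference, the Fisher-Pearson coefficient reported by `scipy.stats.skew` is the standardized third central moment:
# $ g_1 = \frac{\frac{1}{n}\sum_{i=1}^{n}(x_i-\bar{x})^3}{\left[\frac{1}{n}\sum_{i=1}^{n}(x_i-\bar{x})^2\right]^{3/2}} $
# A value of 0 corresponds to a symmetric distribution, a positive value to a right skew (long upper tail) and a negative value to a left skew.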
# Let's now check whether any of our numerical variables are normally distributed. We'll consider any feature with an absolute skewness greater than 0.5 to be skewed and in need of a transformation.
# We know that the following features are categorical variables that have been encoded, so we should ignore them:
# * `MSSubClass`
# * `OverallQual`
# * `OverallCond`
numerical_columns = (
X.loc[
:, ~X.columns.isin(["MSSubClass", "OverallQual", "OverallCond", "GarageYrBlt"])
]
.select_dtypes(include=["int", "float"])
.columns
)
sk = (
X[numerical_columns]
.apply(lambda x: stats.skew(x.dropna()))
.to_frame("Fisher-Pearson Coef")
)
skw_cols = list(sk[abs(sk["Fisher-Pearson Coef"]) > 0.5].index)
sk[abs(sk["Fisher-Pearson Coef"]) > 0.5]
# 24 of our numerical columns have a skewed distribution. We'll need to transform these in order to perform our regression. We'll use a Box Cox transformation. It is important to keep the lambda value constant in our transformation when using box cox.
# A lambda value of 0 for the Box Cox transformation simply applies a log transformation.
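# A quick check of that statement (a minimal sketch on arbitrary positive values, not part of the analysis):
_check = np.linspace(0, 10, 5)
print(np.allclose(stats.boxcox(1 + _check, lmbda=0.0), np.log1p(_check)))  # -> True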
lmbda = 0.0
X[skw_cols] = (
X[numerical_columns]
.loc[:, X[numerical_columns].columns.isin(skw_cols)]
.apply(lambda x: stats.boxcox(1 + x, lmbda=lmbda))
)
y = y.apply(lambda x: stats.boxcox(1 + x, lmbda=lmbda))
# We now have applied our transformation to our training data. Let's check the skewness of our data again to confirm we are now working with clean data.
sk["Fisher-Pearson Coef (After)"] = X[numerical_columns].apply(lambda x: stats.skew(x))
sk[sk.index.isin(skw_cols)]
# Not all of our columns have been successfully transformed, though the majority of them now have a distribution with a coefficient of skewness close to 0.
fig, ax = plt.subplots(1, 2, figsize=(15, 5))
sns.distplot(y, fit=stats.norm, ax=ax[0], kde=False)
stats.probplot(y.SalePrice, plot=ax[1])
plt.show()
print(f"Fisher-Pearson coeficient of skewness: {stats.skew(y.SalePrice.values):,.2f}")
# The transformation of our target variable had a great effect. It is now almost normally distributed, with a very low coefficient of skewness.
# ### 2.3 Selecting our features
# #### 2.3.1 Continuous Variables
# Let's start with our numerical values. This is the simplest case. `Pandas` offers a simple way to calculate the Pearson-r correlation, so this will be straightforward. We do need to be aware of a few elements:
# * `MSSubClass`, `OverallQual`, `OverallCond` are encoded categorical variables. We may need to re-encode them with a code starting at 0
# * `MoSold`, `YrSold`, `YearBuilt`, `YearRemodAdd`, `GarageYrBlt` are discrete variables and a Spearman correlation may be more appropriate
# Let's exclude those features and run our Pearson correlation only against the continuous variables.
X_disc = X.loc[
:,
~(X.columns.isin(["MoSold", "YrSold", "YearBuilt", "YearRemodAdd", "Id"]))
& (X.columns.isin(numerical_columns)),
]
X_disc["y"] = y
_, ax = plt.subplots(figsize=(25, 15))
sns.heatmap(X_disc.corr(), annot=True, cbar=False, cmap="YlGnBu")
plt.show()
# We are now able to see the correlation of our continuous variables with our dependent variable `y`. We'll keep only variables with a moderate to strong correlation (i.e. |r| >= 0.3).
mask = abs(X_disc.corr()["y"] >= 0.3)
corr_variables = X_disc.corr()["y"][mask]
corr_variables = list(corr_variables[corr_variables.index != "y"].index)
corr_variables
# Now that we have selected our continuous features, let's control for collinearity among them. Features strongly correlated with each other explain the same information in our model, hence we can get rid of one of the two variables.
_, ax = plt.subplots(figsize=(15, 8))
sns.heatmap(X_disc.loc[:, corr_variables].corr(), annot=True, cbar=True, cmap="YlGnBu")
plt.show()
mask = (abs(X_disc.loc[:, corr_variables].corr()) > 0.8) & (
X_disc.loc[:, corr_variables].corr() != 1.0
)
cols = list(
X_disc.loc[:, corr_variables].corr()[mask].dropna(how="all", axis=1).columns
)
to_remove = []
for i in range(0, len(cols), 2):
to_remove.append(cols[i])
continous_features = list(set(corr_variables) - set(to_remove))
continous_features
# #### 2.3.2 Discrete Variables
# Now that we have our continuous numerical data, let's look at our discrete variables. One thing to note is that `YearRemodAdd` is equal to `YearBuilt` if no remodeling has been done. Therefore, we'll engineer it into a `1`/`0` dichotomous variable where `1` indicates a remodeled house and `0` indicates a non-remodeled house.
# We'll also add 3 new fields replacing `YearBuilt`, `YearRemodAdd`, `GarageYrBlt`, capturing the recency (as opposed to the year). It is fair to assume that what the relationship really captures is the timeframe between the remodeling/construction and the sale date.
X["IsRemod"] = np.where(
np.expm1(X[["YearBuilt"]]).astype("int").YearBuilt == X.YearRemodAdd, 0, 1
)
X["YrSinceBuilt"] = X.YrSold - X.YearBuilt
X["YrSinceRemod"] = X.YrSold - X.YearRemodAdd
X["YrSinceGarageYrBlt"] = X.YrSold - X.GarageYrBlt
X["HasMasVnr"] = np.where(X.MasVnrType == "None", 0, 1)
# tmp = X[X['YrSinceRemod'] == 0]
# X = X[X['YrSinceRemod'] != 0]
# tmp['YrSinceRemod'] = tmp.YrSinceRemod.replace(0,np.nan)
# X = X.append(tmp)
X_discrete = X.loc[
:,
X.columns.isin(
["MoSold", "YrSold", "YrSinceBuilt", "YrSinceRemod", "YrSinceGarageYrBlt"]
),
]
X_discrete["y"] = y
_, ax = plt.subplots(figsize=(15, 8))
sns.heatmap(X_discrete.corr("spearman"), annot=True, cmap="YlGnBu")
plt.show()
# We can see 3 features that have a medium to high correlation with our dependent variable. We'll check for any collinearity between these 3 variables. The relationship tends to indicate that as the recency grows (the house was built or renovated farther in the past) we get a smaller price. We can also see a pretty strong collinearity between these 3 variables (especially between the garage and construction years). Hence we'll go ahead and only keep 2 of these 3 variables:
# * `YrSinceBuilt`
# * `YrSinceRemod`
mask = (abs(X_discrete.corr("spearman")["y"]) >= 0.3) & (
X_discrete.corr("spearman")["y"] != 1.0
)
X_discrete_cols = list(X_discrete.corr("spearman")["y"][mask].index)
discrete_features = list(set(X_discrete_cols) - set(["YrSinceGarageYrBlt"]))
X_num = X.loc[:, X.columns.isin(continous_features + discrete_features)]
X_num["y"] = y
# Now that we have our numerical features, let's plot these variables against our target variable `SalePrice` to get a visual representation of the relationships.
sns.pairplot(
x_vars=continous_features[: int(len(continous_features) / 2)],
y_vars=["y"],
data=X_num,
height=3.5,
)
sns.pairplot(
x_vars=continous_features[int(len(continous_features) / 2) :],
y_vars=["y"],
data=X_num,
height=3.5,
)
sns.pairplot(x_vars=discrete_features, y_vars=["y"], data=X_num, height=3.5)
plt.show()
# #### 2.3.3 Dichotomous Variables
# To analyse the relationship between our dichotomous variables and our continuous dependent variable `y` we'll use the point-biserial correlation. It is important to note that we'll consider only natural dichotomous variables. We have the following variables:
# * `CentralAir`
# * `IsRemod`
le = LabelEncoder()
X["CentralAir_enc"] = X[["CentralAir"]].apply(lambda x: le.fit_transform(x.values))
r, p = stats.pointbiserialr(X.IsRemod.values, y.values.ravel())
print(f"IsRemod - r: {r} | p: {p}")
r, p = stats.pointbiserialr(X.CentralAir_enc.values, y.values.ravel())
print(f"CentralAir_enc - r: {r} | p: {p}")
r, p = stats.pointbiserialr(X.HasMasVnr.values, y.values.ravel())
print(f"MasVnr_enc - r: {r} | p: {p}")
dico = ["CentralAir_enc", "HasMasVnr"]
# Based on the threshold we set to consider a relationship with our target variable, we can see that `CentralAir_enc` and `HasMasVnr` both fit our requirement.
# #### 2.3.4 Categorical Variables
# Now let's move on to the categorical variables. We'll use the correlation ratio to measure the relationship between each categorical variable and our target value. The correlation ratio ranges from 0 to 1, where 1 indicates that the variance in our target value comes from differences between categories and 0 indicates that it comes from differences within categories.
# What we are interested in is the variance between categories (i.e. a value close to 1), as it indicates that belonging to a specific category influences the `SalePrice`.
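# For reference, the quantity computed in the loop below, for categories $k$ with $n_k$ observations, category means $\bar{y}_k$ and overall mean $\bar{y}$, is:
# $ \eta = \sqrt{\frac{\sum_{k} n_k\,(\bar{y}_k - \bar{y})^2}{\sum_{i} (y_i - \bar{y})^2}} $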
categoricals = list(
X.loc[:, X.columns != "CentralAir"].select_dtypes(include="object").columns
)
categoricals = categoricals + ["MSSubClass", "OverallQual", "OverallCond"]
X_categoricals = X[categoricals].apply(lambda x: le.fit_transform(x))
X_categoricals["y"] = y
corr = []
for col in tqdm(X_categoricals.columns):
cat = X_categoricals[col].unique()
y_avg = []
n_cat = []
for c in cat:
y_avg.append(X_categoricals[X_categoricals[col] == c].y.mean())
n_cat.append(len(X_categoricals[X_categoricals[col] == c]))
y_total_avg = np.sum(np.multiply(y_avg, n_cat) / np.sum(n_cat))
numerator = np.sum(
(np.multiply(n_cat, np.power(np.subtract(y_avg, y_total_avg), 2)))
)
denominator = np.sum(np.power(np.subtract(X_categoricals.y, y_total_avg), 2))
if denominator == 0:
eta = 0.0
corr.append((col, eta))
else:
eta = np.sqrt(numerator / denominator)
corr.append((col, eta))
print(corr)
categoricals_columns = []
for el in corr:
if el[1] >= 0.3:
categoricals_columns.append(el[0])
categoricals_columns.pop(len(categoricals_columns) - 1)
categoricals_columns
X_cat = X_categoricals[categoricals_columns]
X_cat["y"] = y
sns.pairplot(
x_vars=categoricals_columns[: int(len(categoricals_columns) / 2)],
y_vars=["y"],
data=X_cat,
height=3.5,
)
sns.pairplot(
x_vars=categoricals_columns[int(len(categoricals_columns) / 2) :],
y_vars=["y"],
data=X_cat,
height=3.5,
)
plt.show()
# ## 3. Training & Testing our Model
# Let's first recap. what we have done so far:
# 1. *descriptive analytics*: we looked at the shape of our data, the missing values as well as the data types we are working with. We also did some very light data engineering and data transformation
# 2. *Selected our features*: after looking at the data we selected our features based on their correlation with our target variable. We split the feature selection based on the type of variables we were working with.
# It is now time to train and test our model. We'll first define a baseline so that we can see how well our model is performing. For our model, I have chosen to use a stacked ensemble model using 4 submodels:
# * OLS Regression
# * Ridge Regression
# * ElasticNet Regression
# * GB Regression
# * XGB Regression
# The choice of using a stacked model was driven by notebooks from other users and an interest in learning this technique.
# ### 3.1 Bookkeeping
# Let's first do some bookkeeping work. First we'll combine all of our features, then split our data into a train, a test and a validation set, and finally get our baseline score. We'll use an untuned single XGB Regressor model as a baseline.
features = categoricals_columns + continous_features + discrete_features + dico
X = X[features]
y = y
X[categoricals_columns] = X[categoricals_columns].apply(lambda x: le.fit_transform(x))
X_train, X_test, y_train, y_test = train_test_split(
X.loc[:, X.columns != "y"], y, test_size=0.3
)
X_test, X_val, y_test, y_val = train_test_split(X_test, y_test, test_size=0.3)
# Now that we have our sets, let's define the cross-validation function that will be used to measure the performance of our models when performing some tuning.
n_fold = 5
def rmseModel(m):
    kf = KFold(n_splits=n_fold, random_state=0, shuffle=True)  # shuffled k-fold splitter
    rmse = np.sqrt(-cross_val_score(m, X, y, scoring="neg_mean_squared_error", cv=kf))
    return rmse
XGBBaseline = XGBRegressor(objective="reg:squarederror")
XGBBaseline.fit(X_train, y_train)
pred = XGBBaseline.predict(X_val)
rmseBaseline = np.sqrt(mean_squared_error(pred, y_val.values))
print(f"Baseline RMSE: {rmseBaseline}")
# ### 3.2 Model Definition
# We now have our baseline score and the approach we want to use to tackle this problem. Let's now train our models, implement our stacked ensemble and evaluate its performance against our baseline.
# #### 3.2.1 OLS Regression
ols_reg = LinearRegression()
ols_rge_scores = rmseModel(ols_reg)
print(f"OLS Reg RMSE, mean: {np.mean(ols_rge_scores)}, stdv: {np.std(ols_rge_scores)}")
# #### 3.2.2 Ridge Regression
if GD:
print("Running Grid Search for model tunning")
params = {
"alpha": [0.1, 0.3, 0.5, 0.7, 0.9],
"solver": ["auto", "svd", "cholesky", "lsqr"],
}
ridge_reg = Ridge()
gs = GridSearchCV(ridge_reg, params, cv=5)
gsf = gs.fit(X_train, y_train).best_params_
else:
gsf = {"alpha": 0.9, "solver": "auto"}
ridge_reg = Ridge(**gsf)
ridge_reg_scores = rmseModel(ridge_reg)
print(
f"Ridge Reg RMSE, mean: {np.mean(ridge_reg_scores)}, stdv: {np.std(ridge_reg_scores)}"
)
# #### 3.2.3 ElasticNet
if GD:
print("Running Grid Search for model tunning")
params = {
"l1_ratio": [0.1, 0.5, 0.7, 0.9, 0.92, 0.95, 0.99, 1],
"n_alphas": [10, 15, 50, 100],
"normalize": [True, False],
"max_iter": [5, 10, 50, 100],
"tol": [0.001, 0.0001, 0.00001],
}
el_reg = ElasticNetCV()
gs = GridSearchCV(el_reg, params, cv=5, n_jobs=-1, verbose=1)
gsf = gs.fit(X_train, y_train).best_params_
else:
gsf = {
"l1_ratio": 0.9,
"max_iter": 50,
"n_alphas": 50,
"normalize": True,
"tol": 0.0001,
}
el_reg = ElasticNetCV(**gsf)
el_reg_scores = rmseModel(el_reg)
print(
f"Elastic Net Reg RMSE, mean: {np.mean(el_reg_scores)}, stdv: {np.std(el_reg_scores)}"
)
# #### 3.2.4 Gradient Boost Regression
# We followed the approach laid out by [Aarshay Jain](https://www.analyticsvidhya.com/blog/2016/02/complete-guide-parameter-tuning-gradient-boosting-gbm-python/) on [analyticsvidhya.com](http://analyticsvidhya.com) to tune our GB model.
if GD:
print("Running Grid Search for model tunning")
    params = {
        "min_samples_split": [80],
        "min_samples_leaf": [25],
        "max_depth": [9],
        "max_features": [4],
        "n_estimators": [2500],
        "learning_rate": [0.005],
        "subsample": [0.87],
    }
GB = GradientBoostingRegressor()
gs = GridSearchCV(GB, param_grid=params, cv=5, n_jobs=-1, verbose=1)
gsf = gs.fit(X_train, y_train).best_params_
else:
gsf = {
"learning_rate": 0.005,
"max_depth": 9,
"max_features": 4,
"min_samples_leaf": 25,
"min_samples_split": 80,
"n_estimators": 2500,
"subsample": 0.87,
}
GB_reg = GradientBoostingRegressor(**gsf)
GB_reg_scores = rmseModel(GB_reg)
print(f"GB Reg, mean: {np.mean(GB_reg_scores)}, stdv: {np.std(GB_reg_scores)}")
# #### 3.2.5 Extreme Gradient Boosting Regression
# We followed the approach laid out by [Aarshay Jain](https://www.analyticsvidhya.com/blog/2016/03/complete-guide-parameter-tuning-xgboost-with-codes-python/) on [analyticsvidhya.com](http://analyticsvidhya.com) to tune our XGB model.
if GD:
print("Running Grid Search for model tunning")
    params = {
        "max_depth": [1],
        "min_child_weight": [2],
        "gamma": [0.0],
        "subsample": [0.7],
        "reg_alpha": [1e-5, 1e-4, 1e-6],
        "colsample_bytree": [0.87],
        "scale_pos_weight": [1],
    }
xgb_reg = XGBRegressor()
gs = GridSearchCV(xgb_reg, params, cv=5, n_jobs=-1, verbose=1)
gsf = gs.fit(X_train, y_train).best_params_
else:
    gsf = {
        "colsample_bytree": 0.87,
        "gamma": 0.0,
        "max_depth": 1,
        "min_child_weight": 2,
        "reg_alpha": 1e-06,
        "scale_pos_weight": 1,
        "subsample": 0.7,
    }
xgb_reg = XGBRegressor(
**gsf,
objective="reg:squarederror",
nthread=4,
learning_rate=0.005,
n_estimators=10000,
)
xgb_reg_scores = rmseModel(xgb_reg)
print(f"XGB Reg, mean: {np.mean(xgb_reg_scores)}, stdv: {np.std(xgb_reg_scores)}")
# It is encouraging to see that all of our tuned models perform better than our baseline - with our XGB model performing the best, though it is not as stable judging by the standard deviation. We'll now build our stacked ensemble model and test how it performs.
# ### 3.3 Building and Testing our Ensemble Model
# #### 3.3.1 How is our model going to work?
# Our approach to building our stacked ensemble will be to use the outputs from 4 of our models (OLS, Ridge, Elastic Net, and GB) as the input for an ensemble regression model. We'll then combine the prediction from this model with that of the XGB model (allocating appropriate weights) to get our final prediction.
olsM = ols_reg.fit(X_train, y_train)
elM = el_reg.fit(X_train, y_train)
RidgeM = ridge_reg.fit(X_train, y_train)
GBregM = GB_reg.fit(X_train, y_train)
XGBoostM = xgb_reg.fit(X_train, y_train)
ensembleOutput = np.hstack(
(
olsM.predict(X_test),
RidgeM.predict(X_test),
elM.predict(X_test).reshape(-1, 1),
GBregM.predict(X_test).reshape(-1, 1),
)
)
stackedReg = LinearRegression()
stackedM = stackedReg.fit(ensembleOutput, y_test)
valEnsembleOutput = np.hstack(
(
olsM.predict(X_val),
RidgeM.predict(X_val),
elM.predict(X_val).reshape(-1, 1),
GBregM.predict(X_val).reshape(-1, 1),
)
)
stackedPred = stackedM.predict(valEnsembleOutput)
pred = (
np.expm1(stackedPred).reshape(1, -1)[0] * 0.55
+ np.expm1(XGBoostM.predict(X_val)) * 0.45
)
rmse_test = np.sqrt(mean_squared_error(np.log1p(pred), y_val.values))
print(f"rmse for test data: {rmse_test}")
# # 4. Submitting our Predictions
# ## 4.1 Transforming our data
df_test["IsRemod"] = np.where(df_test.YearBuilt == df_test.YearRemodAdd, 0, 1)
df_test["YrSinceBuilt"] = df_test.YrSold - df_test.YearBuilt
df_test["YrSinceRemod"] = df_test.YrSold - df_test.YearRemodAdd
df_test["YrSinceGarageYrBlt"] = df_test.YrSold - df_test.GarageYrBlt
df_test["HasMasVnr"] = np.where(df_test.MasVnrType == "None", 0, 1)
df_test["CentralAir_enc"] = df_test[["CentralAir"]].apply(
lambda x: le.fit_transform(x.values)
)
dfPred = df_test[features]
nulls = {}
for col in dfPred.columns:
nulls[col] = 1 - (len(dfPred[dfPred[col].isna()][col]) / dfPred.shape[0])
for k, v in nulls.items():
if v < 1.0:
dfPred[k] = si.fit_transform(dfPred[k].values.reshape(-1, 1))
dfPred[list(set(skw_cols).intersection(set(dfPred.columns)))] = dfPred[
list(set(skw_cols).intersection(set(dfPred.columns)))
].apply(lambda x: stats.boxcox(1 + x, lmbda=lmbda))
dfPred[categoricals_columns] = dfPred[categoricals_columns].apply(
lambda x: le.fit_transform(x)
)
# ## 4.2 Making our Prediction
outputPred = np.hstack(
(
olsM.predict(dfPred),
RidgeM.predict(dfPred),
elM.predict(dfPred).reshape(-1, 1),
GBregM.predict(dfPred).reshape(-1, 1),
)
)
stackedPred = stackedM.predict(outputPred)
finalPred = (
np.expm1(stackedPred).reshape(1, -1)[0] * 0.55
+ np.expm1(XGBoostM.predict(dfPred)) * 0.45
)
# ## 4.3 Submitting our File
dff = pd.DataFrame({"Id": df_test.Id, "SalePrice": finalPred})
dff.to_csv(f"submission_{datetime.today().strftime('%Y%m%d')}.csv", index=False)
|
# # 🌅 ☀️ 🌃 Time of Day recognition using CNN:
# # 🧱 Contents:
# [ **1 ) Introduction**](#content1)
# [ **2 ) Data**](#content2)
# [ **3 ) Xception architecture**](#content3)
# [ **4 ) Import Necessary libraries**](#content4)
# [ **5 ) Model development**](#content5)
# [ **6 ) Predictions**](#content6)
# [ **7 ) Evaluation**](#content7)
# [ **8 ) Final thoughts**](#content8)
# # 📖 Introduction
# In this notebook, I'll be exploring the classification of different times of day, first with my own architecture and then with the Xception deep learning architecture. The classes I'll be working with are: Daytime, Nighttime, and Sunrise. By the end of this notebook, I hope to have a model that can accurately identify the time of day of a scene.
# # 💾 Data
# The dataset consists of images of 3 different times of day that were obtained by webscraping the Unsplash website. Unsplash is a platform that allows users to download high-quality images for free, which makes it a great resource for computer vision tasks. The dataset has been made available on Kaggle for anyone who wants to test their computer vision skills.
# # 💡 Xception Architecture
# Xception is a convolutional neural network (CNN) architecture that was introduced by François Chollet in 2016. It is based on the Inception architecture but replaces the standard convolutional layers with depthwise separable convolutions.
# Depthwise separable convolutions are composed of two stages: a depthwise convolution and a pointwise convolution. In the depthwise convolution stage, each filter in the convolutional layer is applied to a single input channel, producing a set of output channels. In the pointwise convolution stage, a 1x1 convolution is applied to combine the output channels from the depthwise convolution stage into a smaller number of output channels.
# This approach reduces the number of parameters in the network and improves its computational efficiency while maintaining or improving accuracy. Xception has been shown to achieve state-of-the-art performance on several image classification benchmarks.
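# As a minimal sketch (illustration only, not part of the model built below), the two stages can be written explicitly in Keras with a DepthwiseConv2D followed by a 1x1 Conv2D; SeparableConv2D fuses them into a single layer:
from tensorflow.keras import Input, layers
_inp = Input(shape=(224, 224, 3))
_dw = layers.DepthwiseConv2D(kernel_size=3, padding="same")(_inp)  # depthwise stage: one 3x3 filter per input channel
_pw = layers.Conv2D(filters=64, kernel_size=1)(_dw)  # pointwise stage: 1x1 convolution recombines the channels
_fused = layers.SeparableConv2D(filters=64, kernel_size=3, padding="same")(_inp)  # the same idea as a single layer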
# You can find information about the Xception network on the original paper published by the authors, as well as on the TensorFlow and Keras documentation.
# > Here are some links where you can find more information:
# * Original paper: https://arxiv.org/pdf/1610.02357.pdf
# * TensorFlow documentation: https://www.tensorflow.org/api_docs/python/tf/keras/applications/Xception
# * Keras documentation: https://keras.io/api/applications/xception/
# # 🏗️ Import Necessary Libraries
import os
import shutil
import random
from tensorflow.keras.applications import Xception
from tensorflow.keras.applications.xception import preprocess_input
from tensorflow.keras.layers import (
Conv2D,
MaxPool2D,
BatchNormalization,
Dense,
Flatten,
Dropout,
)
from tensorflow.keras.models import Model, Sequential
import numpy as np
import matplotlib.pyplot as plt
from tensorflow.keras.preprocessing import image
from tensorflow.keras.preprocessing.image import ImageDataGenerator
#
# # 📈 Model Development
# To be able to accurately train and evaluate our model we need to split the data into train, validation and test sets. One way to do it is to load all the data and do a regular train_test_split, but that can be inefficient and sometimes infeasible.
# Instead, we'll create new folders in our working environment containing the train, validation and test sets.
# * The initial_path variable is set to the directory containing the original dataset.
# * The directories variable contains the names of the top-level directories that will be created to hold the training, validation, and test sets.
# * The subdirectories variable contains the names of the subdirectories within each top-level directory. These subdirectories correspond to the different classes or labels of the dataset, which in this case are 'daytime', 'nighttime', and 'sunrise'.
# * The props variable contains the proportions of the original dataset that will be allocated to the training, validation, and test sets, respectively. In this case, **90%** of the data is allocated to the training set, **5%** to the validation set, and **5%** to the test set.
# We will then loop over each top-level directory and subdirectory, and create the corresponding directories for each set. *For each subdirectory, the files in the original dataset are shuffled randomly.*
# The total number of files is calculated, and the indices of the files that belong to each set are computed based on the proportions defined in props.
# * If the current directory is the training directory, the files from the beginning of the shuffled list up to the training index are assigned to the training set.
# * If the current directory is the validation directory, the files from the training index up to the validation index are assigned to the validation set.
# * If the current directory is the test directory, the remaining files are assigned to the test set.
# For each file in each set, the script creates a new file path in the corresponding subdirectory of the appropriate top-level directory, and copies the file from the original dataset to the new file path using **shutil.copy()**.
# After the script has finished running, there will be three new directories containing the training, validation, and test sets of the original dataset, respectively, with the files split according to the proportions specified in props.
# Define the initial path
initial_path = "/kaggle/input/timeofdaydataset"
# Define the new top-level directory names
directories = ["train", "val", "test"]
# Define the subdirectory names for each top-level directory
subdirectories = ["daytime", "nighttime", "sunrise"]
# Define the proportions for train, validation, and test sets
props = [0.9, 0.05, 0.05]
# Loop over the top-level directories and subdirectories, and create the corresponding directories
for directory in directories:
for subdirectory in subdirectories:
path = os.path.join(initial_path, subdirectory)
files = os.listdir(path)
random.shuffle(files)
total = len(files)
train_end = int(total * props[0])
val_end = train_end + int(total * props[1])
if directory == "train":
new_files = files[:train_end]
elif directory == "val":
new_files = files[train_end:val_end]
else:
new_files = files[val_end:]
new_path = os.path.join(directory, subdirectory)
os.makedirs(new_path, exist_ok=True)
for file in new_files:
old_file_path = os.path.join(path, file)
new_file_path = os.path.join(new_path, file)
shutil.copy(old_file_path, new_file_path)
# To prepare the data for use, we used Keras' ImageDataGenerator class to perform data augmentation and create batches of image data that are suitable for feeding into the model. Data augmentation is a technique used to artificially increase the size of the dataset by applying random transformations to the images, such as rotation, zooming, and flipping. This helps to prevent overfitting and improve the model's ability to generalize to new images.
# Define the directories for the train, validation, and test sets
train_dir = "train"
val_dir = "val"
# Define the image dimensions and batch size
img_height = 224
img_width = 224
batch_size = 32
# Define the data generators for the train, validation, and test sets
train_datagen = ImageDataGenerator(
preprocessing_function=preprocess_input,
rotation_range=20,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
)
train_generator = train_datagen.flow_from_directory(
train_dir,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode="categorical",
color_mode="rgb",
)
val_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
val_generator = val_datagen.flow_from_directory(
val_dir,
target_size=(img_height, img_width),
batch_size=batch_size,
class_mode="categorical",
color_mode="rgb",
)
# ### Defining the architecture:
# * Convolutional layers : Convolutional layers are the most important component of a CNN. These layers apply a set of filters to the input image to extract features. In this code, we use three consecutive convolutional layers with the same padding, a kernel size of 3x3, and ReLU activation function. The first sets of convolutional layers have 64 filters, and the number of filters is doubled in the next sets of layers, to 128 and 256, respectively. We use a small kernel size of 3x3, as this has been found to be effective for image classification tasks.
# * Batch normalization: Batch normalization is used to improve the stability and speed of the training process. It normalizes the input of a layer by subtracting the batch mean and dividing by the batch standard deviation. This helps to alleviate the internal covariate shift problem and reduces overfitting.
# * Max pooling: Max pooling is used to downsample the feature maps generated by the convolutional layers. This reduces the spatial size of the feature maps, which reduces the number of parameters in the model and makes it less prone to overfitting. In this code, we use max pooling with a pool size of 2x2.
# * Flatten: The output of the convolutional layers is flattened into a 1D vector. This is done to pass the output to the fully connected layers.
# * Fully connected layers: The flattened output is passed through two fully connected layers, with 256 and 128 neurons, respectively. These layers use ReLU activation and are followed by batch normalization and dropout.
# * Output layer: The output layer uses a softmax activation function, which is commonly used for multiclass classification tasks. In this code, we have 3 classes, so the output layer has 3 neurons.
# ### Compiling the model:
# * Optimizer: The 'adam' optimizer is used. Adam is a popular optimizer that uses adaptive learning rates and momentum to speed up the training process and converge faster.
# * Loss function: The 'categorical_crossentropy' loss function is used. This is the standard loss function for multiclass classification tasks. It measures the difference between the predicted class probabilities and the true class probabilities.
# ### Fitting the model:
# * Train generator: We are using the train generator that we defined earlier.
# * Steps per epoch: The number of steps per epoch is set to train_generator.samples // batch_size. This is the number of batches of data that the model will train on in one epoch.
# * Epochs: The number of epochs is set to 20. An epoch is a complete pass through the entire training dataset.
# * Validation generator: We are using the validation generator that we defined earlier.
# * Validation steps: The number of validation steps is set to val_generator.samples // batch_size. This is the number of batches of data that the model will validate on in one epoch.
# Defining the architecture
model = Sequential()
model.add(
Conv2D(64, (3, 3), padding="same", activation="relu", input_shape=(224, 224, 3))
)
model.add(BatchNormalization())
model.add(Conv2D(64, (3, 3), activation="relu", padding="same"))
model.add(BatchNormalization())
model.add(Conv2D(64, (3, 3), activation="relu", padding="same"))
model.add(MaxPool2D(2, 2))
model.add(Conv2D(128, (3, 3), activation="relu", padding="same"))
model.add(BatchNormalization())
model.add(Conv2D(128, (3, 3), activation="relu", padding="same"))
model.add(BatchNormalization())
model.add(Conv2D(128, (3, 3), activation="relu", padding="same"))
model.add(MaxPool2D(2, 2))
model.add(Conv2D(256, (3, 3), activation="relu", padding="same"))
model.add(BatchNormalization())
model.add(Conv2D(256, (3, 3), activation="relu", padding="same"))
model.add(BatchNormalization())
model.add(Conv2D(256, (3, 3), activation="relu", padding="same"))
model.add(MaxPool2D(2, 2))
model.add(Flatten())
model.add(Dense(256, activation="relu"))
model.add(BatchNormalization())
model.add(Dropout(0.5))
model.add(Dense(128, activation="relu"))
model.add(Dense(3, activation="softmax"))
# Compiling the model
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
# Fiting the model to the training set
model.fit(
train_generator,
steps_per_epoch=train_generator.samples // batch_size,
epochs=20,
validation_data=val_generator,
validation_steps=val_generator.samples // batch_size,
)
# Now that we have tried our own architecture and were able to see the accuracy on both train and validation sets, we are going to use transfer learning and see if we can get an increase in performance.
# * Loading a Pre-trained Model: The first step is to load a pre-trained model. Here, the Xception model is used, which is a pre-trained convolutional neural network architecture that has been trained on the ImageNet dataset. This model is loaded without its top classification layer.
# * Freezing Pre-trained Layers: After loading the pre-trained model, the next step is to freeze its layers so that they are not updated during training. This is done to preserve the learned features in the pre-trained model.
# * Adding Custom Layers: After freezing the pre-trained layers, new classification layers are added on top of the pre-trained model. Here, a flatten layer is added to convert the output of the pre-trained model into a 1D array, followed by a dense layer with 256 neurons and ReLU activation function. The final layer is a dense layer with 3 neurons and softmax activation function for multi-class classification.
# * Creating the Full Model: The full model is created by specifying the inputs as the input layer of the pre-trained model and the outputs as the output layer of the custom layers.
# * Compiling the Model: The last step is to compile the model by specifying the optimizer, loss function, and evaluation metric. Here, the Adam optimizer is used, categorical cross-entropy is used as the loss function for multi-class classification, and accuracy is used as the evaluation metric.
#
# Load the pre-trained Xception model without the top classification layer
base_model = Xception(
weights="imagenet", include_top=False, input_shape=(img_height, img_width, 3)
)
# Freeze the pre-trained layers so they are not updated during training
for layer in base_model.layers:
layer.trainable = False
# Add your own classification layers on top of the pre-trained model
x = base_model.output
x = Flatten()(x)
x = Dense(256, activation="relu")(x)
predictions = Dense(3, activation="softmax")(x)
# Create the full model with both the pre-trained and new classification layers
model = Model(inputs=base_model.input, outputs=predictions)
# Compile the model
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
num_epochs = 20
history = model.fit(
train_generator,
steps_per_epoch=train_generator.samples // batch_size,
epochs=num_epochs,
validation_data=val_generator,
validation_steps=val_generator.samples // batch_size,
)
# The loss and accuracy curves illustrate the performance of the model during training and validation. The loss curve shows a significant decrease in the loss value over the training epochs, indicating that the model is effectively learning from the training data. The accuracy curve shows a consistent increase in the accuracy value over the epochs, reaching a value of 98.45% on the training data and 98.44% on the validation data, which is indicative of a well-performing model.
import matplotlib.pyplot as plt
# Plot training and validation loss
plt.plot(history.history["loss"], label="Training loss")
plt.plot(history.history["val_loss"], label="Validation loss")
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.legend()
plt.show()
# Plot training and validation accuracy
plt.plot(history.history["accuracy"], label="Training accuracy")
plt.plot(history.history["val_accuracy"], label="Validation accuracy")
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.legend()
plt.show()
#
# # 🤹 Predictions
# Here we are going to make predictions on the test images for that:
# 1) We create a dictionary that maps label indices to class names. This dictionary is created using the class_indices attribute of the train_generator.
# 2) Next, a test_datagen object is created, which will be used to load the test images.
# 3) The model.predict method is used to generate predictions for the test images. The predicted probabilities are stored in the predictions variable.
# 4) The argmax function from numpy is then used to find the index of the highest predicted probability for each test image. The resulting indices are converted to class labels using the class_names dictionary.
# 5) The true labels of the test images are obtained from the classes attribute of the test_generator. The resulting indices are also converted to class labels using the class_names dictionary.
# 6) This allows for the comparison of predicted and true labels, which can be used to evaluate the performance of the model on the test set.
# Define a dictionary to convert label indices to class names
class_names = train_generator.class_indices
class_names = {v: k for k, v in class_names.items()}
# Use the trained model to predict labels for the test images
test_dir = "/kaggle/working/test"
test_datagen = ImageDataGenerator(preprocessing_function=preprocess_input)
test_generator = test_datagen.flow_from_directory(
test_dir,
target_size=(img_height, img_width),
batch_size=1, # Set batch size to 1 to get individual images
class_mode="categorical",
color_mode="rgb",
shuffle=False,
)
predictions = model.predict(test_generator)
# Convert the predicted probabilities to class labels
predicted_labels = np.argmax(predictions, axis=1)
predicted_labels = [class_names[label] for label in predicted_labels]
# Get the true labels of the test images
true_labels = test_generator.classes
true_labels = [class_names[label] for label in true_labels]
# After training the model, I evaluated its performance on the test set. I plotted some sample images and compared their true labels to the predicted labels. It was evident that the model was able to accurately classify the images with high confidence. This confirmed that the model was able to generalize well to the test set.
# Print the predicted and true labels for a subset of test images
n_rows = 3
n_cols = 4
fig, axs = plt.subplots(nrows=n_rows, ncols=n_cols, figsize=(15, 15))
np.random.seed(42)
random_indexes = np.random.choice(
len(predicted_labels), size=n_rows * n_cols, replace=False
)
for i, index in enumerate(random_indexes):
img_path = test_generator.filepaths[index]
img = image.load_img(img_path, target_size=(img_height, img_width))
axs[i // n_cols, i % n_cols].imshow(img)
axs[i // n_cols, i % n_cols].axis("off")
axs[i // n_cols, i % n_cols].set_title(
"Predicted: {}\nTrue: {}".format(predicted_labels[index], true_labels[index]),
color="green" if predicted_labels[index] == true_labels[index] else "red",
)
plt.show()
#
# # 📊 Evaluation
# The confusion matrix obtained from the test set predictions indicates that all the labels were almost entirely predicted correctly, with sunrise being always correctly predicted. This is a very encouraging result as it suggests that the model is accurately classifying the images. The high accuracy obtained during training and validation, along with the good performance on the test set, indicate that the model is effectively learning the patterns present in the data and generalizing well to unseen data.
from sklearn.metrics import confusion_matrix
import seaborn as sns
# Create the confusion matrix
cm = confusion_matrix(true_labels, predicted_labels)
# Plot the confusion matrix using seaborn
plt.figure(figsize=(10, 8))
sns.heatmap(
cm,
annot=True,
cmap="Blues",
xticklabels=class_names.values(),
yticklabels=class_names.values(),
)
plt.xlabel("Predicted labels")
plt.ylabel("True labels")
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import matplotlib.pyplot as plt
import seaborn as sns
pd.set_option("display.float_format", lambda x: "%.2f" % x)
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.preprocessing import (
MinMaxScaler,
LabelEncoder,
StandardScaler,
RobustScaler,
)
df = pd.read_csv("/kaggle/input/insurance/insurance.csv")
# # DATA OVERVIEW
def general_pict(dataframe, head=5):
print("---------- HEAD ------------")
print(f"{dataframe.head(head)}\n\n")
print("---------- COLUMNS ------------")
print(f"{dataframe.columns}\n\n")
print("---------- INFO ------------")
print(f"{dataframe.info()}\n\n")
print("---------- IS THERE ANY NULL? ------------")
print(f"{dataframe.isnull().sum()}\n\n")
print("---------- NUMBER OF UNIQUE ------------")
print(f"{dataframe.nunique()}\n\n")
print("---------- DESCRIBE ------------")
print(f"{dataframe.describe().T}\n\n")
general_pict(df)
df.groupby("smoker").agg({"charges": "mean"})
ax = sns.lmplot(x="age", y="charges", data=df, hue="smoker", palette="Set1")
ax = sns.lmplot(x="bmi", y="charges", data=df, hue="smoker", palette="Set2")
ax = sns.lmplot(x="children", y="charges", data=df, hue="smoker", palette="Set3")
# ***As can be seen, smoking has a serious effect on the charges.***
# # PREPARING THE DATA
# - Determining the types of features
# - Outlier control
# - Correlation analysis
# - Encoding
# - Standardization
# > ***Determining the types of features***
def grab_col_names(dataframe, cat_th=10, car_th=20):
# cat_cols, cat_but_car
cat_cols = [col for col in dataframe.columns if dataframe[col].dtypes == "O"]
num_but_cat = [
col
for col in dataframe.columns
if dataframe[col].nunique() < cat_th and dataframe[col].dtypes != "O"
]
cat_but_car = [
col
for col in dataframe.columns
if dataframe[col].nunique() > car_th and dataframe[col].dtypes == "O"
]
cat_cols = cat_cols + num_but_cat
cat_cols = [col for col in cat_cols if col not in cat_but_car]
# num_cols
num_cols = [col for col in dataframe.columns if dataframe[col].dtypes != "O"]
num_cols = [col for col in num_cols if col not in num_but_cat]
print(f"Observations: {dataframe.shape[0]}")
print(f"Variables: {dataframe.shape[1]}")
print(f"cat_cols: {len(cat_cols)}")
print(f"num_cols: {len(num_cols)}")
print(f"cat_but_car: {len(cat_but_car)}")
print(f"num_but_cat: {len(num_but_cat)}")
return cat_cols, num_cols, cat_but_car
cat_cols, num_cols, cat_but_car = grab_col_names(df, cat_th=5)
print(f"categorical cols: {cat_cols}, numeical cols: {num_cols}")
# > ***Outlier control***
def outlier_thresholds(dataframe, col_name, q1=0.25, q3=0.75):
quartile1 = dataframe[col_name].quantile(q1)
quartile3 = dataframe[col_name].quantile(q3)
interquartile = quartile3 - quartile1
up_limit = quartile3 + 1.5 * interquartile
low_limit = quartile1 - 1.5 * interquartile
return low_limit, up_limit
def check_outlier(dataframe, col_name):
low_limit, up_limit = outlier_thresholds(dataframe, col_name)
if dataframe[
(dataframe[col_name] > up_limit) | (dataframe[col_name] < low_limit)
].any(axis=None):
return True
else:
return False
for col in num_cols:
print(col, check_outlier(df, col))
def replace_with_thresholds(dataframe, variable):
low_limit, up_limit = outlier_thresholds(dataframe, variable)
dataframe.loc[(dataframe[variable] < low_limit), variable] = low_limit
dataframe.loc[(dataframe[variable] > up_limit), variable] = up_limit
for col in num_cols:
replace_with_thresholds(df, col)
for col in num_cols:
print(col, check_outlier(df, col))
# > ***Correlation analysis***
corr_matrix = df.corr()  # correlation analysis
print(corr_matrix)
sns.heatmap(corr_matrix, annot=True)
plt.show()
# ***The correlation between the variables is not exaggerated.***
# > ***Encoding***
def label_encoder(dataframe, binary_col):
    labelencoder = LabelEncoder()
    dataframe[binary_col] = labelencoder.fit_transform(dataframe[binary_col])
return dataframe
binary_cols = [
col
for col in df.columns
if df[col].dtype not in [int, float] and df[col].nunique() == 2
]
binary_cols
for col in binary_cols:
label_encoder(df, col)
df.head()
def one_hot_encoder(dataframe, categorical_cols, drop_first=False):
dataframe = pd.get_dummies(
dataframe, columns=categorical_cols, drop_first=drop_first
)
return dataframe
ohe_cols = [col for col in df.columns if 5 >= df[col].nunique() > 2]
ohe_cols
df = one_hot_encoder(df, ohe_cols, drop_first=True)
df.head()
df.info()
# > ***Standardization***
cat_cols, num_cols, car_cols = grab_col_names(df)
num_cols
num_cols = [col for col in num_cols if col != "charges"]
num_cols
scaler = StandardScaler()
df[num_cols] = scaler.fit_transform(df[num_cols])
df[num_cols].head()
# # MODEL
X = df.drop("charges", axis=1)
y = df["charges"]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.20, random_state=1
)
X_train.shape
reg_model = LinearRegression().fit(X_train, y_train)
reg_model.intercept_
reg_model.coef_
random_user = X.sample(1, random_state=45)
random_user
reg_model.predict(random_user)
df.loc[random_user.index]
# # SUCCESS OF THE MODEL
y_pred = reg_model.predict(X_test)
np.sqrt(mean_squared_error(y_test, y_pred))
reg_model.score(X_test, y_test)
np.mean(
np.sqrt(-cross_val_score(reg_model, X, y, cv=10, scoring="neg_mean_squared_error"))
)
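# (A small added sanity check, not part of the original flow.) To judge whether the
# cross-validated RMSE above is actually useful, one option is to compare it against a
# naive baseline that always predicts the mean of the target. `X`, `y` and `reg_model`
# are reused from above; DummyRegressor is a standard scikit-learn estimator.
from sklearn.dummy import DummyRegressor
from sklearn.model_selection import cross_val_score

baseline = DummyRegressor(strategy="mean")
baseline_rmse = np.mean(
    np.sqrt(-cross_val_score(baseline, X, y, cv=10, scoring="neg_mean_squared_error"))
)
model_rmse = np.mean(
    np.sqrt(-cross_val_score(reg_model, X, y, cv=10, scoring="neg_mean_squared_error"))
)
print(f"Baseline (mean) RMSE: {baseline_rmse:.2f} vs model RMSE: {model_rmse:.2f}")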
|
# # Postgres Function
# ## Problem Statement
# ## We need to call a function written in a Postgres DB
# create or replace function get_film_by_name (param_title varchar)
# returns table (
# film_title varchar
# )
# language plpgsql
# as $$
# begin
# return query
# SELECT title FROM film WHERE title = param_title;
# end;$$
# ## The get_film_by_name function expects a param_title parameter
# ## Let's see how we can connect to this DB, call the function and get the result
import psycopg2
hostname = "localhost"
database = "dvdrental"
username = "postgres"
pwd = "***" # Here is the password of your db
port_id = 5432
conn = None
cur = None
try:
## Creating connection
conn = psycopg2.connect(
host=hostname, dbname=database, user=username, password=pwd, port=port_id
)
cur = conn.cursor()
    ## Call the DB function, passing the title
cur.callproc("get_film_by_name", ["Chamber Italian"])
result = cur.fetchone()
print(result)
    ## Execute a simple SELECT query; fetchall() retrieves all the results
cur.execute("SELECT * FROM film LIMIT 10")
for rec in cur.fetchall():
print(rec)
    ## Call the get_films function, a simple function that returns all the films
cur.callproc("get_films")
for rec in cur.fetchall():
print(rec)
except Exception as error:
print(error)
finally:
    ## Close the cursor if it's open
if cur is not None:
cur.close()
    ## Close the connection if it's open
if conn is not None:
conn.close()
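# ## Alternative: context managers
## A sketch of the same call using context managers. Note that in psycopg2 the connection's
## "with" block commits/rolls back the transaction but does NOT close the connection, so we
## still close it explicitly. Connection parameters are the same assumed values as above.
def get_film_titles(title):
    conn = psycopg2.connect(
        host=hostname, dbname=database, user=username, password=pwd, port=port_id
    )
    try:
        with conn:  # commits on success, rolls back on exception
            with conn.cursor() as cur:  # closes the cursor automatically
                cur.callproc("get_film_by_name", [title])
                return cur.fetchall()
    finally:
        conn.close()

## Example usage (assumes the dvdrental sample database is reachable):
# print(get_film_titles("Chamber Italian"))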
|
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
df = pd.read_csv("/kaggle/input/students-performance-in-exams/StudentsPerformance.csv")
df.head()
df["lunch_binary"] = (df.lunch == "standard").astype(int)
df.head()
sns.histplot(df["math score"], kde=True)
plt.show()
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv("/kaggle/input/house-prices-advanced-regression-techniques/train.csv")
df.head()
# Here we see there are many missing values. We need to replace them with either their mean (numeric columns) or mode (categorical columns)
df.isnull().sum()
sns.heatmap(df.isnull(), yticklabels=False)
df.shape
# Now let us fill in the missing values:
# We will use the mean for numeric columns and the mode for categorical columns (a reusable helper is sketched further below):
x = df["LotFrontage"].mean()
x
df["LotFrontage"] = df["LotFrontage"].fillna(x)
# axis = 1, means dropping the feature column-wise
df.drop(["Alley"], axis=1, inplace=True)
df["BsmtCond"] = df["BsmtCond"].fillna(df["BsmtCond"].mode())
df["BsmtQual"] = df["BsmtQual"].fillna(df["BsmtQual"].mode())
df["FireplaceQu"] = df["FireplaceQu"].fillna(df["FireplaceQu"].mode())
df["GarageType"] = df["GarageType"].fillna(df["GarageType"].mode())
df.drop(["GarageYrBlt"], axis=1, inplace=True)
df.head()
df1 = pd.read_csv("/kaggle/input/house-prices-advanced-regression-techniques/test.csv")
df1.head()
df1.isnull().sum()
df1["LotFrontage"] = df1["LotFrontage"].fillna(df1["LotFrontage"].mean())
df1["MSZoning"].value_counts()
df1["MSZoning"] = df["MSZoning"].fillna(df["MSZoning"].mode())
df["GarageType"].value_counts()
df1["BsmtCond"] = df["BsmtCond"].fillna(df["BsmtCond"].mode())
df1["BsmtQual"] = df["BsmtQual"].fillna(df["BsmtQual"].mode())
df1["FireplaceQu"] = df["FireplaceQu"].fillna(df["FireplaceQu"].mode())
df1["GarageType"] = df["GarageType"].fillna(df["GarageType"].mode())
df1.drop(["GarageYrBlt"], axis=1, inplace=True)
df["GarageFinish"] = df["GarageFinish"].fillna(df["GarageFinish"].mode())
df["GarageQual"] = df["GarageQual"].fillna(df["GarageQual"].mode())
df["GarageCond"] = df["GarageCond"].fillna(df["GarageCond"].mode())
df.drop(["PoolQC", "Fence", "MiscFeature"], axis=1, inplace=True)
df.shape
df.drop(["Id"], axis=1, inplace=True)
df.isnull().sum()
df["MasVnrType"] = df["MasVnrType"].fillna(df["MasVnrType"].mode())
df["MasVnrArea"] = df["MasVnrArea"].fillna(df["MasVnrArea"].mode())
df["BsmtFinType2"] = df["BsmtFinType2"].fillna(df["BsmtFinType2"].mode())
# Drop any rows that still contain null values
df.dropna(inplace=True)
df.shape
x = df.columns
prev_df = df.copy()
|
# # Project 4 step 1
# add imports
import pandas as pd
import numpy as np
import lightgbm as lgb
from sklearn.preprocessing import OrdinalEncoder
import polars as pl
import os
data_dir = "/kaggle/input/project-2-data/project_2_data"
write_dir = "/kaggle/working/"
# data_dir = os.getcwd()
# data_dir = os.path.join(data_dir, 'project_2_data')
data_dir
# ### Polar vs Pandas
# Pandas: filter out products that don't have sales using cumsum
sample_1_rmd = (
pd.read_parquet(f"{data_dir}/sales_data.parquet")
.reset_index()
.assign(
cumsum=(lambda x: x.groupby(["id"])["sales"].transform(lambda x: x.cumsum()))
)
.loc[lambda x: x["cumsum"] > 0, :]
# .drop('cumsum', axis=1)
.set_index(["date", "id"])
)
sample_1_rmd.loc[
lambda x: x.index.get_level_values("id") == "FOODS_1_004_TX_2_evaluation", :
]
# Polars: Without lazy api
smplv_1 = pl.read_parquet(f"{data_dir}/sales_data.parquet")
smplv_1 = smplv_1.sort(["id", "date"])
smplv_1 = smplv_1.with_columns(pl.col("sales").cumsum().over(["id"]).alias("cumsum"))
smplv_1 = smplv_1.filter((pl.col("cumsum") > 0))
smplv_1.filter((pl.col("id") == "FOODS_1_004_TX_2_evaluation"))
# Polars: With lazy api
data_vp = (
pl.read_parquet(f"{data_dir}/sales_data.parquet")
.lazy()
.sort(["id", "date"])
.with_columns(pl.col("sales").cumsum().over(["id"]).alias("cumsum"))
.filter((pl.col("cumsum") > 0))
.collect()
)
data_vp.filter((pl.col("id") == "FOODS_1_004_TX_2_evaluation"))
# ### Remarks
# Polars turns out to be about 2 times faster than Pandas for my query. However, Polars syntax is a bit less user-friendly than Pandas.
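# A rough way to back up the "about 2x faster" remark is to time both queries directly.
# This is only a sketch (wall-clock timings vary by machine and cache state); it reuses
# data_dir from above and repeats the same cumulative-sum filter as the two cells above.
import time

t0 = time.perf_counter()
_ = (
    pd.read_parquet(f"{data_dir}/sales_data.parquet")
    .reset_index()
    .assign(
        cumsum=(lambda x: x.groupby(["id"])["sales"].transform(lambda x: x.cumsum()))
    )
    .loc[lambda x: x["cumsum"] > 0, :]
)
pandas_sec = time.perf_counter() - t0

t0 = time.perf_counter()
_ = (
    pl.read_parquet(f"{data_dir}/sales_data.parquet")
    .lazy()
    .sort(["id", "date"])
    .with_columns(pl.col("sales").cumsum().over(["id"]).alias("cumsum"))
    .filter(pl.col("cumsum") > 0)
    .collect()
)
polars_sec = time.perf_counter() - t0

print(f"pandas: {pandas_sec:.2f}s, polars: {polars_sec:.2f}s")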
# ## Model drift experiment
data = pd.read_parquet(f"{data_dir}/sales_data.parquet")
# data
# Create a sample (here we simply take the whole dataset)
smpl_1 = data.copy()
# set(data.reset_index()
# .loc[:, ['date']]
# .assign(year=lambda df: df['date'].dt.year)['year']
# )
# smpl_1 = (data.reset_index()
# .loc[lambda df: ((df.date.dt.year >= 2015) & (df.date.dt.year <= 2016)), :]
# .reset_index(drop=True)
# .copy())
# smpl_1
# Check the dataset is at ID level
assert smpl_1.reset_index().loc[:, ["date", "id"]].duplicated().sum() == 0
# filter out products that don't have sales using cumsum
sample_1_rmd = (
smpl_1.reset_index()
.assign(
cumsum=(lambda x: x.groupby(["id"])["sales"].transform(lambda x: x.cumsum()))
)
.loc[lambda x: x["cumsum"] > 0, :]
.drop("cumsum", axis=1)
.set_index(["date", "id"])
)
sample_1_rmd
# ### Feature engineering
def calc_lag(df, shift_length, forecast_horizon, by_day_of_week=False):
group_cols = ["id"]
if by_day_of_week:
group_cols += ["day_of_week"]
feature_name = f"lag_{shift_length}_{forecast_horizon}"
return (
df.assign(day_of_week=df.index.get_level_values("date").dayofweek)
.groupby(group_cols)
.sales.shift(forecast_horizon + shift_length)
.rename(feature_name)
), feature_name
def calc_rolling_agg(
df, window_length, forecast_horizon, agg_func="mean", by_day_of_week=False
):
group_cols = ["id"]
if by_day_of_week:
group_cols += ["day_of_week"]
if not by_day_of_week:
feature_name = f"rolling_{agg_func}_{window_length}_{forecast_horizon}"
else:
feature_name = f"seasonal_rolling_{agg_func}_{window_length}_{forecast_horizon}"
return (
df.assign(day_of_week=df.index.dayofweek)
.groupby(group_cols, group_keys=False)
.sales.rolling(
window_length, closed="right", min_periods=1
) # only requires 1 observation to be non-NaN
.agg({"sales": agg_func})
.reset_index()
.assign(date=lambda x: x.date + pd.Timedelta(days=28))
.set_index("date")
.rename(columns={"sales": feature_name})
), feature_name
calendar = pd.read_parquet(f"{data_dir}/calendar.parquet")
prices = pd.read_parquet(f"{data_dir}/prices.parquet")
lag_features = [1, 7, 28]
rolling_features = {
"mean": [7, 28],
"std": [7, 28],
}
seasonal_rolling_features = {
"mean": [4, 8],
"std": [4, 8],
}
def feature_engineering(df, horizon):
cont_feats = []
for lag in lag_features:
fe_table, feature_name = calc_lag(df, lag, horizon)
df = df.merge(fe_table, on=["id", "date"], how="left")
cont_feats.append(feature_name)
df = df.reset_index("id")
for agg_func, windows in rolling_features.items():
for window in windows:
fe_table, feature_name = calc_rolling_agg(df, window, horizon, agg_func)
df = df.merge(fe_table, on=["id", "date"], how="left")
cont_feats.append(feature_name)
for agg_func, windows in seasonal_rolling_features.items():
for window in windows:
fe_table, feature_name = calc_rolling_agg(
df, window, horizon, agg_func, by_day_of_week=True
)
df = df.merge(
fe_table.drop(columns="day_of_week"), on=["id", "date"], how="left"
)
cont_feats.append(feature_name)
df = (
df.merge(calendar[["snap_TX"]], on="date", how="left")
.merge(prices, on=["date", "store_id", "item_id"], how="left")
.assign(
day_of_week=lambda x: x.index.dayofweek,
day_of_month=lambda x: x.index.day,
month=lambda x: x.index.month,
year=lambda x: x.index.year,
)
)
cont_feats += ["sell_price", "day_of_week", "day_of_month", "month", "year"]
cat_feats = ["id", "item_id", "dept_id", "cat_id", "store_id", "snap_TX"]
enc_cat_feats = [f"{feat}_enc" for feat in cat_feats]
df[enc_cat_feats] = OrdinalEncoder().fit_transform(df[cat_feats])
max_date = df.index.get_level_values("date").max()
train = df.loc[: max_date - pd.Timedelta(days=28), :]
val = df.loc[
max_date
- pd.Timedelta(days=28 - (horizon - 7) - 1) : max_date
- pd.Timedelta(days=28 - horizon),
:,
]
price_feats = train.groupby("id").agg(
max_price=("sell_price", "max"),
median_price=("sell_price", "median"),
)
train = train.merge(price_feats, on="id", how="left")
val = val.merge(price_feats, on="id", how="left")
cont_feats += ["max_price", "median_price"]
return (train, val, cont_feats, enc_cat_feats)
# train, val, cont_feats, cat_feats = feature_engineering(sample_1_rmd, 14)
for horizon in [7, 14, 21, 28]: # 7, 14, 21, 28
train, val, cont_feats, cat_feats = feature_engineering(sample_1_rmd, horizon)
train.to_parquet(f"{write_dir}/exp_{horizon}_train.parquet")
val.to_parquet(f"{write_dir}/exp_{horizon}_val.parquet")
pd.DataFrame({"cont_columns": cont_feats}).to_parquet(
f"{write_dir}/exp_{horizon}_cont.parquet"
)
pd.DataFrame({"cat_columns": cat_feats}).to_parquet(
f"{write_dir}/exp_{horizon}_cat.parquet"
)
for horizon in [7, 14, 21, 28]:
train = pd.read_parquet(f"{write_dir}/exp_{horizon}_train.parquet")
val = pd.read_parquet(f"{write_dir}/exp_{horizon}_val.parquet")
cont_feats = list(
pd.read_parquet(f"{write_dir}/exp_{horizon}_cont.parquet").values.flatten()
)
cat_feats = list(
pd.read_parquet(f"{write_dir}/exp_{horizon}_cat.parquet").values.flatten()
)
# list(np.array(cont_feats).flatten())
# train[cont_feats].head()
# train[cat_feats].head()
# [c for c in train.columns if c not in np.union1d(cont_feats , cat_feats)]
def weighted_rmse(preds, data):
squared_error = (preds - data.get_label()) ** 2
value = np.mean(np.sqrt(squared_error / data.get_weight()))
return "rmsse", value, False
def rmsse(train, val, y_pred):
train_scale = (
train.assign(
scale=train.groupby("id").sales.diff() ** 2,
)
.groupby("id")
.scale.mean()
)
score = (
val.assign(squared_error=(val.sales - y_pred) ** 2)
.groupby("id")
.squared_error.mean()
.to_frame()
.merge(train_scale, on="id")
.assign(rmsse=lambda x: np.sqrt(x.squared_error / x.scale))
.rmsse.mean()
)
return score
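# Quick illustration of the RMSSE definition implemented above, on toy numbers of my own:
# the denominator is the in-sample mean squared one-step naive error, so a score of 1.0
# means "on average no better than the naive last-value forecast".
toy_train = np.array([0, 2, 1, 3])  # training sales for a single id
toy_actual = np.array([2, 2])  # validation sales
toy_pred = np.array([1, 3])  # model forecast
toy_scale = np.mean(np.diff(toy_train) ** 2)  # mean of (2^2, (-1)^2, 2^2) = 3.0
toy_rmsse = np.sqrt(np.mean((toy_actual - toy_pred) ** 2) / toy_scale)
print(toy_rmsse)  # sqrt(1.0 / 3.0) ≈ 0.577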
# #### Training a model once and then evaluating it on successive folds without retraining it.
params = dict(
objective="tweedie",
metric="None",
tweedie_variance_power=1.1,
learning_rate=0.05,
min_samples_leaf=100,
subsample=0.3,
feature_fraction=0.3,
deterministic=True,
)
val_dfs = []
preds = []
hz_rmsse_1 = []
model = None
for horizon in [7, 14, 21, 28]: # 7, 14, 21, 28
# train, val, cont_feats, cat_feats = feature_engineering(data, horizon)
train = pd.read_parquet(f"{write_dir}/exp_{horizon}_train.parquet")
val = pd.read_parquet(f"{write_dir}/exp_{horizon}_val.parquet")
cont_feats = list(
pd.read_parquet(f"{write_dir}/exp_{horizon}_cont.parquet").values.flatten()
)
cat_feats = list(
pd.read_parquet(f"{write_dir}/exp_{horizon}_cat.parquet").values.flatten()
)
train_scale = (
train.assign(
scale=train.groupby("id").sales.diff() ** 2,
)
.groupby("id")
.scale.mean()
)
val = val.merge(train_scale, on="id")
train_dset = lgb.Dataset(
train[cont_feats + cat_feats],
train["sales"],
)
val_dset = lgb.Dataset(
val[cont_feats + cat_feats],
val["sales"],
weight=val["scale"],
)
callbacks = [lgb.early_stopping(100), lgb.log_evaluation(50)]
if model is None:
model = lgb.train(
params,
train_dset,
num_boost_round=100,
valid_sets=[val_dset],
callbacks=callbacks,
feval=weighted_rmse,
)
pred = model.predict(val[cont_feats + cat_feats])
val_dfs.append(val)
preds.append(pred)
print(f"On horizon {horizon}")
h_rmsse = rmsse(train, val, pred)
print(f"Horizon RMSSE: {h_rmsse}")
hz_rmsse_1.append((horizon, h_rmsse))
val = pd.concat(val_dfs)
preds = np.concatenate(preds)
overall_rmsse_1 = rmsse(train, val, preds)
print(f"RMSSE: {overall_rmsse_1}")
print(f"RMSSE: {overall_rmsse_1}")
hz_rmsse_1
# #### A model retrained on successive folds
params = dict(
objective="tweedie",
metric="None",
tweedie_variance_power=1.1,
learning_rate=0.05,
min_samples_leaf=100,
subsample=0.3,
feature_fraction=0.3,
deterministic=True,
)
val_dfs = []
preds = []
hz_rmsse_2 = []
for horizon in [7, 14, 21, 28]: # 7, 14, 21, 28
# train, val, cont_feats, cat_feats = feature_engineering(data, horizon)
train = pd.read_parquet(f"{write_dir}/exp_{horizon}_train.parquet")
val = pd.read_parquet(f"{write_dir}/exp_{horizon}_val.parquet")
cont_feats = list(
pd.read_parquet(f"{write_dir}/exp_{horizon}_cont.parquet").values.flatten()
)
cat_feats = list(
pd.read_parquet(f"{write_dir}/exp_{horizon}_cat.parquet").values.flatten()
)
train_scale = (
train.assign(
scale=train.groupby("id").sales.diff() ** 2,
)
.groupby("id")
.scale.mean()
)
val = val.merge(train_scale, on="id")
train_dset = lgb.Dataset(
train[cont_feats + cat_feats],
train["sales"],
)
val_dset = lgb.Dataset(
val[cont_feats + cat_feats],
val["sales"],
weight=val["scale"],
)
callbacks = [lgb.early_stopping(100), lgb.log_evaluation(50)]
model = lgb.train(
params,
train_dset,
num_boost_round=100,
valid_sets=[val_dset],
callbacks=callbacks,
feval=weighted_rmse,
)
pred = model.predict(val[cont_feats + cat_feats])
val_dfs.append(val)
preds.append(pred)
print(f"On horizon {horizon}")
h_rmsse = rmsse(train, val, pred)
print(f"Horizon RMSSE: {h_rmsse}")
hz_rmsse_2.append((horizon, h_rmsse))
val = pd.concat(val_dfs)
preds = np.concatenate(preds)
overall_rmsse_2 = rmsse(train, val, preds)
print(f"RMSSE: {overall_rmsse_2}")
print(f"RMSSE: {overall_rmsse_2}")
hz_rmsse_2
# ### Model drift
df_1 = pd.DataFrame(hz_rmsse_1, columns=["horizon", "rmsse_once_trained_model"])
df_2 = pd.DataFrame(hz_rmsse_2, columns=["horizon_1", "rmsse_retrained_model"])
pd.concat([df_1, df_2], axis=1).drop("horizon_1", axis=1).plot(x="horizon", kind="line")
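# Numeric view of the same comparison (a small addition): a positive delta means the
# once-trained model is worse than the retrained one at that horizon, which is one simple
# way to quantify the drift seen in the plot.
drift = (
    pd.concat([df_1, df_2], axis=1)
    .drop("horizon_1", axis=1)
    .assign(delta=lambda x: x.rmsse_once_trained_model - x.rmsse_retrained_model)
)
print(drift)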
# ### Examine price feature
def feature_engineering_2(df, horizon):
cont_feats = []
for lag in lag_features:
fe_table, feature_name = calc_lag(df, lag, horizon)
df = df.merge(fe_table, on=["id", "date"], how="left")
cont_feats.append(feature_name)
df = df.reset_index("id")
for agg_func, windows in rolling_features.items():
for window in windows:
fe_table, feature_name = calc_rolling_agg(df, window, horizon, agg_func)
df = df.merge(fe_table, on=["id", "date"], how="left")
cont_feats.append(feature_name)
for agg_func, windows in seasonal_rolling_features.items():
for window in windows:
fe_table, feature_name = calc_rolling_agg(
df, window, horizon, agg_func, by_day_of_week=True
)
df = df.merge(
fe_table.drop(columns="day_of_week"), on=["id", "date"], how="left"
)
cont_feats.append(feature_name)
df = (
df.merge(calendar[["snap_TX"]], on="date", how="left")
.merge(prices, on=["date", "store_id", "item_id"], how="left")
.assign(
day_of_week=lambda x: x.index.dayofweek,
day_of_month=lambda x: x.index.day,
month=lambda x: x.index.month,
year=lambda x: x.index.year,
)
)
cont_feats += ["sell_price", "day_of_week", "day_of_month", "month", "year"]
cat_feats = ["id", "item_id", "dept_id", "cat_id", "store_id", "snap_TX"]
enc_cat_feats = [f"{feat}_enc" for feat in cat_feats]
df[enc_cat_feats] = OrdinalEncoder().fit_transform(df[cat_feats])
max_date = df.index.get_level_values("date").max()
train = df.loc[: max_date - pd.Timedelta(days=28), :]
val = df.loc[
max_date
- pd.Timedelta(days=28 - (horizon - 7) - 1) : max_date
- pd.Timedelta(days=28 - horizon),
:,
]
price_feats = train.groupby("id").agg(
max_price=("sell_price", "max"),
median_price=("sell_price", "median"),
)
train = train.merge(price_feats, on="id", how="left")
val_ = val.merge(price_feats, on="id", how="left")
val_w_date = val.reset_index("date").merge(price_feats, on="id", how="left")
cont_feats += ["max_price", "median_price"]
return (train, val_, val_w_date, cont_feats, enc_cat_feats)
horizon = 14
train, val, val_w_date, cont_feats, cat_feats = feature_engineering_2(
sample_1_rmd, horizon
)
train.to_parquet(f"{write_dir}/exp_{horizon}_train.parquet")
val.to_parquet(f"{write_dir}/exp_{horizon}_val.parquet")
val_w_date.to_parquet(f"{write_dir}/exp_{horizon}_val_w_date.parquet")
pd.DataFrame({"cont_columns": cont_feats}).to_parquet(
f"{write_dir}/exp_{horizon}_cont.parquet"
)
pd.DataFrame({"cat_columns": cat_feats}).to_parquet(
f"{write_dir}/exp_{horizon}_cat.parquet"
)
# #### Train with price
params = dict(
objective="tweedie",
metric="None",
tweedie_variance_power=1.1,
learning_rate=0.05,
min_samples_leaf=100,
subsample=0.3,
feature_fraction=0.3,
deterministic=True,
)
# Read Ready data
train = pd.read_parquet(f"{write_dir}/exp_{horizon}_train.parquet")
val = pd.read_parquet(f"{write_dir}/exp_{horizon}_val.parquet")
val_w_date = pd.read_parquet(f"{write_dir}/exp_{horizon}_val_w_date.parquet")
cont_feats = list(
pd.read_parquet(f"{write_dir}/exp_{horizon}_cont.parquet").values.flatten()
)
cat_feats = list(
pd.read_parquet(f"{write_dir}/exp_{horizon}_cat.parquet").values.flatten()
)
train_scale = (
train.assign(
scale=train.groupby("id").sales.diff() ** 2,
)
.groupby("id")
.scale.mean()
)
val = val.merge(train_scale, on="id")
train_dset = lgb.Dataset(
train[cont_feats + cat_feats],
train["sales"],
)
val_dset = lgb.Dataset(
val[cont_feats + cat_feats],
val["sales"],
weight=val["scale"],
)
callbacks = [lgb.early_stopping(100), lgb.log_evaluation(50)]
model = lgb.train(
params,
train_dset,
num_boost_round=100,
valid_sets=[val_dset],
callbacks=callbacks,
feval=weighted_rmse,
)
print(f"On horizon {horizon}")
h_rmsse = rmsse(train, val, pred)
print(f"Horizon RMSSE: {h_rmsse}")
import matplotlib.pyplot as plt
id_to_plot = "FOODS_1_004_TX_2_evaluation"
max_date = sample_1_rmd.index.get_level_values("date").max()
plot_val = val_w_date.assign(pred=pred)
plot_data = sample_1_rmd.reset_index("id")[
max_date - pd.Timedelta(days=30) :
].reset_index()
plt.figure(figsize=(8, 6))
plt.plot(
plot_data.loc[plot_data.id == id_to_plot, "date"],
plot_data.loc[plot_data.id == id_to_plot, "sales"],
label="actuals",
)
plt.plot(
plot_val.loc[plot_val.id == id_to_plot, "date"],
plot_val.loc[plot_val.id == id_to_plot, "pred"],
label="forecast",
)
plt.legend()
plt.xticks(rotation=45)
# #### Train without price
params = dict(
objective="tweedie",
metric="None",
tweedie_variance_power=1.1,
learning_rate=0.05,
min_samples_leaf=100,
subsample=0.3,
feature_fraction=0.3,
deterministic=True,
)
# Read Ready data
train = pd.read_parquet(f"{write_dir}/exp_{horizon}_train.parquet")
val = pd.read_parquet(f"{write_dir}/exp_{horizon}_val.parquet")
val_w_date = pd.read_parquet(f"{write_dir}/exp_{horizon}_val_w_date.parquet")
cont_feats = list(
pd.read_parquet(f"{write_dir}/exp_{horizon}_cont.parquet").values.flatten()
)
cat_feats = list(
pd.read_parquet(f"{write_dir}/exp_{horizon}_cat.parquet").values.flatten()
)
cont_feats = [c for c in cont_feats if c not in ("sell_price", "max_price", "median_price")]
train_scale = (
train.assign(
scale=train.groupby("id").sales.diff() ** 2,
)
.groupby("id")
.scale.mean()
)
val = val.merge(train_scale, on="id")
train_dset = lgb.Dataset(
train[cont_feats + cat_feats],
train["sales"],
)
val_dset = lgb.Dataset(
val[cont_feats + cat_feats],
val["sales"],
weight=val["scale"],
)
callbacks = [lgb.early_stopping(100), lgb.log_evaluation(50)]
model = lgb.train(
params,
train_dset,
num_boost_round=100,
valid_sets=[val_dset],
callbacks=callbacks,
feval=weighted_rmse,
)
print(f"On horizon {horizon}")
h_rmsse = rmsse(train, val, pred)
print(f"Horizon RMSSE: {h_rmsse}")
import matplotlib.pyplot as plt
id_to_plot = "FOODS_1_004_TX_2_evaluation"
max_date = sample_1_rmd.index.get_level_values("date").max()
plot_val = val_w_date.assign(pred=pred)
plot_data = sample_1_rmd.reset_index("id")[
max_date - pd.Timedelta(days=30) :
].reset_index()
plt.figure(figsize=(8, 6))
plt.plot(
plot_data.loc[plot_data.id == id_to_plot, "date"],
plot_data.loc[plot_data.id == id_to_plot, "sales"],
label="actuals",
)
plt.plot(
plot_val.loc[plot_val.id == id_to_plot, "date"],
plot_val.loc[plot_val.id == id_to_plot, "pred"],
label="forecast",
)
plt.legend()
plt.xticks(rotation=45)
|
import pandas as pd
import matplotlib.pyplot as plt
import time
import seaborn as sns
# the thousands and decimal options let us change the thousands and decimal separators
arquivo_csv = (
"/kaggle/input/tarefa-final-curso-ebac-python/Python_M10_support material.csv"
)
df_base = pd.read_csv(arquivo_csv, thousands=".", decimal=",")
df_base.describe()
# checking the data types of the columns
df_base.dtypes
# # Field Validation
# checking whether there is invalid data, such as sexo = "ABC" for example
sexo = df_base.groupby(["sexo"])["sexo"].count()
print(sexo)
estado_civil = df_base.groupby(["estado_civil"])["estado_civil"].count()
print(estado_civil)
salario_anual = df_base.groupby(["salario_anual"])["salario_anual"].count()
print(salario_anual)
escolaridade = df_base.groupby(["escolaridade"])["escolaridade"].count()
print(escolaridade)
# ### Conclusion:
# Given how representative the observations with the value 'na' are, they will be kept so as not to impact the analysis, but the value 'na' will be replaced with 'Não informado'
df_base["escolaridade"] = df_base["escolaridade"].replace("na", value="Não informado")
df_base["salario_anual"] = df_base["salario_anual"].replace("na", value="Não informado")
df_base["estado_civil"] = df_base["estado_civil"].replace("na", value="Não informado")
# # ANALYSIS
# set the default chart size
LARGURA = 15
ALTURA = 10
distribuicao = df_base.groupby(["sexo", "salario_anual"])["id"].count().reset_index()
print(distribuicao)
plt.figure(figsize=(LARGURA, ALTURA))
ax = sns.barplot(x="salario_anual", y="id", hue="sexo", data=distribuicao)
ax.set(ylabel="Total", xlabel="Faixa Salarial")
ax.set_title("Distruibuição dos clientes por sexo", fontsize=20)
# # Analysis by Marital Status
distribuicao_clientes = (
df_base.groupby(["sexo", "estado_civil"])["id"].count().reset_index()
)
print(distribuicao_clientes)
plt.figure(figsize=(LARGURA, ALTURA))
ax = sns.barplot(x="estado_civil", y="id", hue="sexo", data=distribuicao_clientes)
ax.set(ylabel="Total", xlabel="Estado Civil")
ax.set_title("Distruibuição dos Clientes por Sexo e Estado Civil", fontsize=20)
estado_civil = (
df_base.groupby(["sexo", "estado_civil"])["valor_transacoes_12m"]
.sum()
.reset_index()
)
plt.figure(figsize=(LARGURA, ALTURA))
ax = sns.barplot(
x="estado_civil", y="valor_transacoes_12m", hue="sexo", data=estado_civil
)
ax.set(ylabel="Compras", xlabel="Faixa Salarial")
ax.set_title("Total de Compras x Estado Civil", fontsize=20)
df_dados = pd.DataFrame(df_base, columns=["escolaridade", "limite_credito"])
escola_limite_soma = df_base.groupby("escolaridade").sum().reset_index()
plt.figure(figsize=(LARGURA, ALTURA))
ax = sns.barplot(x="escolaridade", y="limite_credito", data=escola_limite_soma)
for index, row in escola_limite_soma.iterrows():
ax.text(
row.name,
row.limite_credito,
round(row.limite_credito / 10000000, 2),
color="black",
ha="center",
va="bottom",
fontsize=14,
)
ax.set_title("Limite de crédito Total x Escolaridade (R$ milhões)", fontsize=20)
# ### Average credit limit by age and education level
limite_sexo = (
df_base.groupby(["escolaridade", "sexo", "salario_anual", "idade", "estado_civil"])[
"limite_credito"
]
.mean()
.reset_index()
)
plt.figure(figsize=(LARGURA, ALTURA))
limite = sns.relplot(
data=limite_sexo,
x="idade",
y="limite_credito",
col="salario_anual",
hue="escolaridade",
col_wrap=3,
)
# ### Average purchases by age and education level
compras = (
df_base.groupby(["escolaridade", "sexo", "salario_anual", "idade", "estado_civil"])[
"valor_transacoes_12m"
]
.sum()
.reset_index()
)
compra_escolaridade = sns.relplot(
data=compras,
x="idade",
y="valor_transacoes_12m",
col="salario_anual",
hue="escolaridade",
col_wrap=2,
)
compra_escolaridade = sns.relplot(
data=compras,
x="idade",
y="valor_transacoes_12m",
col="estado_civil",
hue="sexo",
col_wrap=2,
)
# ### Total purchases by salary and sex
compra_x_sexo = sns.relplot(
data=compras,
x="idade",
y="valor_transacoes_12m",
col="salario_anual",
hue="sexo",
col_wrap=2,
)
# ### Average credit limit by salary and sex
limite_x_sexo = sns.relplot(
data=limite_sexo,
x="idade",
y="limite_credito",
col="salario_anual",
hue="sexo",
col_wrap=2,
)
limite_faixa = (
df_base.groupby(["salario_anual"])["valor_transacoes_12m"].mean().reset_index()
)
plt.figure(figsize=(LARGURA, ALTURA))
ax = sns.barplot(x="salario_anual", y="valor_transacoes_12m", data=limite_faixa)
for index, row in limite_faixa.iterrows():
ax.text(
row.name,
row.valor_transacoes_12m,
round(row.valor_transacoes_12m, 2),
color="black",
ha="center",
va="bottom",
fontsize=14,
)
ax.set_title("Valor médio da Compra x Salário", fontsize=20)
# # Conclusion
# Since female customers buy more than male customers, there is an opportunity to increase revenue through a campaign targeting the female audience
#
|
# Reference: https://www.kaggle.com/code/jessemostipak/getting-started-tpus-cassava-leaf-disease/notebook
# Setup Environment
import math, re, os
import tensorflow as tf
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from tensorflow import keras
from functools import partial
from sklearn.model_selection import train_test_split
print("Tensorflow version " + tf.__version__)
# Detect TPU
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
print("Device:", tpu.master())
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.TPUStrategy(tpu)
except:
strategy = tf.distribute.get_strategy()
print("Number of replicas:", strategy.num_replicas_in_sync)
AUTOTUNE = tf.data.experimental.AUTOTUNE
# GCS_PATH = KaggleDatasets().get_gcs_path()
BATCH_SIZE = 16 * strategy.num_replicas_in_sync
IMAGE_SIZE = [512, 512]
CLASSES = ["0", "1", "2", "3", "4"]
EPOCHS = 25
from kaggle_datasets import KaggleDatasets
GCS_PATH = KaggleDatasets().get_gcs_path()
TRAINING_FILENAMES, VALID_FILENAMES = train_test_split(
tf.io.gfile.glob(GCS_PATH + "/train_tfrecords/ld_train*.tfrec"),
test_size=0.35,
random_state=5,
)
TEST_FILENAMES = tf.io.gfile.glob(GCS_PATH + "/test_tfrecords/ld_test*.tfrec")
def decode_image(image):
image = tf.image.decode_jpeg(image, channels=3)
image = tf.cast(image, tf.float32) / 255.0
image = tf.reshape(image, [*IMAGE_SIZE, 3])
return image
def read_tfrecord(example, labeled):
tfrecord_format = (
{
"image": tf.io.FixedLenFeature([], tf.string),
"target": tf.io.FixedLenFeature([], tf.int64),
}
if labeled
else {
"image": tf.io.FixedLenFeature([], tf.string),
"image_name": tf.io.FixedLenFeature([], tf.string),
}
)
example = tf.io.parse_single_example(example, tfrecord_format)
image = decode_image(example["image"])
if labeled:
label = tf.cast(example["target"], tf.int32)
return image, label
idnum = example["image_name"]
return image, idnum
# Original version: shuffles the dataset randomly
""""""
def load_dataset(filenames, labeled=True, ordered=False):
ignore_order = tf.data.Options()
if not ordered:
ignore_order.experimental_deterministic = False # disable order, increase speed
dataset = tf.data.TFRecordDataset(
filenames, num_parallel_reads=AUTOTUNE
) # automatically interleaves reads from multiple files
dataset = dataset.with_options(
ignore_order
) # uses data as soon as it streams in, rather than in its original order
dataset = dataset.map(
partial(read_tfrecord, labeled=labeled), num_parallel_calls=AUTOTUNE
)
return dataset
"""
# Version without random shuffling, only used for data visualization; switch back to the original version when training the model
def load_dataset(filenames, labeled=True, ordered=True):
ignore_order = tf.data.Options()
if not ordered:
ignore_order.experimental_deterministic = False
dataset = tf.data.TFRecordDataset(filenames, num_parallel_reads=AUTOTUNE)
dataset = dataset.with_options(ignore_order)
dataset = dataset.map(partial(read_tfrecord, labeled=labeled), num_parallel_calls=AUTOTUNE)
return dataset
"""
def data_augment(image, label):
image = tf.image.random_flip_left_right(image)
image = tf.image.random_brightness(image, 0.1)
image = tf.image.random_contrast(image, 0.9, 1.3)
image = tf.image.random_hue(image, 0.1)
image = tf.image.random_saturation(image, 0.8, 1.2)
height = 224
width = 224
original_height = tf.shape(image)[0]
original_width = tf.shape(image)[1]
# image = tf.image.pad_to_bounding_box(image, 0, 0, original_height + 4, original_width + 4)
# image = tf.image.random_crop(image, size=[height, width, 3])
return image, label
def get_training_dataset():
dataset = load_dataset(TRAINING_FILENAMES, labeled=True)
dataset = dataset.map(data_augment, num_parallel_calls=AUTOTUNE)
dataset = dataset.repeat()
dataset = dataset.shuffle(2048)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTOTUNE)
return dataset
def get_validation_dataset(ordered=False):
dataset = load_dataset(VALID_FILENAMES, labeled=True, ordered=ordered)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.cache()
dataset = dataset.prefetch(AUTOTUNE)
return dataset
def get_test_dataset(ordered=False):
dataset = load_dataset(TEST_FILENAMES, labeled=False, ordered=ordered)
dataset = dataset.batch(BATCH_SIZE)
dataset = dataset.prefetch(AUTOTUNE)
return dataset
# numpy and matplotlib defaults
'''
np.set_printoptions(threshold=15, linewidth=80)
def batch_to_numpy_images_and_labels(data):
images, labels = data
numpy_images = images.numpy()
numpy_labels = labels.numpy()
if numpy_labels.dtype == object: # binary string in this case, these are image ID strings
numpy_labels = [None for _ in enumerate(numpy_images)]
# If no labels, only image IDs, return None for labels (this is the case for test data)
return numpy_images, numpy_labels
def title_from_label_and_target(label, correct_label):
if correct_label is None:
return CLASSES[label], True
correct = (label == correct_label)
return "{} [{}{}{}]".format(CLASSES[label], 'OK' if correct else 'NO', u"\u2192" if not correct else '',
CLASSES[correct_label] if not correct else ''), correct
def display_one_plant(image, title, subplot, red=False, titlesize=16):
plt.subplot(*subplot)
plt.axis('off')
plt.imshow(image)
if len(title) > 0:
plt.title(title, fontsize=int(titlesize) if not red else int(titlesize/1.2), color='red' if red else 'black', fontdict={'verticalalignment':'center'}, pad=int(titlesize/1.5))
return (subplot[0], subplot[1], subplot[2]+1)
def display_batch_of_images(databatch, predictions=None):
"""This will work with:
display_batch_of_images(images)
display_batch_of_images(images, predictions)
display_batch_of_images((images, labels))
display_batch_of_images((images, labels), predictions)
"""
# data
images, labels = batch_to_numpy_images_and_labels(databatch)
if labels is None:
labels = [None for _ in enumerate(images)]
# auto-squaring: this will drop data that does not fit into square or square-ish rectangle
rows = int(math.sqrt(len(images)))
cols = len(images)//rows
# size and spacing
FIGSIZE = 13.0
SPACING = 0.1
subplot=(rows,cols,1)
if rows < cols:
plt.figure(figsize=(FIGSIZE,FIGSIZE/cols*rows))
else:
plt.figure(figsize=(FIGSIZE/rows*cols,FIGSIZE))
# display
for i, (image, label) in enumerate(zip(images[:rows*cols], labels[:rows*cols])):
title = '' if label is None else CLASSES[label]
correct = True
if predictions is not None:
title, correct = title_from_label_and_target(predictions[i], label)
dynamic_titlesize = FIGSIZE*SPACING/max(rows,cols)*40+3 # magic formula tested to work from 1x1 to 10x10 images
subplot = display_one_plant(image, title, subplot, not correct, titlesize=dynamic_titlesize)
#layout
plt.tight_layout()
if label is None and predictions is None:
plt.subplots_adjust(wspace=0, hspace=0)
else:
plt.subplots_adjust(wspace=SPACING, hspace=SPACING)
plt.show()
'''
"""
def display_one_original_and_one_preprocessed_image_per_class(original_dataset, preprocessed_dataset):
original_images, original_labels = batch_to_numpy_images_and_labels(original_dataset)
preprocessed_images, preprocessed_labels = batch_to_numpy_images_and_labels(preprocessed_dataset)
plt.figure(figsize=(20, 8))
for label in range(len(CLASSES)):
# Find the first occurrence of each label
index = np.where(original_labels == label)[0][0]
# Display original image
plt.subplot(2, len(CLASSES), label + 1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(original_images[index])
plt.xlabel("Original\nClass: {}".format(CLASSES[label]))
# Display preprocessed image
plt.subplot(2, len(CLASSES), label + 1 + len(CLASSES))
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(preprocessed_images[index])
plt.xlabel("Preprocessed\nClass: {}".format(CLASSES[label]))
plt.show()
def display_two_original_and_two_preprocessed_image_per_class(original_dataset, preprocessed_dataset):
original_images, original_labels = batch_to_numpy_images_and_labels(original_dataset)
preprocessed_images, preprocessed_labels = batch_to_numpy_images_and_labels(preprocessed_dataset)
plt.figure(figsize=(20, 16))
for label in range(len(CLASSES)):
# Find the first occurrence of each label
indices = np.where(original_labels == label)[0][:2]
for i, index in enumerate(indices):
# Display original image
plt.subplot(4, len(CLASSES), 2 * label + i + 1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(original_images[index])
plt.xlabel("Original\nClass: {}".format(CLASSES[label]))
# Display preprocessed image
plt.subplot(4, len(CLASSES), 2 * label + i + 1 + 2 * len(CLASSES))
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(preprocessed_images[index])
plt.xlabel("Preprocessed\nClass: {}".format(CLASSES[label]))
plt.show()
"""
"""
# Get the original dataset
original_dataset = load_dataset(TRAINING_FILENAMES, labeled=True, ordered=True)
original_dataset = original_dataset.batch(80)
original_batch = next(iter(original_dataset))
# Get the preprocessed dataset
preprocessed_dataset = load_dataset(TRAINING_FILENAMES, labeled=True, ordered=True)
preprocessed_dataset = preprocessed_dataset.map(data_augment, num_parallel_calls=AUTOTUNE)
preprocessed_dataset = preprocessed_dataset.batch(80)
preprocessed_batch = next(iter(preprocessed_dataset))
# Display the original images and the preprocessed images
#display_one_image_per_class(original_batch, preprocessed_batch)
"""
# display_one_original_and_one_preprocessed_image_per_class(original_batch, preprocessed_batch)
# display_two_original_and_two_preprocessed_image_per_class(original_batch, preprocessed_batch)
training_dataset = get_training_dataset()
training_dataset = training_dataset.unbatch().batch(20)
train_batch = iter(training_dataset)
# load our validation dataset for EDA
validation_dataset = get_validation_dataset()
validation_dataset = validation_dataset.unbatch().batch(20)
valid_batch = iter(validation_dataset)
# load our test dataset for EDA
testing_dataset = get_test_dataset()
testing_dataset = testing_dataset.unbatch().batch(20)
test_batch = iter(testing_dataset)
lr_scheduler = keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=1e-5, decay_steps=10000, decay_rate=0.9
)
with strategy.scope():
img_adjust_layer = tf.keras.layers.Lambda(
tf.keras.applications.resnet50.preprocess_input, input_shape=[*IMAGE_SIZE, 3]
)
base_model = tf.keras.applications.ResNet50(weights="imagenet", include_top=False)
base_model.trainable = False
model = tf.keras.Sequential(
[
tf.keras.layers.BatchNormalization(renorm=True),
img_adjust_layer,
base_model,
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(8, activation="relu"),
tf.keras.layers.Dense(len(CLASSES), activation="softmax"),
]
)
model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=lr_scheduler, epsilon=0.001),
loss="sparse_categorical_crossentropy",
metrics=["sparse_categorical_accuracy"],
)
""""""
from tensorflow.keras.applications import EfficientNetB7
lr_scheduler = keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate=1e-5, decay_steps=10000, decay_rate=0.9
)
with strategy.scope():
img_adjust_layer = tf.keras.layers.Lambda(
tf.keras.applications.efficientnet.preprocess_input,
input_shape=[*IMAGE_SIZE, 3],
)
base_model = EfficientNetB7(weights="imagenet", include_top=False)
base_model.trainable = False
model = tf.keras.Sequential(
[
tf.keras.layers.BatchNormalization(renorm=True),
img_adjust_layer,
base_model,
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(8, activation="relu"),
tf.keras.layers.Dense(len(CLASSES), activation="softmax"),
]
)
model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=lr_scheduler, epsilon=0.001),
loss="sparse_categorical_crossentropy",
metrics=["sparse_categorical_accuracy"],
)
def count_data_items(filenames):
n = [
int(re.compile(r"-([0-9]*)\.").search(filename).group(1))
for filename in filenames
]
return np.sum(n)
NUM_TRAINING_IMAGES = count_data_items(TRAINING_FILENAMES)
NUM_VALIDATION_IMAGES = count_data_items(VALID_FILENAMES)
NUM_TEST_IMAGES = count_data_items(TEST_FILENAMES)
print(
"Dataset: {} training images, {} validation images, {} (unlabeled) test images".format(
NUM_TRAINING_IMAGES, NUM_VALIDATION_IMAGES, NUM_TEST_IMAGES
)
)
# load data
train_dataset = get_training_dataset()
valid_dataset = get_validation_dataset()
STEPS_PER_EPOCH = NUM_TRAINING_IMAGES // BATCH_SIZE
VALID_STEPS = NUM_VALIDATION_IMAGES // BATCH_SIZE
history = model.fit(
train_dataset,
steps_per_epoch=STEPS_PER_EPOCH,
epochs=EPOCHS,
validation_data=valid_dataset,
validation_steps=VALID_STEPS,
)
model.summary()
# print out variables available to us
print(history.history.keys())
# create learning curves to evaluate model performance
history_frame = pd.DataFrame(history.history)
history_frame.loc[:, ["loss", "val_loss"]].plot()
history_frame.loc[
:, ["sparse_categorical_accuracy", "val_sparse_categorical_accuracy"]
].plot()
# this code will convert our test image data to a float32
def to_float32(image, label):
return tf.cast(image, tf.float32), label
test_ds = get_test_dataset(ordered=True)
test_ds = test_ds.map(to_float32)
print("Computing predictions...")
test_images_ds = testing_dataset
test_images_ds = test_ds.map(lambda image, idnum: image)
probabilities = model.predict(test_images_ds)
predictions = np.argmax(probabilities, axis=-1)
print(predictions)
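# Sketch of how these predictions could be written out as a submission file. This cell is an
# addition rather than part of the referenced notebook; the image_id/label column names follow
# the competition's sample_submission format, which is an assumption here.
test_ids_ds = test_ds.map(lambda image, idnum: idnum).unbatch()
test_ids = [idnum.numpy().decode("utf-8") for idnum in test_ids_ds]  # eager iteration over the id strings
submission = pd.DataFrame({"image_id": test_ids, "label": predictions})
submission.to_csv("submission.csv", index=False)
submission.head()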
|
import pandas as pd # package for high-performance, easy-to-use data structures and data analysis
import numpy as np # fundamental package for scientific computing with Python
from pandas import DataFrame, Series
import matplotlib
import matplotlib.pyplot as plt # for plotting
import seaborn as sns # for making plots with seaborn
color = sns.color_palette() # init color object
import plotly.offline as py # create embed interactive plots
py.init_notebook_mode(connected=True)
from plotly.offline import init_notebook_mode, iplot
init_notebook_mode(connected=True)
import plotly.graph_objs as go
import plotly.offline as offline
offline.init_notebook_mode()
import plotly.tools as tls
import squarify
import re
from mpl_toolkits.basemap import Basemap
from numpy import array
from matplotlib import cm
from wordcloud import WordCloud
# Suppress unnecessary warnings so that the presentation looks clean
import warnings
warnings.filterwarnings("ignore")
# Print all rows and columns
# pd.set_option('display.max_columns', None)
# pd.set_option('display.max_rows', None)
from sklearn import preprocessing
from nltk.corpus import stopwords
from textblob import TextBlob
import datetime as dt
import warnings
import string
import time
# stop_words = []
stop_words = list(set(stopwords.words("english")))
warnings.filterwarnings("ignore")
punctuation = string.punctuation
def generate_data_quality(data):
d = data.dtypes[data.dtypes != "object"].index.values
data[d] = data[d].astype("float64")
mean = DataFrame({"mean": data[d].mean()})
std_dev = DataFrame({"std_dev": data[d].std()})
missing = DataFrame({"missing": data[d].isnull().sum()})
obs = DataFrame({"obs": np.repeat(data[d].shape[0], len(d))}, index=d)
missing_perc = DataFrame(
{"missing_perc": data[d].isnull().sum() / data[d].shape[0]}
)
minimum = DataFrame({"min": data[d].min()})
maximum = DataFrame({"max": data[d].max()})
unique = DataFrame({"unique": data[d].apply(lambda x: len(x.unique()), axis=0)})
q5 = DataFrame({"q5": data[d].apply(lambda x: x.dropna().quantile(0.05))})
q10 = DataFrame({"q10": data[d].apply(lambda x: x.dropna().quantile(0.10))})
q25 = DataFrame({"q25": data[d].apply(lambda x: x.dropna().quantile(0.25))})
q50 = DataFrame({"q50": data[d].apply(lambda x: x.dropna().quantile(0.50))})
q75 = DataFrame({"q75": data[d].apply(lambda x: x.dropna().quantile(0.75))})
q85 = DataFrame({"q85": data[d].apply(lambda x: x.dropna().quantile(0.85))})
q95 = DataFrame({"q95": data[d].apply(lambda x: x.dropna().quantile(0.95))})
q99 = DataFrame({"q99": data[d].apply(lambda x: x.dropna().quantile(0.99))})
DQ = pd.concat(
[
mean,
std_dev,
obs,
missing,
missing_perc,
minimum,
maximum,
unique,
q5,
q10,
q25,
q50,
q75,
q85,
q95,
q99,
],
axis=1,
)
c = data.dtypes[data.dtypes == "object"].index.values
Mean = DataFrame({"mean": np.repeat("Not Applicable", len(c))}, index=c)
Std_Dev = DataFrame({"std_dev": np.repeat("Not Applicable", len(c))}, index=c)
Missing = DataFrame({"missing": data[c].isnull().sum()})
Obs = DataFrame({"obs": np.repeat(data[d].shape[0], len(c))}, index=c)
Missing_perc = DataFrame(
{"missing_perc": data[c].isnull().sum() / data[c].shape[0]}
)
Minimum = DataFrame({"min": np.repeat("Not Applicable", len(c))}, index=c)
Maximum = DataFrame({"max": np.repeat("Not Applicable", len(c))}, index=c)
Unique = DataFrame({"unique": data[c].apply(lambda x: len(x.unique()), axis=0)})
Q5 = DataFrame({"q5": np.repeat("Not Applicable", len(c))}, index=c)
Q10 = DataFrame({"q10": np.repeat("Not Applicable", len(c))}, index=c)
Q25 = DataFrame({"q25": np.repeat("Not Applicable", len(c))}, index=c)
Q50 = DataFrame({"q50": np.repeat("Not Applicable", len(c))}, index=c)
Q75 = DataFrame({"q75": np.repeat("Not Applicable", len(c))}, index=c)
Q85 = DataFrame({"q85": np.repeat("Not Applicable", len(c))}, index=c)
Q95 = DataFrame({"q95": np.repeat("Not Applicable", len(c))}, index=c)
Q99 = DataFrame({"q99": np.repeat("Not Applicable", len(c))}, index=c)
dq = pd.concat(
[
Mean,
Std_Dev,
Obs,
Missing,
Missing_perc,
Minimum,
Maximum,
Unique,
Q5,
Q10,
Q25,
Q50,
Q75,
Q85,
Q95,
Q99,
],
axis=1,
)
DQ = pd.concat([DQ, dq])
DQ.to_csv("data_audit.csv")
from nltk.corpus import stopwords
REPLACE_BY_SPACE_RE = re.compile(r"[/(){}\[\]\|@,;]")
BAD_SYMBOLS_RE = re.compile("[^0-9a-z #+_]")
STOPWORDS = set(stopwords.words("english"))
def text_prepare(text):
"""
text: a string
return: modified initial string
"""
text = text.lower() # lowercase text
text = REPLACE_BY_SPACE_RE.sub(
" ", text
) # replace REPLACE_BY_SPACE_RE symbols by space in text
text = BAD_SYMBOLS_RE.sub(
"", text
) # delete symbols which are in BAD_SYMBOLS_RE from text
temp = [
s.strip() for s in text.split() if s not in STOPWORDS
] # delete stopwords from text
new_text = ""
for i in temp:
new_text += i + " "
text = new_text
return text.strip()
# import numpy as np # linear algebra
# import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# import os
# for dirname, _, filenames in os.walk('/kaggle/input'):
# for filename in filenames:
# print(os.path.join(dirname, filename))
# import matplotlib
# import matplotlib.pyplot as plt
# import seaborn as sns
# import statsmodels.api as sm
# %matplotlib inline
# from sklearn.model_selection import train_test_split
# from sklearn.linear_model import LinearRegression
# from sklearn.feature_selection import RFE
# from sklearn.linear_model import RidgeCV, LassoCV, Ridge, Lasso
# # Any results you write to the current directory are saved as output.
# #/kaggle/input/combined_data.csv
# #/kaggle/input/projects/sample_data_audit.csv
# #/kaggle/input/projects/projects.csv/projects.csv
# #/kaggle/input/projects/essays.csv/essays.csv
# Read the files in data frames
df_es = pd.read_csv("/kaggle/input/outcome-value/essays.csv/essays.csv")
df_out = pd.read_csv("/kaggle/input/outcome-value/outcomes.csv/outcomes.csv")
df_proj = pd.read_csv("/kaggle/input/outcome-value/projects.csv/projects.csv")
# Join the data frames based on the project id, leaving out rows which don't have any outcome / haven't been classified yet
df = (
df_proj.merge(
df_es, left_index=True, right_index=True, how="inner", suffixes=("", "_y")
)
).merge(df_out, left_index=True, right_index=True, how="inner", suffixes=("", "_y"))
df.drop(list(df.filter(regex="_y$")), axis=1, inplace=True)
# """print(df_out.shape)
# print(df_proj.shape)
# print(df_es.shape)
# print(df_out.head())
# print(df_proj.head())
# print(df_es.head())"""
print(df_out.shape)
print(df_proj.shape)
print(df_es.shape)
print(df.shape)
# To free up some memory use garbage collector and set the initial data frames as null
import gc
df_es = pd.DataFrame()
df_out = pd.DataFrame()
df_proj = pd.DataFrame()
del [[df_es, df_out, df_proj]]
gc.collect()
# # A look into the data tells us that lots of columns have 't', 'f' and NaN as their unique values.
# We need to remap t as 1 and f as 0.
# Let's create a dictionary and see which columns have at most 3 unique values in them
unique_dict = {}
for col in df.columns:
if len(df[col].unique()) < 4:
unique_dict[col] = df[col].unique()
# To simplify computation, let's mark NaN as 0 for the columns below -
# at_least_1_teacher_referred_donor
# at_least_1_green_donation
# three_or_more_non_teacher_referred_donors
# one_non_teacher_referred_donor_giving_100_plus
# donation_from_thoughtful_donor
# create the t/f map
t_f_map = {"t": 1, "f": 0, np.nan: 0}
# apply the map to all of these columns. This transforms the categorical values to numeric; we don't require one-hot encoding here
for col in unique_dict.keys():
df[col] = df[col].map(t_f_map)
generate_data_quality(df)
# # Quick check
# df['at_least_1_green_donation']
# df.describe()
#
# get the missing data in number/Percentages
total = df.isnull().sum().sort_values(ascending=False)
percent = (df.isnull().sum() / df.isnull().count() * 100).sort_values(ascending=False)
missing_data = pd.concat([total, percent], axis=1, keys=["Total", "Percent"])
missing_data.head(50)
# With almost 30% blank values in secondary_focus_subject and secondary_focus_area, there is no point in keeping these fields in the prediction model, as there is no way to gather this information.
df = df.drop(["secondary_focus_area", "secondary_focus_subject"], axis=1)
# df.to_csv('output.csv')
temp = df["is_exciting"].value_counts()
labels = temp.index
sizes = (temp / temp.sum()) * 100
trace = go.Pie(labels=labels, values=sizes, hoverinfo="label+percent")
layout = go.Layout(title="Project proposal is approved or not")
data = [trace]
fig = go.Figure(data=data, layout=layout)
py.iplot(fig)
# # Clearly there is a class imbalance in the data under analysis
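# A quick, sketch-level way to quantify this imbalance (and a number that could later be fed
# into a classifier's class-weight / scale_pos_weight parameter) is the negative-to-positive ratio:
class_counts = df["is_exciting"].value_counts()
imbalance_ratio = class_counts.loc[0] / class_counts.loc[1]
print(f"negative-to-positive ratio: {imbalance_ratio:.1f} : 1")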
temp = df["school_state"].value_counts()
# print("Total number of states : ",len(temp))
trace = go.Bar(
x=temp.index,
y=(temp / temp.sum()) * 100,
)
data = [trace]
layout = go.Layout(
title="Distribution of School states in % ",
xaxis=dict(title="State Name", tickfont=dict(size=14, color="rgb(107, 107, 107)")),
yaxis=dict(
title="Count of project proposals submitted in %",
titlefont=dict(size=16, color="rgb(107, 107, 107)"),
tickfont=dict(size=14, color="rgb(107, 107, 107)"),
),
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename="schoolStateNames")
# # So it's clear that CA has the highest number of submissions, followed by NY.
# This seems to be an important variable to consider. We need to check whether we can bin it into a few categories (a sketch follows below).
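# Sketch of the binning idea mentioned above (my own cut-off: the 10 most frequent states,
# with everything else grouped into an "OTHER" bucket). It adds a new column and leaves
# school_state itself untouched.
top_states = df["school_state"].value_counts().nlargest(10).index
df["school_state_binned"] = np.where(
    df["school_state"].isin(top_states), df["school_state"], "OTHER"
)
df["school_state_binned"].value_counts()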
temp = df["grade_level"].value_counts()
print("Total number of project grade categories : ", len(temp))
trace = go.Bar(
x=temp.index,
y=(temp / temp.sum()) * 100,
)
data = [trace]
layout = go.Layout(
title="Distribution of project_grade_category (school grade levels) in %",
xaxis=dict(
title="school grade levels", tickfont=dict(size=14, color="rgb(107, 107, 107)")
),
yaxis=dict(
title="Count of project proposals submitted in % ",
titlefont=dict(size=16, color="rgb(107, 107, 107)"),
tickfont=dict(size=14, color="rgb(107, 107, 107)"),
),
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename="schoolStateNames")
# We need to get dummies for this column (a sketch follows below). Out of the 4 school grade levels, project proposal submission is highest for **Grades PreK-2** at approximately **41 %**, followed by **Grades 3-5** at approximately **34 %**.
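# Sketch of the dummy-encoding step suggested above (kept in a separate frame so the EDA
# below is unaffected):
grade_dummies = pd.get_dummies(df["grade_level"], prefix="grade")
grade_dummies.head()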
# Need to check impact of primary_focus_subject and primary_focus_area
df["primary_focus_subject"].unique()
df["primary_focus_area"].unique()
temp = df["primary_focus_area"].value_counts()
print("Total number of project based on focus area : ", len(temp))
trace = go.Bar(
x=temp.index,
y=(temp / temp.sum()) * 100,
)
data = [trace]
layout = go.Layout(
title="Distribution of primary_focus_area (school grade levels) in %",
xaxis=dict(
title="Primary focus area", tickfont=dict(size=14, color="rgb(107, 107, 107)")
),
yaxis=dict(
title="Count of project proposals submitted in % ",
titlefont=dict(size=16, color="rgb(107, 107, 107)"),
tickfont=dict(size=14, color="rgb(107, 107, 107)"),
),
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename="focusArea")
temp = df["primary_focus_subject"].value_counts()
print("Total number of project based on focus subject : ", len(temp))
trace = go.Bar(
x=temp.index,
y=(temp / temp.sum()) * 100,
)
data = [trace]
layout = go.Layout(
title="Distribution of primary_focus_Subject (school grade levels) in %",
xaxis=dict(
title="Primary focus Subject",
tickfont=dict(size=14, color="rgb(107, 107, 107)"),
),
yaxis=dict(
title="Count of project proposals submitted in % ",
titlefont=dict(size=16, color="rgb(107, 107, 107)"),
tickfont=dict(size=14, color="rgb(107, 107, 107)"),
),
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig, filename="focusSubject")
total_cnt = df["primary_focus_area"].value_counts()
total_exiting = df["primary_focus_area"][df["is_exciting"] == 1].value_counts()
impact = pd.concat([total_cnt, total_exiting], axis=1, keys=["Total", "existing"])
impact["percentage_sucess"] = (impact["existing"] / impact["Total"]) * 100
impact.head(25)
total_cnt = df["primary_focus_subject"].value_counts()
total_exiting = df["primary_focus_subject"][df["is_exciting"] == 1].value_counts()
impact = pd.concat([total_cnt, total_exiting], axis=1, keys=["Total", "existing"])
impact["percentage_sucess"] = (impact["existing"] / impact["Total"]) * 100
impact.head(25)
# Dropping the primary_focus_subject parameter, as it does not provide any more insight than simply using primary_focus_area;
# moreover, the success percentage is similar across categories.
df = df.drop(["primary_focus_subject"], axis=1)
df["is_exciting"][df["teacher_prefix"].isnull()]
temp = df["school_state"].value_counts()
# print(temp.values)
temp_y0 = []
temp_y1 = []
for val in temp.index:
temp_y1.append(np.sum(df["is_exciting"][df["school_state"] == val] == 1))
temp_y0.append(np.sum(df["is_exciting"][df["school_state"] == val] == 0))
trace1 = go.Bar(x=temp.index, y=temp_y1, name="Accepted Proposals")
trace2 = go.Bar(x=temp.index, y=temp_y0, name="Rejected Proposals")
data = [trace1, trace2]
layout = go.Layout(
title="Popular School states in terms of project acceptance rate and project rejection rate",
barmode="stack",
width=1000,
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig)
# Again, CA and NY matter far more than the remaining states (from FL onwards).
df["school_state"].value_counts()
temp = df["teacher_prefix"].value_counts()
temp_y0 = []
temp_y1 = []
for val in temp.index:
temp_y1.append(np.sum(df["is_exciting"][df["teacher_prefix"] == val] == 1))
temp_y0.append(np.sum(df["is_exciting"][df["teacher_prefix"] == val] == 0))
trace1 = go.Bar(x=temp.index, y=temp_y1, name="Accepted Proposals")
trace2 = go.Bar(x=temp.index, y=temp_y0, name="Rejected Proposals")
data = [trace1, trace2]
layout = go.Layout(
title="Popular Teacher prefixes in terms of project acceptance rate and project rejection rate",
barmode="stack",
width=1000,
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig)
# There is a clear imbalance of gender vs. submissions
df.isnull().sum()
df["teacher_prefix"].value_counts()
print(df["school_metro"].value_counts())
print("Number of unique county - ", len(df["school_county"].unique()))
print("Number of unique school_metro - ", len(df["school_metro"].unique()))
print("Number of unique school_city - ", len(df["school_city"].unique()))
df = df.drop(
[
"school_metro",
"school_zip",
"school_city",
"school_longitude",
"school_ncesid",
"school_latitude",
],
axis=1,
)
# create the map: simplify teacher_prefix into a binary gender flag (1 = female prefix, 0 = otherwise)
gender_map = {"Mrs.": 1, "Ms.": 1, "Mr.": 0, "Dr.": 0, "Mr. & Mrs.": 0}
df["teacher_prefix"] = df["teacher_prefix"].map(gender_map)
temp = df["title"].value_counts().head(20)
# print(temp.values)
temp_y0 = []
temp_y1 = []
for val in temp.index:
temp_y1.append(np.sum(df["is_exciting"][df["title"] == val] == 1))
temp_y0.append(np.sum(df["is_exciting"][df["title"] == val] == 0))
trace1 = go.Bar(x=temp.index, y=temp_y1, name="Accepted Proposals")
trace2 = go.Bar(x=temp.index, y=temp_y0, name="Rejected Proposals")
data = [trace1, trace2]
layout = go.Layout(
title="Popular project titles in terms of project acceptance rate and project rejection rate",
barmode="stack",
width=1000,
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig)
temp = pd.DataFrame(df["school_state"].value_counts()).reset_index()
temp.columns = ["state_code", "num_proposals"]
data = [
dict(
type="choropleth",
locations=temp["state_code"],
locationmode="USA-states",
z=temp["num_proposals"].astype(float),
text=temp["state_code"],
colorscale="Red",
marker=dict(line=dict(width=0.7)),
colorbar=dict(
autotick=False, tickprefix="", title="Number of project proposals"
),
)
]
layout = dict(
title="Project Proposals by US States",
geo=dict(
scope="usa",
projection=dict(type="albers usa"),
showlakes=True,
lakecolor="rgb(255, 255, 255)",
),
)
fig = dict(data=data, layout=layout)
py.iplot(fig, validate=False)
df["date_posted"] = pd.to_datetime(df["date_posted"])
df["month_created"] = df["date_posted"].dt.month
df["weekday_created"] = df["date_posted"].dt.weekday
df["date_created"] = df["date_posted"].dt.date
temp = df["month_created"].value_counts()
# print(temp.values)
temp_y0 = []
temp_y1 = []
for val in temp.index:
temp_y1.append(np.sum(df["is_exciting"][df["month_created"] == val] == 1))
temp_y0.append(np.sum(df["is_exciting"][df["month_created"] == val] == 0))
trace1 = go.Bar(x=temp.index, y=temp_y1, name="Accepted Proposals")
trace2 = go.Bar(x=temp.index, y=temp_y0, name="Rejected Proposals")
data = [trace1, trace2]
layout = go.Layout(
title="Project Proposal Submission Month Distribution", barmode="stack", width=1000
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig)
temp = df["weekday_created"].value_counts()
# print(temp.values)
temp_y0 = []
temp_y1 = []
for val in temp.index:
temp_y1.append(np.sum(df["is_exciting"][df["weekday_created"] == val] == 1))
temp_y0.append(np.sum(df["is_exciting"][df["weekday_created"] == val] == 0))
temp.index = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
trace1 = go.Bar(x=temp.index, y=temp_y1, name="Accepted Proposals")
trace2 = go.Bar(x=temp.index, y=temp_y0, name="Rejected Proposals")
data = [trace1, trace2]
layout = go.Layout(
title="Project Proposal Submission weekday Distribution",
barmode="stack",
width=1000,
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig)
# Project Proposals Mean Acceptance Rate by US States
temp = pd.DataFrame(
df.groupby("school_state")["is_exciting"].apply(np.mean)
).reset_index()
temp.columns = ["state_code", "num_proposals"]
data = [
dict(
type="choropleth",
locations=temp["state_code"],
locationmode="USA-states",
z=temp["num_proposals"].astype(float),
text=temp["state_code"],
colorscale="Red",
marker=dict(line=dict(width=0.7)),
colorbar=dict(
            autotick=False, tickprefix="", title="Mean acceptance rate"
),
)
]
layout = dict(
title="Project Proposals Mean Acceptance Rate by US States",
geo=dict(
scope="usa",
projection=dict(type="albers usa"),
showlakes=True,
lakecolor="rgb(255, 255, 255)",
),
)
fig = dict(data=data, layout=layout)
py.iplot(fig, validate=False)
# Teacher_prefix and is_exciting Intervals Correlation
cols = ["teacher_prefix", "is_exciting"]
cm = sns.light_palette("red", as_cmap=True)
pd.crosstab(df[cols[0]], df[cols[1]]).style.background_gradient(cmap=cm)
# #Teacher_number_of_previously_posted_projects and project_is_approved Intervals Correlation
# cols = ['teacher_number_of_previously_posted_projects', 'project_is_approved']
# cm = sns.light_palette("red", as_cmap=True)
# pd.crosstab(df[cols[0]], df[cols[1]]).style.background_gradient(cmap = cm)
# Correlation Matrix
corr = df.corr()
plt.figure(figsize=(12, 12))
sns.heatmap(
corr,
xticklabels=corr.columns.values,
yticklabels=corr.columns.values,
annot=True,
cmap="cubehelix",
square=True,
)
plt.title("Correlation between different features")
pd.DataFrame(corr).to_csv("corr.csv")
corr
df.describe(exclude=None)
# Create a combined price feature from total_price_excluding_optional_support and
# total_price_including_optional_support (the two are highly correlated), then
# mean-center it and scale it by its range.
df["price"] = (
df["total_price_excluding_optional_support"]
+ df["total_price_including_optional_support"]
)
df["price"] = (df["price"] - df["price"].mean()) / (
df["price"].max() - df["price"].min()
)
df["price"].describe()
df["school_nlns"].value_counts()
df = df.drop(
[
"total_price_excluding_optional_support",
"total_price_including_optional_support",
],
axis=1,
)
temp = df["title"].value_counts().head(25)
# print(temp.values)
temp_y0 = []
temp_y1 = []
for val in temp.index:
temp_y1.append(np.sum(df["is_exciting"][df["title"] == val] == 1))
temp_y0.append(np.sum(df["is_exciting"][df["title"] == val] == 0))
trace1 = go.Bar(x=temp.index, y=temp_y1, name="Accepted Proposals")
trace2 = go.Bar(x=temp.index, y=temp_y0, name="Rejected Proposals")
data = [trace1, trace2]
layout = go.Layout(
title="Popular project titles in terms of project acceptance rate and project rejection rate",
barmode="stack",
width=1000,
)
fig = go.Figure(data=data, layout=layout)
py.iplot(fig)
df.info()
second_analysis_false = {}
second_analysis_true = {}
for col in df.columns:
if len(df[col].unique()) < 30:
second_analysis_false[col] = (
df[col][df["is_exciting"] == 0].value_counts().to_frame()
)
second_analysis_true[col] = (
df[col][df["is_exciting"] == 1].value_counts().to_frame()
)
with open("second_analysis_fasle.csv", "w") as f:
for key in second_analysis_false.keys():
f.write("%s,%s\n" % (key, second_analysis_false[key]))
with open("second_analysis_true.csv", "w") as f:
for key in second_analysis_true.keys():
f.write("%s,%s\n" % (key, second_analysis_true[key]))
# The tables above clearly show the importance of the following parameters:
# fully_funded, at_least_1_green_donation, great_chat,
# three_or_more_non_teacher_referred_donors,
# one_non_teacher_referred_donor_giving_100_plus
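# A direct check of this (sketch; assumes these outcome columns are present in df):
# the mean of is_exciting conditioned on each flag.
for _flag in [
    "fully_funded",
    "at_least_1_green_donation",
    "great_chat",
    "three_or_more_non_teacher_referred_donors",
    "one_non_teacher_referred_donor_giving_100_plus",
]:
    if _flag in df.columns:
        print(_flag)
        print(df.groupby(_flag)["is_exciting"].mean())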
df["is_exciting"][df["grade_level"].isnull()]
(df["is_exciting"][df["resource_type"].isnull()]).value_counts()
(df["is_exciting"][df["title"].isnull()]).value_counts()
(df["is_exciting"][df["essay"].isnull()]).value_counts()
(df["is_exciting"][df["short_description"].isnull()]).value_counts()
df["grade_level"].dropna(inplace=True)
temp_data = df.dropna(subset=["short_description"])
# converting into lowercase
temp_data["short_description"] = temp_data["short_description"].apply(
lambda x: " ".join(x.lower() for x in x.split())
)
temp_data["short_description"] = temp_data["short_description"].map(text_prepare)
wordcloud = WordCloud(max_font_size=50, width=600, height=300).generate(
" ".join(temp_data["short_description"].values)
)
plt.figure(figsize=(15, 8))
plt.imshow(wordcloud)
plt.title("Word Cloud of short_description", fontsize=35)
plt.axis("off")
plt.show()
temp_data = df.dropna(subset=["essay"])
# converting into lowercase
temp_data["essay"] = temp_data["essay"].apply(
lambda x: " ".join(x.lower() for x in x.split())
)
temp_data["essay"] = temp_data["essay"].map(text_prepare)
wordcloud = WordCloud(max_font_size=50, width=600, height=300).generate(
" ".join(temp_data["essay"].values)
)
plt.figure(figsize=(15, 8))
plt.imshow(wordcloud)
plt.title("Word Cloud of essay", fontsize=35)
plt.axis("off")
plt.show()
print("Building model using Light GBM and finding AUC(Area Under Curve)")
from sklearn.model_selection import train_test_split
X_train_l, X_valid_l, y_train_l, y_valid_l = train_test_split(
train_data, y, test_size=0.10, random_state=2018
)
params = {
"boosting_type": "gbdt",
"objective": "binary",
"metric": "auc",
"max_depth": 7,
"num_leaves": 32,
"learning_rate": 0.02,
"feature_fraction": 0.80,
"bagging_fraction": 0.80,
"bagging_freq": 5,
"verbose": 0,
"lambda_l2": 1,
}
import lightgbm as lgb
evals_result = {} # to record eval results for plotting
model_lgb = lgb.train(
params,
lgb.Dataset(X_train_l, y_train_l),
num_boost_round=10000,
valid_sets=[lgb.Dataset(X_valid_l, y_valid_l)],
early_stopping_rounds=100,
evals_result=evals_result,
verbose_eval=25,
)
from sklearn.metrics import roc_auc_score
valid_preds_lgb = model_lgb.predict(X_valid_l, num_iteration=model_lgb.best_iteration)
test_preds = model_lgb.predict(test_data, num_iteration=model_lgb.best_iteration)
auc = roc_auc_score(y_valid_l, valid_preds_lgb)
print("AUC:", auc)
print("Building model using XGBoost and finding AUC(Area Under Curve)")
import xgboost as xgb
xgb_params = {
"eta": 0.2,
"max_depth": 5,
"subsample": 0.8,
"colsample_bytree": 0.8,
"objective": "binary:logistic",
"eval_metric": "auc",
"seed": 1234,
}
X_train_x, X_valid_x, y_train_x, y_valid_x = train_test_split(
train_data, y, test_size=0.33, random_state=2018
)
d_train = xgb.DMatrix(X_train_x, y_train_x)
d_valid = xgb.DMatrix(X_valid_x, y_valid_x)
d_test = xgb.DMatrix(test_data)
watchlist = [(d_train, "train"), (d_valid, "valid")]
model_xgb = xgb.train(
xgb_params, d_train, 500, watchlist, verbose_eval=50, early_stopping_rounds=20
)
xgb_pred_test = model_xgb.predict(d_test)
xgb_pred_valid = model_xgb.predict(d_valid)
auc = roc_auc_score(y_valid_x, xgb_pred_valid)
print("AUC:", auc)
from sklearn.metrics import roc_curve
from sklearn import metrics
fpr, tpr, thresholds = roc_curve(y_valid_l, valid_preds_lgb)
roc_auc = metrics.auc(fpr, tpr)
fpr_1, tpr_1, thresholds_1 = roc_curve(y_valid_x, xgb_pred_valid)
roc_auc_1 = metrics.auc(fpr_1, tpr_1)
plt.figure(figsize=(15, 8))
plt.title("Receiver Operating Characteristic for different ML algorithms")
plt.plot(fpr, tpr, "b", label="LGBM-AUC = %0.2f" % roc_auc)
plt.plot(fpr_1, tpr_1, "g", label="XGBoost-AUC = %0.2f" % roc_auc_1)
plt.legend(loc="upper left")
plt.plot([0, 1], [0, 1], "r--")
plt.xlim([0, 1])
plt.ylim([0, 1])
plt.ylabel("True Positive Rate")
plt.xlabel("False Positive Rate")
plt.show()
from sklearn.metrics import precision_recall_curve
precision, recall, threshold = precision_recall_curve(y_valid_l, valid_preds_lgb)
def plot_precision_and_recall(precision, recall, threshold):
plt.plot(threshold, precision[:-1], "r-", label="precision", linewidth=5)
plt.plot(threshold, recall[:-1], "b", label="recall", linewidth=5)
plt.xlabel("threshold", fontsize=19)
plt.legend(loc="upper right", fontsize=19)
plt.ylim([0, 1])
plt.figure(figsize=(15, 8))
plot_precision_and_recall(precision, recall, threshold)
plt.show()
def plot_precision_vs_recall(precision, recall):
plt.plot(recall, precision, "g--", linewidth=2.5)
plt.ylabel("recall", fontsize=19)
plt.xlabel("precision", fontsize=19)
plt.axis([0, 1.5, 0, 1.5])
plt.figure(figsize=(15, 8))
plot_precision_vs_recall(precision, recall)
plt.show()
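# A common follow-up (sketch): choose the operating threshold that maximizes F1 on the
# validation predictions instead of using the default 0.5 cut-off.
_f1 = 2 * precision * recall / (precision + recall + 1e-12)
_best_idx = np.argmax(_f1[:-1])
print("best threshold by F1:", threshold[_best_idx], "F1:", _f1[_best_idx])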
print("Plot metrics during training...")
ax = lgb.plot_metric(evals_result, metric="auc")
plt.figure(figsize=(15, 8))
plt.show()
lgb.plot_importance(model_lgb, max_num_features=12)
plt.title("Feature importances by LightGBM")
plt.yticks(fontsize=14)
plt.xticks(fontsize=14)
plt.show()
|
# Libraries and notebook configuration
import pandas as pd
import json
import os
import wordcloud
from datetime import datetime
# Read the JSON files and merge them into a single table
# List of all JSON files in the working directory
dir = r"/kaggle/input/hydepark"
json_list = [
os.path.join(dir, file) for file in os.listdir(dir) if file.endswith(".json")
]
# Function to fix the text encoding
def parse_obj(obj):
for key in obj:
if isinstance(obj[key], str):
obj[key] = obj[key].encode("latin_1").decode("utf-8")
elif isinstance(obj[key], list):
obj[key] = list(
map(
lambda x: x
if type(x) != str
else x.encode("latin_1").decode("utf-8"),
obj[key],
)
)
pass
return obj
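# Quick illustration (sketch) of what parse_obj repairs: Messenger exports contain
# UTF-8 byte sequences that were decoded as latin-1, so re-encoding and decoding
# restores the original Polish characters.
_broken = {"content": "żółw".encode("utf-8").decode("latin_1")}
print(parse_obj(_broken))  # {'content': 'żółw'}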
# List of dataframes built from the individual files
json_df_list = []
for json_file in json_list:
text = open(json_file).read()
data = json.loads(text, object_hook=parse_obj)
df = pd.DataFrame(data["messages"])
    # Convert the millisecond timestamp to a datetime and use it as the row index
df["timestamp_ms"] = df["timestamp_ms"].apply(
lambda x: datetime.fromtimestamp(x / 1000)
)
df.set_index(["timestamp_ms"], inplace=True)
json_df_list.append(df)
master_df = pd.concat(json_df_list, sort=False, axis=0).sort_index()
# # Preliminary analysis
# ### message length, hour, content, timeline
master_df["message_length"] = master_df["content"].apply(lambda x: len(str(x)))
master_df["message_length"].hist(
    by=master_df["sender_name"], range=[0, 100], figsize=(20, 20)
)
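# A minimal sketch of the "timeline" mentioned above (assumes master_df keeps the
# datetime index built earlier): number of messages per day.
import matplotlib.pyplot as plt
master_df.resample("D").size().plot(figsize=(15, 5), title="Messages per day")
plt.show()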
|
import pickle
import gc
import os
import cv2
from cv2 import resize
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from pathlib import Path
from sklearn.utils.class_weight import compute_class_weight
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from typing import Union
from typing import List
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
DEFAULT_H, DEFAULT_W = 137, 236
SIZE = 128
LABEL_PATH = Path("train.csv")
DATADIR = Path("/kaggle/input/bengaliai-cv19")
FEATHERDIR = Path("/kaggle/input/bengaliaicv19feather")
TEST_FEATHER_FORM = "test_image_data_ID.feather"
TRAIN_FEATHER_FORM = "train_image_data_ID.feather"
WEIGHTS_FILE = "/kaggle/input/julien-4-epochs-densenet121-bengali/model.pt"
test = pd.read_csv(DATADIR / "test.csv")
train = pd.read_csv(DATADIR / "train.csv")
train_labels = train[["grapheme_root", "vowel_diacritic", "consonant_diacritic"]].values
submission_df = pd.read_csv(DATADIR / "sample_submission.csv")
# loading feather format files
def load_images(train_test, indices=["0", "1", "2", "3"]):
"""
    Utility function to load the images from the feather files and return them as one array
:param train_test:
:return:
"""
path_form = {"train": TRAIN_FEATHER_FORM, "test": TEST_FEATHER_FORM}[train_test]
imgs_list = []
# sequentially load all four files.
for id in indices:
# Form the path of the files.
path = FEATHERDIR / path_form.replace("ID", id)
print("Loading", path)
df = pd.read_feather(path)
imgs = df.iloc[:, 1:].to_numpy()
imgs_list.append(imgs)
del imgs
gc.collect()
imgs_list = np.concatenate(imgs_list)
imgs_list = imgs_list.reshape(-1, DEFAULT_H, DEFAULT_W)
return imgs_list
def get_data(train_test, indices=["0", "1", "2", "3"]):
"""
    A combined function to load the images and, for the train split, the labels.
:return:
"""
# Load all images into a variable.
imgs = load_images(train_test, indices=indices)
if train_test == "train":
labels = load_labels()
all_data = list(zip(imgs, labels))
else:
all_data = imgs
return all_data
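# load_labels is referenced in get_data but not defined in this notebook; a minimal
# sketch under the assumption that the labels are the three target columns of
# train.csv loaded above (this branch is only taken when train_test == "train").
def load_labels():
    return train_labels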
# compute label weights
def compute_labeled_weights():
grapheme_labels = train_labels[:, 0]
vowel_labels = train_labels[:, 1]
consonant_labels = train_labels[:, 2]
grapheme_weights = np.clip(
compute_class_weight(
"balanced", list(range(np.max(grapheme_labels) + 1)), grapheme_labels
),
0.5,
3,
)
vowel_weights = np.clip(
compute_class_weight(
"balanced", list(range(np.max(vowel_labels) + 1)), vowel_labels
),
0.5,
3,
)
consonant_weights = np.clip(
compute_class_weight(
"balanced", list(range(np.max(consonant_labels) + 1)), consonant_labels
),
0.5,
3,
)
weights = {
"grapheme": grapheme_weights,
"vowel": vowel_weights,
"consonant": consonant_weights,
}
return weights
weights = compute_labeled_weights()
# use a dictionary instead of the YAML config file
dataset_cfg = {
"aug_cfg": {
"RandomBrightnessContrast": 0.5,
"Gauss_Noise": 0.5,
"blurring_prob": 0.5,
"rotation_prob": 0.25,
"rotation_degree": 20,
"grid_distrortion_prob": 0.25,
"coarse_dropout_prob": 0.5,
"resize_shape": (128, 128),
"crop": True,
"to_rgb": True,
"normalize_mean": 0.069228,
"normalize_std": 0.20515,
}
}
def content_crop(img):
"""
cut out the section of image where there is the most of character
:param img: raw black white image, scale [0 to 255]
:return: cut out img
"""
y_list, x_list = np.where(img < 235)
x_min, x_max = np.min(x_list), np.max(x_list)
y_min, y_max = np.min(y_list), np.max(y_list)
img = img[y_min:y_max, x_min:x_max]
return img
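# Quick illustration of content_crop (sketch): on a white canvas with one dark block,
# the crop keeps only the block's bounding box (note the exclusive upper slice bound).
_demo = np.full((DEFAULT_H, DEFAULT_W), 255, dtype=np.uint8)
_demo[40:90, 100:150] = 0
print(content_crop(_demo).shape)  # (49, 49)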
# Removed the data augmentations from the Preprocessor since we don't need them for inference
class Preprocessor(object):
def __init__(self, dataset_cfg):
aug_cfg = dataset_cfg["aug_cfg"]
self.resize_shape = aug_cfg["resize_shape"]
self.crop = aug_cfg["crop"]
self.to_rgb = aug_cfg["to_rgb"]
self.normalize_mean = aug_cfg["normalize_mean"]
self.normalize_std = aug_cfg["normalize_std"]
def __call__(self, img, normalize=True):
if self.crop:
img = content_crop(img)
img = resize(img, self.resize_shape)
if self.to_rgb:
img = np.repeat(np.expand_dims(img, axis=-1), 3, axis=-1)
if not normalize:
return img
# normalize to 0-1
img = img / 255
if self.normalize_mean is not None:
img = (img - self.normalize_mean) / self.normalize_std
return img
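# Minimal usage sketch of the Preprocessor with dataset_cfg above: a raw grayscale
# (137, 236) page image becomes a normalized (128, 128, 3) array ready for the model.
_pre = Preprocessor(dataset_cfg)
_raw = np.full((DEFAULT_H, DEFAULT_W), 255, dtype=np.uint8)
_raw[40:90, 100:150] = 0
print(_pre(_raw).shape)  # (128, 128, 3)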
# Modified the Dataset to return an image and the name of the image
class BengaliDataset(Dataset):
"""
Torch data set object for the bengali data
"""
def __init__(self, data_list: List, data_cfg, fname, indices=None):
"""
:param data_list: list of raw data consists of (image, labels)
:param data_cfg: data config node
"""
self.data_list = data_list
self.data_size = len(data_list)
if indices is None:
indices = np.arange(self.data_size)
self.indices = indices
self.preprocessor = Preprocessor(data_cfg)
# get image names
if fname:
self.df = pd.read_feather(FEATHERDIR / fname)
self.fname = fname
def __len__(self) -> int:
return len(self.indices)
def __getitem__(self, idx: int) -> (np.ndarray, np.ndarray):
idx = self.indices[idx]
img = self.data_list[idx]
x = self.preprocessor(img)
name = self.df.iloc[idx, 0]
return x, name
# collator was changed to return images and names
class BengaliDataBatchCollator(object):
"""
Custom collator
"""
def __init__(self):
pass
def __call__(self, batch: List) -> (torch.Tensor, torch.Tensor):
"""
:param batch:
:return:
"""
inputs = np.array([x[0] for x in batch])
inputs = torch.tensor(inputs)
inputs = inputs.permute([0, 3, 1, 2])
names = [x[1] for x in batch]
return inputs, names
# densenet121 - copy pasted from Pytorch github
import re
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as cp
from collections import OrderedDict
# from .utils import load_state_dict_from_url
from torch import Tensor
from torch.jit.annotations import List
from torch.hub import load_state_dict_from_url
__all__ = ["DenseNet", "densenet121"]
model_urls = {
"densenet121": "https://download.pytorch.org/models/densenet121-a639ec97.pth",
}
class _DenseLayer(nn.Module):
def __init__(
self,
num_input_features,
growth_rate,
bn_size,
drop_rate,
memory_efficient=False,
):
super(_DenseLayer, self).__init__()
self.add_module("norm1", nn.BatchNorm2d(num_input_features)),
self.add_module("relu1", nn.ReLU(inplace=True)),
self.add_module(
"conv1",
nn.Conv2d(
num_input_features,
bn_size * growth_rate,
kernel_size=1,
stride=1,
bias=False,
),
),
self.add_module("norm2", nn.BatchNorm2d(bn_size * growth_rate)),
self.add_module("relu2", nn.ReLU(inplace=True)),
self.add_module(
"conv2",
nn.Conv2d(
bn_size * growth_rate,
growth_rate,
kernel_size=3,
stride=1,
padding=1,
bias=False,
),
),
self.drop_rate = float(drop_rate)
self.memory_efficient = memory_efficient
def bn_function(self, inputs):
# type: (List[Tensor]) -> Tensor
concated_features = torch.cat(inputs, 1)
bottleneck_output = self.conv1(
self.relu1(self.norm1(concated_features))
) # noqa: T484
return bottleneck_output
# todo: rewrite when torchscript supports any
def any_requires_grad(self, input):
# type: (List[Tensor]) -> bool
for tensor in input:
if tensor.requires_grad:
return True
return False
@torch.jit.unused # noqa: T484
def call_checkpoint_bottleneck(self, input):
# type: (List[Tensor]) -> Tensor
def closure(*inputs):
return self.bn_function(*inputs)
return cp.checkpoint(closure, input)
@torch.jit._overload_method # noqa: F811
def forward(self, input):
# type: (List[Tensor]) -> (Tensor)
pass
@torch.jit._overload_method # noqa: F811
def forward(self, input):
# type: (Tensor) -> (Tensor)
pass
# torchscript does not yet support *args, so we overload method
# allowing it to take either a List[Tensor] or single Tensor
def forward(self, input): # noqa: F811
if isinstance(input, Tensor):
prev_features = [input]
else:
prev_features = input
if self.memory_efficient and self.any_requires_grad(prev_features):
if torch.jit.is_scripting():
raise Exception("Memory Efficient not supported in JIT")
bottleneck_output = self.call_checkpoint_bottleneck(prev_features)
else:
bottleneck_output = self.bn_function(prev_features)
new_features = self.conv2(self.relu2(self.norm2(bottleneck_output)))
if self.drop_rate > 0:
new_features = F.dropout(
new_features, p=self.drop_rate, training=self.training
)
return new_features
class _DenseBlock(nn.ModuleDict):
_version = 2
def __init__(
self,
num_layers,
num_input_features,
bn_size,
growth_rate,
drop_rate,
memory_efficient=False,
):
super(_DenseBlock, self).__init__()
for i in range(num_layers):
layer = _DenseLayer(
num_input_features + i * growth_rate,
growth_rate=growth_rate,
bn_size=bn_size,
drop_rate=drop_rate,
memory_efficient=memory_efficient,
)
self.add_module("denselayer%d" % (i + 1), layer)
def forward(self, init_features):
features = [init_features]
for name, layer in self.items():
new_features = layer(features)
features.append(new_features)
return torch.cat(features, 1)
class _Transition(nn.Sequential):
def __init__(self, num_input_features, num_output_features):
super(_Transition, self).__init__()
self.add_module("norm", nn.BatchNorm2d(num_input_features))
self.add_module("relu", nn.ReLU(inplace=True))
self.add_module(
"conv",
nn.Conv2d(
num_input_features,
num_output_features,
kernel_size=1,
stride=1,
bias=False,
),
)
self.add_module("pool", nn.AvgPool2d(kernel_size=2, stride=2))
class DenseNet(nn.Module):
r"""Densenet-BC model class, based on
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
growth_rate (int) - how many filters to add each layer (`k` in paper)
block_config (list of 4 ints) - how many layers in each pooling block
num_init_features (int) - the number of filters to learn in the first convolution layer
bn_size (int) - multiplicative factor for number of bottle neck layers
(i.e. bn_size * k features in the bottleneck layer)
drop_rate (float) - dropout rate after each dense layer
num_classes (int) - number of classification classes
memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
"""
__constants__ = ["features"]
def __init__(
self,
growth_rate=32,
block_config=(6, 12, 24, 16),
num_init_features=64,
bn_size=4,
drop_rate=0,
num_classes=1000,
memory_efficient=False,
):
super(DenseNet, self).__init__()
# First convolution
self.features = nn.Sequential(
OrderedDict(
[
(
"conv0",
nn.Conv2d(
3,
num_init_features,
kernel_size=7,
stride=2,
padding=3,
bias=False,
),
),
("norm0", nn.BatchNorm2d(num_init_features)),
("relu0", nn.ReLU(inplace=True)),
("pool0", nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
]
)
)
# Each denseblock
num_features = num_init_features
for i, num_layers in enumerate(block_config):
block = _DenseBlock(
num_layers=num_layers,
num_input_features=num_features,
bn_size=bn_size,
growth_rate=growth_rate,
drop_rate=drop_rate,
memory_efficient=memory_efficient,
)
self.features.add_module("denseblock%d" % (i + 1), block)
num_features = num_features + num_layers * growth_rate
if i != len(block_config) - 1:
trans = _Transition(
num_input_features=num_features,
num_output_features=num_features // 2,
)
self.features.add_module("transition%d" % (i + 1), trans)
num_features = num_features // 2
# Final batch norm
self.features.add_module("norm5", nn.BatchNorm2d(num_features))
# Linear layer
self.classifier = nn.Linear(num_features, num_classes)
# Official init from torch repo.
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.constant_(m.bias, 0)
def forward(self, x):
features = self.features(x)
out = F.relu(features, inplace=True)
out = F.adaptive_avg_pool2d(out, (1, 1))
out = torch.flatten(out, 1)
out = self.classifier(out)
return out
def _load_state_dict(model, model_url, progress):
# '.'s are no longer allowed in module names, but previous _DenseLayer
# has keys 'norm.1', 'relu.1', 'conv.1', 'norm.2', 'relu.2', 'conv.2'.
# They are also in the checkpoints in model_urls. This pattern is used
# to find such keys.
pattern = re.compile(
r"^(.*denselayer\d+\.(?:norm|relu|conv))\.((?:[12])\.(?:weight|bias|running_mean|running_var))$"
)
state_dict = load_state_dict_from_url(model_url, progress=progress)
for key in list(state_dict.keys()):
res = pattern.match(key)
if res:
new_key = res.group(1) + res.group(2)
state_dict[new_key] = state_dict[key]
del state_dict[key]
model.load_state_dict(state_dict)
def _densenet(
arch, growth_rate, block_config, num_init_features, pretrained, progress, **kwargs
):
model = DenseNet(growth_rate, block_config, num_init_features, **kwargs)
if pretrained:
_load_state_dict(model, model_urls[arch], progress)
return model
def densenet121(pretrained=False, progress=True, **kwargs):
r"""Densenet-121 model from
`"Densely Connected Convolutional Networks" <https://arxiv.org/pdf/1608.06993.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
memory_efficient (bool) - If True, uses checkpointing. Much more memory efficient,
but slower. Default: *False*. See `"paper" <https://arxiv.org/pdf/1707.06990.pdf>`_
"""
return _densenet(
"densenet121", 32, (6, 12, 24, 16), 64, pretrained, progress, **kwargs
)
def build_densenet_backbone(backbone_cfg, **kwargs):
"""
:param backbone_cfg: backbone config node
:param kwargs:
:return: backbone module
"""
model = densenet121(pretrained=False)
# mode = Densenet(**kwargs)
if backbone_cfg.get("pretrained_path"):
pretrained_path = backbone_cfg["pretrained_path"]
state_dict = torch.load(pretrained_path, map_location="cpu")
model.load_state_dict(state_dict, strict=False)
return model
# head and backbone config
model_cfg = {
"head_cfg": {
"head_name": "simple_head",
"activation": "leaky_relu",
"output_dims": [168, 11, 7],
"input_dims": 1000, # densenet121
"hidden_dims": [512, 256],
"bn": True,
"dropout": -1,
},
"backbone_cfg": {
"pretrained_path": "/kaggle/input/julien-4-epochs-densenet121-bengali/model.pt"
},
}
from torch import nn
import torch.nn.functional as F
from typing import Union
ACTIVATION_FN = {
"relu": F.relu,
"relu6": F.relu6,
"elu": F.elu,
"leaky_relu": F.leaky_relu,
None: None,
}
class LinearLayer(nn.Module):
def __init__(self, input_dim, output_dim, activation, bn, dropout_rate=-1):
super(LinearLayer, self).__init__()
self.input_dim = input_dim
self.output_dim = output_dim
self.linear = nn.Linear(input_dim, output_dim)
self.activation_fn = ACTIVATION_FN[activation]
if bn:
self.bn = nn.BatchNorm1d(self.output_dim)
else:
self.bn = None
if dropout_rate > 0:
self.dropout = nn.Dropout(p=dropout_rate)
else:
self.dropout = None
def forward(self, x):
# LINEAR -> BN -> ACTIVATION -> DROPOUT
x = self.linear(x)
if self.bn is not None:
x = self.bn(x)
if self.activation_fn is not None:
x = self.activation_fn(x, inplace=True)
if self.dropout is not None:
x = self.dropout(x)
return x
from torch import nn
def build_head(head_cfg):
return SimplePredictionHead(head_cfg)
class SimplePredictionHead(nn.Module):
def __init__(self, head_cfg):
super(SimplePredictionHead, self).__init__()
self.fc_layers = []
input_dim = head_cfg["input_dims"]
# first hidden layers
for hidden_dim in head_cfg["hidden_dims"]:
self.fc_layers.append(
LinearLayer(
input_dim,
hidden_dim,
bn=head_cfg["bn"],
activation=head_cfg["activation"],
dropout_rate=head_cfg["dropout"],
)
)
input_dim = hidden_dim
output_dims = head_cfg["output_dims"]
# prediction layer
self.fc_layers.append(
LinearLayer(
input_dim, sum(output_dims), bn=False, activation=None, dropout_rate=-1
)
)
self.fc_layers = nn.Sequential(*self.fc_layers)
for m in self.modules():
if isinstance(m, nn.BatchNorm1d):
nn.init.ones_(m.weight)
nn.init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
nn.init.zeros_(m.bias)
def forward(self, x):
return self.fc_layers(x)
class BaselineModel(nn.Module):
def __init__(self, model_cfg):
super(BaselineModel, self).__init__()
self.backbone = build_densenet_backbone(model_cfg["backbone_cfg"])
self.head = build_head(model_cfg["head_cfg"])
self.heads_dims = model_cfg["head_cfg"]["output_dims"]
def forward(self, x):
x = self.backbone(x)
x = self.head(x)
grapheme_logits, vowel_logits, consonant_logits = torch.split(
x, self.heads_dims, dim=1
)
return grapheme_logits, vowel_logits, consonant_logits
solver_cfg = {
"optimizer": "adam",
"lr": 1e-4,
"loss_function": torch.nn.CrossEntropyLoss,
"total_epochs": 1,
"label_weights_path": None,
}
def build_optimizer(model, solver_cfg):
parameters = model.parameters()
optimizers = {"adam": torch.optim.Adam, "SGD": torch.optim.SGD}
opti_type = solver_cfg["optimizer"]
lr = solver_cfg["lr"]
return optimizers[opti_type](parameters, lr=lr)
from typing import List
class EvalBlock(nn.Module):
    def __init__(self, loss_fn, weights: List[float]):
        super(EvalBlock, self).__init__()
        # loss_fn is a loss class (e.g. nn.CrossEntropyLoss); instantiate it with the class weights
        self.loss_fn = loss_fn(torch.tensor(weights))
def forward(self, logits, labels):
loss = self.loss_fn(logits, labels)
acc = (torch.argmax(logits, dim=1) == labels).float().mean()
return loss, acc
class MultiHeadsEval(nn.Module):
def __init__(self, solver_cfg):
super(MultiHeadsEval, self).__init__()
grapheme_weights = weights["grapheme"]
vowel_weights = weights["vowel"]
consonant_weights = weights["consonant"]
loss_fn = solver_cfg["loss_function"]
self.grapheme_eval = EvalBlock(loss_fn, grapheme_weights)
self.vowel_eval = EvalBlock(loss_fn, vowel_weights)
self.consonant_eval = EvalBlock(loss_fn, consonant_weights)
def forward(self, grapheme_logits, vowel_logits, consonant_logits, labels):
grapheme_loss, grapheme_acc = self.grapheme_eval(grapheme_logits, labels[:, 0])
vowel_loss, vowel_acc = self.vowel_eval(vowel_logits, labels[:, 1])
consonant_loss, consonant_acc = self.consonant_eval(
consonant_logits, labels[:, 2]
)
loss = grapheme_loss + vowel_loss + consonant_loss
acc = (grapheme_acc + vowel_acc + consonant_acc) / 3
eval_result = {
"grapheme_loss": grapheme_loss,
"grapheme_acc": grapheme_acc,
"vowel_loss": vowel_loss,
"vowel_acc": vowel_acc,
"consonant_loss": consonant_loss,
"consonant_acc": consonant_acc,
"loss": loss,
"acc": acc,
}
return eval_result
def build_evaluator(solver_cfg):
return MultiHeadsEval(solver_cfg)
# ## Build model, evaluator, get the optimizer and loss function
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
model = BaselineModel(model_cfg)
model.to(device)
loss_func = solver_cfg["loss_function"]
evaluator = build_evaluator(solver_cfg)
evaluator.float().to(device)
opt = build_optimizer(model, solver_cfg)
model = BaselineModel(model_cfg)
state_dict = torch.load(WEIGHTS_FILE, map_location="cpu")
model.load_state_dict(state_dict["model_state"])
model.to(device)
# ## Test inference
def test_eval():
model.eval()
test_data = [
"test_image_data_0.feather",
"test_image_data_1.feather",
"test_image_data_2.feather",
"test_image_data_3.feather",
]
row_id, target = [], []
batch_size = 1
for idx, fname in enumerate(test_data):
test_images = get_data("test", indices=[str(idx)])
test_dataset = BengaliDataset(test_images, dataset_cfg, fname=fname)
test_collator = BengaliDataBatchCollator()
test_loader = DataLoader(
test_dataset,
batch_size=batch_size,
shuffle=False,
collate_fn=test_collator,
num_workers=4,
)
with torch.no_grad():
for idx, (inputs, name) in enumerate(test_loader):
inputs = inputs.to(device)
grapheme_logits, vowel_logits, consonant_logits = model(inputs.float())
grapheme_logits = grapheme_logits.argmax(-1)
vowel_logits = vowel_logits.argmax(-1)
consonant_logits = consonant_logits.argmax(-1)
                # name is the list of image ids from the collator; with batch_size == 1
                # it holds a single id (use a for loop over the batch if batch_size > 1)
                name = name[0]
                row_id += [
                    f"{name}_grapheme_root",
                    f"{name}_vowel_diacritic",
                    f"{name}_consonant_diacritic",
                ]
target += [
grapheme_logits.item(),
vowel_logits.item(),
consonant_logits.item(),
]
return pd.DataFrame({"row_id": row_id, "target": target})
df_submission = test_eval()
df_submission
df_submission.to_csv("submission.csv", index=False)
# model.eval()
# test_data = ['test_image_data_2.feather','test_image_data_3.feather']
# row_id,target = [],[]
# batch_size=3
# for idx, fname in enumerate(test_data):
# test_images = get_data('test', indices=[str(idx)])
# test_dataset = BengaliDataset(test_images, dataset_cfg, fname=fname)
# test_collator = BengaliDataBatchCollator()
# test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False, collate_fn=test_collator,
# num_workers=4)
# with torch.no_grad():
# for inputs, names in test_loader:
# print(inputs.shape)
# plt.imshow(inputs[2][0])
# break
# break
# def bbox(img):
# rows = np.any(img, axis=1)
# cols = np.any(img, axis=0)
# rmin, rmax = np.where(rows)[0][[0, -1]]
# cmin, cmax = np.where(cols)[0][[0, -1]]
# return rmin, rmax, cmin, cmax
# def crop_resize(img0, size=SIZE, pad=16):
# #crop a box around pixels large than the threshold
# #some images contain line at the sides
# ymin,ymax,xmin,xmax = bbox(img0[5:-5,5:-5] > 80)
# #cropping may cut too much, so we need to add it back
# xmin = xmin - 13 if (xmin > 13) else 0
# ymin = ymin - 10 if (ymin > 10) else 0
# xmax = xmax + 13 if (xmax < DEFAULT_W - 13) else DEFAULT_W
# ymax = ymax + 10 if (ymax < DEFAULT_H - 10) else DEFAULT_H
# img = img0[ymin:ymax,xmin:xmax]
# #remove lo intensity pixels as noise
# img[img < 28] = 0
# lx, ly = xmax-xmin,ymax-ymin
# l = max(lx,ly) + pad
# #make sure that the aspect ratio is kept in rescaling
# img = np.pad(img, [((l-ly)//2,), ((l-lx)//2,)], mode='constant')
# return cv2.resize(img,(size,size))
# class GraphemeDataset(Dataset):
# def __init__(self, fname):
# print(fname)
# self.df = pd.read_feather(fname)
# self.data = 255 - self.df.iloc[:, 1:].values.reshape(-1, DEFAULT_H, DEFAULT_W).astype(np.uint8)
# def __len__(self):
# return len(self.data)
# def __getitem__(self, idx):
# name = self.df.iloc[idx,0]
# #normalize each image by its max val
# img = (self.data[idx]*(255.0/self.data[idx].max())).astype(np.uint8)
# img = crop_resize(img)
# img = img.astype(np.float32)/255.0
# img = np.repeat(np.expand_dims(img, axis=-1), 3, axis=-1)
# #img = np.tranpose(x, (2, 0, 1))
# return img, name
# test_data = ['/kaggle/input/bengaliaicv19feather/test_image_data_0.feather',
# '/kaggle/input/bengaliaicv19feather/test_image_data_1.feather',
# '/kaggle/input/bengaliaicv19feather/test_image_data_2.feather',
# '/kaggle/input/bengaliaicv19feather/test_image_data_3.feather']
# %%time
# ## Inference a little faster using @Iafoss and @peters technique
# row_id,target = [],[]
# for fname in test_data:
# #data = pd.read_feather(f'/kaggle/input/bengaliai-cv19/{fname}')
# test_image = GraphemeDataset(fname)
# dl = torch.utils.data.DataLoader(test_image,batch_size=128,num_workers=4,shuffle=False)
# with torch.no_grad():
# for x,y in dl:
# x = x.float()#.cuda()
# x = x.to(device)
# x = x.permute(0,3,1,2)
# p1,p2,p3 = model(x)
# print(p3)
# p1 = p1.argmax(1).cpu().detach().numpy()
# p2 = p2.argmax(1).cpu().detach().numpy()
# p3 = p3.argmax(1).cpu().detach().numpy()
# print(p1, p2, p3)
# for idx,name in enumerate(y):
# row_id += [f'{name}_vowel_diacritic',f'{name}_grapheme_root',
# f'{name}_consonant_diacritic']
# target += [p1[idx].item(),p2[idx].item(),p3[idx].item()]
# submission_df = pd.DataFrame({'row_id': row_id, 'target': target})
# submission_df.to_csv('submission.csv', index=False)
# submission_df.head(20)
# %%time
# ## Inference a little faster using @Iafoss and @peters technique
# row_id,target = [],[]
# for fname in test_data:
# #data = pd.read_feather(f'/kaggle/input/bengaliai-cv19/{fname}')
# test_image = GraphemeDataset(fname)
# dl = torch.utils.data.DataLoader(test_image,batch_size=128,num_workers=4,shuffle=False)
# with torch.no_grad():
# for x,y in dl:
# plt.imshow(x[1])
# break
|
import numpy as np
import pandas as pd
import os
import math
from tqdm.notebook import tqdm
from sklearn.exceptions import ConvergenceWarning
import warnings
from matplotlib import pyplot as plt
import seaborn as sns
from scipy import stats as scs
warnings.simplefilter(action="ignore", category=FutureWarning)
warnings.simplefilter(action="ignore", category=ConvergenceWarning)
PATH = "/kaggle/input/house-prices-advanced-regression-techniques/"
train = pd.read_csv(PATH + "train.csv")
test = pd.read_csv(PATH + "test.csv")
train.head()
# show which columns have nans
def show_nans():
nas = train.isna().sum() + test.isna().sum()
return nas[nas != 0]
# plots numeric feature
def explore_cont(feature, data=train, kind="reg", order=1):
plt.figure(1, figsize=(10, 10))
sns.jointplot(x=feature, y="SalePrice", data=data, kind=kind, order=order)
plt.show()
# plots categorical feature
def explore_cat(feature, data=train, kind="reg"):
plt.figure(1, figsize=(10, 10))
sns.violinplot(x=feature, y="SalePrice", data=data, bw=0.2)
plt.show()
# drops feature from both sets
def drop(col):
train.drop(columns=[col], inplace=True)
test.drop(columns=[col], inplace=True)
# used for creating new feature
def apply(col, new_col, func, drop_col=False):
train[new_col] = train[col].apply(func)
test[new_col] = test[col].apply(func)
if drop_col:
drop(col)
# fill nans
def fillna(col, fill_with="NA"):
train[col].fillna(fill_with, inplace=True)
test[col].fillna(fill_with, inplace=True)
# plots histogram
def show_hist(values):
plt.figure(1, figsize=(10, 6))
sns.distplot(values)
plt.show()
print("skew:", scs.skew(values))
print("kurtosis:", scs.kurtosis(values))
# define target variable conversions
target_trans = lambda price: np.log1p(price) ** 0.5
target_inv_trans = lambda price: np.expm1(price**2)
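# Quick sanity check (sketch): the inverse transform should undo the forward transform.
_p = 200000.0
print(np.isclose(target_inv_trans(target_trans(_p)), _p))  # expected: True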
# convert
train["SalePrice"] = target_trans(train["SalePrice"])
# visualize
show_hist(train["SalePrice"])
show_nans()
explore_cont("LotArea")
explore_cont("GrLivArea")
explore_cont("TotalBsmtSF")
# categorical that support 'NA'
features_cat_with_na = [
"Alley",
"BsmtCond",
"BsmtExposure",
"BsmtFinType1",
"BsmtFinType2",
"BsmtQual",
"Fence",
"FireplaceQu",
"GarageCond",
"GarageFinish",
"GarageQual",
"GarageType",
"MiscFeature",
"PoolQC",
]
# numerical that have NaNs
features_cont_with_na = [
"BsmtFinSF1",
"BsmtFinSF2",
"BsmtFullBath",
"BsmtUnfSF",
"BsmtHalfBath",
"GarageArea",
"GarageCars",
"GarageYrBlt",
"LotFrontage",
"MasVnrArea",
"TotalBsmtSF",
]
# possibly being removed
features_with_too_much_nas = ["MiscFeature", "PoolQC", "Fence", "FireplaceQu", "Alley"]
for feat in features_cat_with_na:
fillna(feat)
for feat in features_cont_with_na:
fillna(feat, train[feat].mean())
# these features don't support 'NA', they have different values
fillna("Electrical", "SBrkr")
fillna("Exterior1st", "Other")
fillna("Exterior2nd", "Other")
fillna("Functional", "Typ")
fillna("KitchenQual", "TA")
fillna("SaleType", "Oth")
fillna("MasVnrType", "None")
# just with mode
fillna("MSZoning", train["MSZoning"].mode()[0])
fillna("Utilities", train["MSZoning"].mode()[0])
# remove outliers based on plots above
train = train[train["LotArea"] < 30000]
train = train[train["GrLivArea"] < 4000]
train = train[train["TotalBsmtSF"] < 2800]
# extract target and ids for test set
target = train["SalePrice"]
test_ids = test["Id"]
# remove target and ids
train.drop(columns=["SalePrice", "Id"], inplace=True)
test.drop(columns=["Id"], inplace=True)
train.head()
# extract categorical features names for CatBoost
cat_features = list(train.select_dtypes(exclude=["int", "float"]).columns.values)
len(cat_features)
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_squared_log_error as msle
from sklearn.metrics import make_scorer
from catboost import CatBoostRegressor
# split train set
x_train, x_test, y_train, y_test = train_test_split(
train, target, test_size=0.2, random_state=289
)
# make score function for GridSearchCV
def score_func(y_true, y_pred, **kwargs):
return msle(target_inv_trans(y_true), target_inv_trans(y_pred), **kwargs) ** 0.5
# hyperparams setting
def make_search(estimator, params, verbose=1):
scorer = make_scorer(score_func, greater_is_better=False)
search = GridSearchCV(estimator, params, cv=5, scoring=scorer, verbose=0, n_jobs=-1)
search.fit(x_train, y_train)
results = pd.DataFrame()
for k, v in search.cv_results_.items():
results[k] = v
results = results.sort_values(by="rank_test_score")
best_params_row = results[results["rank_test_score"] == 1]
mean, std = (
best_params_row["mean_test_score"].iloc[0],
best_params_row["std_test_score"].iloc[0],
)
best_params = best_params_row["params"].iloc[0]
if verbose:
print(
"%s: %.4f (%.4f) with params" % (estimator.__class__.__name__, -mean, std),
best_params,
)
return best_params
depths = list(range(2, 7))
estimators = [50, 100, 200, 400, 700, 1000, 1500]
# I calculated these earlier by running the grid search below
best_params = {
"n_estimators": 1500,
"max_depth": 5,
"random_state": 289,
"cat_features": cat_features,
"verbose": False,
}
# pass True to rerun search
if False:
search_params = {
"n_estimators": estimators,
"max_depth": depths,
"random_state": [289],
"cat_features": [cat_features],
"verbose": [False],
}
best_params = make_search(CatBoostRegressor(), search_params)
# fitting best model
model = CatBoostRegressor()
model.set_params(**best_params)
model.fit(x_train, y_train)
y_true = target_inv_trans(y_test)
y_pred = target_inv_trans(model.predict(x_test))
print("msle = %.4f" % msle(y_true, y_pred) ** 0.5)
res = pd.DataFrame()
res["Id"] = test_ids
res["SalePrice"] = target_inv_trans(model.predict(test))
res.to_csv("submission.csv", index=False)
res.head(20)
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPClassifier
df = pd.read_csv("/kaggle/input/suv-nanze/suv.csv")
df.drop("User ID", axis=1, inplace=True)
df.head(5)
df.Gender = pd.get_dummies(df.Gender, drop_first=True)
X = df.to_numpy()
np.random.seed = 0
X = X[np.random.permutation(X.shape[0])]
y = X[:, -1]
X = X[:, :-1]
split = int(X.shape[0] * 0.8)
X_train = X[:split]
y_train = y[:split]
X_test = X[split:]
y_test = y[split:]
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
# Using sklearn's MLPClassifier is very simple, but not very practical.
# hidden_layer_sizes gives the number of neurons in each hidden layer; it must be a
# tuple or list and does not include the input or output layers.
# The activation function and other hyperparameters can be set here as well.
clf = MLPClassifier(hidden_layer_sizes=(10, 20, 4))
clf.fit(X_train, y_train)
print(clf.score(X_train, y_train))
print(clf.score(X_test, y_test))
clf = MLPClassifier(hidden_layer_sizes=(100,))
clf.fit(X_train, y_train)
print(clf.score(X_train, y_train))
print(clf.score(X_test, y_test))
# overfit model
train_acc = []
test_acc = []
for n in range(5, 200, 10):
clf = MLPClassifier(hidden_layer_sizes=(n,))
clf.fit(X_train, y_train)
train_acc.append(clf.score(X_train, y_train))
test_acc.append(clf.score(X_test, y_test))
plt.plot(train_acc, "b-", label="train_acc")
plt.plot(test_acc, "r-", label="test_acc")
plt.legend()
# There is a persistent gap between train and test accuracy, so the model keeps overfitting, likely due to the lack of data.
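# One common remedy for this gap (a sketch, not part of the original analysis):
# add L2 regularization via alpha and enable early stopping so training halts when
# the validation score stops improving.
clf_reg = MLPClassifier(
    hidden_layer_sizes=(100,),
    alpha=1.0,
    early_stopping=True,
    validation_fraction=0.2,
    random_state=0,
    max_iter=1000,
)
clf_reg.fit(X_train, y_train)
print(clf_reg.score(X_train, y_train))
print(clf_reg.score(X_test, y_test))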
|