Dataset schema (string columns report min/max value length; ⌀ marks columns that may be null):

| Column | Type | Min | Max |
|---|---|---|---|
| file_id | string (length) | 5 | 9 |
| content | string (length) | 100 | 5.25M |
| local_path | string (length) | 66 | 70 |
| kaggle_dataset_name | string (length) ⌀ | 3 | 50 |
| kaggle_dataset_owner | string (length) ⌀ | 3 | 20 |
| kversion | string (length) ⌀ | 497 | 763 |
| kversion_datasetsources | string (length) ⌀ | 71 | 5.46k |
| dataset_versions | string (length) ⌀ | 338 | 235k |
| datasets | string (length) ⌀ | 334 | 371 |
| users | string (length) ⌀ | 111 | 264 |
| script | string (length) | 100 | 5.25M |
| df_info | string (length) | 0 | 4.87M |
| has_data_info | bool (2 classes) | | |
| nb_filenames | int64 | 0 | 370 |
| retreived_data_description | string (length) | 0 | 4.44M |
| script_nb_tokens | int64 | 25 | 663k |
| upvotes | int64 | 0 | 1.65k |
| tokens_description | int64 | 25 | 663k |
| tokens_script | int64 | 25 | 663k |
69090031
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ## Import Train & Test Datasets
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
train_data
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
test_data
# ## Exploring Patterns in the dataset
survived = train_data.loc[train_data.Survived == 1]["Survived"]
rate_survival = sum(survived) / len(train_data)
print("% of people who survived:", rate_survival)
women = train_data.loc[train_data.Sex == "female"]["Survived"]
rate_women = sum(women) / len(women)
men = train_data.loc[train_data.Sex == "male"]["Survived"]
rate_men = sum(men) / len(men)
print("% of women who survived:", rate_women)
print("% of men who survived:", rate_men)
# **MI Scores**
features = ["Pclass", "Sex", "SibSp", "Parch", "Embarked"]
X = pd.get_dummies(train_data[features])
X_test = pd.get_dummies(test_data[features])
y = train_data["Survived"]
# Label encoding for any remaining object-typed categoricals (a no-op here, since get_dummies already encoded them)
for colname in X.select_dtypes("object"):
X[colname], _ = X[colname].factorize()
for colname in X_test.select_dtypes("object"):
X_test[colname], _ = X_test[colname].factorize()
# All features here are discrete; flag integer and boolean (dummy) columns explicitly before using MI!
discrete_features = [
    pd.api.types.is_integer_dtype(t) or pd.api.types.is_bool_dtype(t) for t in X.dtypes
]
X.dtypes
from sklearn.feature_selection import mutual_info_regression
def make_mi_scores(X, y, discrete_features):
mi_scores = mutual_info_regression(X, y, discrete_features=discrete_features)
mi_scores = pd.Series(mi_scores, name="MI Scores", index=X.columns)
mi_scores = mi_scores.sort_values(ascending=False)
return mi_scores
def plot_mi_scores(scores):
scores = scores.sort_values(ascending=True)
width = np.arange(len(scores))
ticks = list(scores.index)
plt.barh(width, scores)
plt.yticks(width, ticks)
plt.title("Mutual Information Scores")
mi_scores = make_mi_scores(X, y, discrete_features)
mi_scores # show a few features with their MI scores
import matplotlib.pyplot as plt
plt.figure(dpi=100, figsize=(8, 5))
plot_mi_scores(mi_scores)
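# Since "Survived" is a binary label, scikit-learn also provides mutual_info_classif as the
# classification counterpart of mutual_info_regression -- a minimal sketch of swapping it in:
from sklearn.feature_selection import mutual_info_classif
mi_scores_clf = pd.Series(
    mutual_info_classif(X, y, discrete_features=discrete_features, random_state=0),
    name="MI Scores (classification)",
    index=X.columns,
).sort_values(ascending=False)
mi_scores_clf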
# ## Random Forest Classifier
X
X_test
y
# Original
# y = train_data["Survived"]
# features = ["Pclass", "Sex", "SibSp", "Parch"]
# X = pd.get_dummies(train_data[features])
# X_test = pd.get_dummies(test_data[features])
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
model.fit(X, y)
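# Optional sanity check (a sketch using scikit-learn's cross_val_score): a rough estimate of
# out-of-sample accuracy before submitting predictions.
from sklearn.model_selection import cross_val_score
cv_scores = cross_val_score(model, X, y, cv=5, scoring="accuracy")
print("Mean CV accuracy:", cv_scores.mean())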
predictions = model.predict(X_test)
# print(predictions)
output = pd.DataFrame({"PassengerId": test_data.PassengerId, "Survived": predictions})
output.to_csv("my_submission.csv", index=False)
print("Your submission was successfully saved!")
output
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/090/69090031.ipynb
| null | null |
[{"Id": 69090031, "ScriptId": 18778072, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7890028, "CreationDate": "07/26/2021 18:10:34", "VersionNumber": 5.0, "Title": "My Titanic Notebook", "EvaluationDate": "07/26/2021", "IsChange": true, "TotalLines": 112.0, "LinesInsertedFromPrevious": 60.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 52.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 1,057 | 0 | 1,057 | 1,057 |
||
69090504
|
import numpy as np
import pandas as pd
import random
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
import warnings
warnings.filterwarnings("ignore")
# # Creating datapoints to test (Phase-1)
np.random.seed(100) # Seed for reproducibility
# Creating uniform distribution of 10k length
distri = np.random.rand(1, 10000)
# Creating Gaussian distribution of 10k length
distri_gaussian = np.random.normal(
loc=np.mean(distri), scale=np.std(distri), size=(1, 10000)
)
# Creating uniform distribution of 10k length of different mean & Std Dev
distri_gaussian_2 = np.random.normal(
loc=np.mean(distri) + 1, scale=np.std(distri) + 0.5, size=(1, 10000)
)
# ----------------------------------------------------------------------------------------------------
distri[0:10]
# # Visualizing the distributions
ax = sns.distplot(distri, kde=True, color="g")
ax = sns.distplot(distri_gaussian, kde=True, color="b")
ax = sns.distplot(distri_gaussian_2, kde=True, color="r")
# -----------------------------------------------------------------------------------------------------------
ax.set(
xlabel="Distribution",
ylabel="Probability density",
title="KDE Comparison of all three distributions",
)
# # One-Sample t-test :
# - Used to test whether a hypothesised value of the population mean can be accepted, based on the available sample from that population
# - The parametric version uses Student's t-distribution to calculate the critical value and the p-value
# - H0 - The mean of the population is x
# - Ha - The mean of the population is not x (two-sided)
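# The statistic itself is t = (x_bar - mu0) / (s / sqrt(n)), compared against Student's t with n-1 dof.
# A purely hypothetical worked example: with x_bar = 32, mu0 = 34, s = 5, n = 100,
# t = (32 - 34) / (5 / 10) = -4, which falls well inside the two-sided rejection region at alpha = 0.05.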
# ### Data Creation (Sample)
np.random.seed(100) # Reproducible results
distri_norm = np.random.normal(30, 5, 1000) # mean=30, std dev=5,n=1k
# --------------------------------------------------------------------
print(distri_norm[0:5])
sns.distplot(distri_norm)
# UDF for manual creation of one-tailed t-test along with scipy benchmarking
from scipy.stats import t # Importing t-table calculator
"""
H0 (Null Hypothesis) : The population mean is 34, i.e. 34 is an accurate estimate of the mean of the population from which the sample is drawn
H1 (Alternative Hypothesis) : The population mean is not equal to 34 (Two-tailed)
"""
def one_tailed_ttest(distri_norm_gen, hypo_mean_gen, mode, alpha=0.05):
mean_sample = np.mean(distri_norm_gen)
print("Mean of sample :", mean_sample)
n = len(distri_norm_gen)
print("No of obs in sample :", n)
std_sample = np.std(
distri_norm_gen, ddof=1
) # ddof = no to subtract from n for degree of freedom, in this case n-1
print("Standard Dev of sample :", std_sample)
hypo_mean = hypo_mean_gen # The hypothesised mean of the population
print("Hypothesized mean of population :", hypo_mean)
# -----------------------------------------------------------------------------------------------------------------
# Calculating t-statistic for the test
t_stat = ((mean_sample - hypo_mean) / std_sample) * np.sqrt(n)
print("T-statistic :", t_stat)
# Conditional approach for two-tailed/one-tailed system
if mode == "less":
# Calculating critical t-value
t_critical = t.ppf(alpha, n - 1)
print("Critical t-value:", t_critical)
# confirm with cdf
p_val = t.cdf(t_stat, n - 1)
print("p-value associated with t_statistic :", p_val)
print(
"#------------------------ Status by manual code -----------------------------#"
)
if t_stat <= t_critical:
print("H0 Rejected")
else:
print("Cannot reject H0")
elif mode == "greater":
# Calculating critical t-value
t_critical = t.ppf(1 - alpha, n - 1)
print("Critical t-value:", t_critical)
# confirm with cdf
p_val = 1 - t.cdf(t_stat, n - 1)
print("p-value associated with t_statistic :", p_val)
print(
"#------------------------ Status by manual code -----------------------------#"
)
if t_stat >= t_critical:
print("H0 Rejected")
else:
print("Cannot reject H0")
elif mode == "two-sided":
# Calculating critical t-value
t_critical = t.ppf(1 - (alpha / 2), n - 1)
print("Critical t-values are:", -t_critical, "&", t_critical)
        # confirm with cdf -- doubled because the alternative covers both sides (greater & smaller);
        # computed unconditionally so p_val is defined even when H0 cannot be rejected
        p_val = 2 * (1 - t.cdf(abs(t_stat), n - 1))
print("p-value associated with t_statistic :", p_val)
print(
"#------------------------ Status by manual code -----------------------------#"
)
if (t_stat <= -t_critical) | (t_stat >= t_critical):
print("H0 Rejected")
else:
print("Cannot reject H0")
print(
"#----------- Benchmarking against Scipy package of the same functionality :-------#"
)
print(stats.ttest_1samp(a=distri_norm_gen, popmean=hypo_mean_gen, alternative=mode))
# ---------------------------------------------------------------------------------------------------------------
# Invoking the UDF
one_tailed_ttest(
distri_norm_gen=distri_norm, hypo_mean_gen=34, mode="two-sided", alpha=0.05
)
# ### Findings : The test statistic as well as the p-value generated by the manual and scipy implementations are the same, and the null hypothesis can indeed be rejected
# # Two-Sample tests :
# 1. Unpaired two-sample tests :-
# - Parametric Test (t-test)
# - Non-Parametric Test (Wilcoxon Rank Sum test / Mann-Whitney U test)
# 2. Paired two-sample tests (Used in A/B testing) :-
# - Parametric Test (t-test)
# - Non-Parametric Test (Wilcoxon Signed Rank test)
#
# - H0 - The distributions are similar (drawn from same distribution)
# - Ha - The distributions are not similar (drawn from different distributions)
# ### Generating datapoints of varied distributions to check the performance of the tests
np.random.seed(100) # Set seed for consistent results
# -----------------------------------------------------------------------------------------
distri_norm_1 = np.random.normal(20, 5, 1000) # mean=20, std dev=5,n=1000
distri_norm_2 = np.random.normal(18.8, 5, 1000)  # mean=18.8, std dev=5, n=1000
distri_norm_3 = np.random.normal(20, 5, 700) # mean=20, std dev=5,n=700
distri_norm_4 = np.random.normal(26, 9, 1000)  # mean=26, std dev=9, n=1000
distri_norm_5 = np.random.normal(13, 9, 1500) # mean=13, std dev=9,n=1500
# -----------------------------------------------------------------------------------------
dict_ = {1: "r", 2: "b", 3: "g", 4: "y", 5: "b"}
# -----------------------------------------------------------------------------------------
counter = 1
for dist in [distri_norm_1, distri_norm_2, distri_norm_3, distri_norm_4, distri_norm_5]:
color_ = dict_[counter]
sns.distplot(dist, color=color_, hist=False)
counter += 1
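# Quick scipy reference for the tests implemented manually below (a sketch on two of the groups above;
# the `alternative` keyword assumes a reasonably recent scipy):
print(stats.ttest_ind(distri_norm_1, distri_norm_2, equal_var=False, alternative="two-sided"))  # unpaired (Welch) t-test
print(stats.ttest_rel(distri_norm_1, distri_norm_2, alternative="two-sided"))  # paired t-test
print(stats.mannwhitneyu(distri_norm_1, distri_norm_2, alternative="two-sided"))  # Mann-Whitney U test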
# ## Two-Tailed Parametric tests (Student's t-tests):
def two_sample_ttest(
group_1, group_2, mode, paired_status="unpaired", alpha=0.05
): # Paired Status can be either 'paired'/'unpaired'
if paired_status == "unpaired": # For independent observations
print("#------------- UN-PAIRED 2 sample t-test --------------#")
# Calculation of parameters from Group 1 ------------------------
g1_mean = np.mean(group_1)
print("Group 1 mean :", g1_mean)
g1_std = np.std(group_1, ddof=1)
print("Std Dev of group 1:", g1_std)
n_1 = len(group_1)
print("No of Obs in group 1:", n_1)
# Calculation of parameters from Group 2 ------------------------
g2_mean = np.mean(group_2)
print("Group 2 mean :", g2_mean)
g2_std = np.std(group_2, ddof=1)
print("Std Dev of group 2:", g2_std)
n_2 = len(group_2)
print("No of Obs in group 1:", n_2)
# ---------------------------------------------------------------
combined_dof = n_1 + n_2 - 2
print("Combined DoF:", combined_dof)
# Denominator for the t statistic to be calculated
denom_1 = np.sqrt(((g1_std**2) / n_1) + ((g2_std**2) / n_2))
t_stat = (g1_mean - g2_mean) / denom_1
print("t-statistic :", t_stat)
# --------------------------------------------------------------------------------------
# Conditional Statements for two-tailed or one-tailed. Generally two-tailed tests are used
if mode == "less":
# Calculating critical t-value
t_critical = t.ppf(alpha, combined_dof)
print("Critical t-value:", t_critical)
# confirm with cdf
p_val = t.cdf(t_stat, combined_dof)
print("p-value associated with t_statistic :", p_val)
print(
"#------------------------ Status by manual code -----------------------------#"
)
if (
t_stat <= t_critical
): # One can alternatively apply > condition on p-value
print("H0 Rejected")
else:
print("Cannot reject H0")
elif mode == "greater":
# Calculating critical t-value
t_critical = t.ppf(1 - alpha, combined_dof)
print("Critical t-value:", t_critical)
# confirm with cdf
p_val = 1 - t.cdf(t_stat, combined_dof)
print("p-value associated with t_statistic :", p_val)
print(
"#------------------------ Status by manual code -----------------------------#"
)
if (
t_stat >= t_critical
): # One can alternatively apply > condition on p-value
print("H0 Rejected")
else:
print("Cannot reject H0")
elif mode == "two-sided":
# Calculating critical t-value
t_critical = t.ppf(1 - (alpha / 2), combined_dof)
print("Critical t-values are:", -t_critical, "&", t_critical)
p_val = 2 * (1 - t.cdf(abs(t_stat), combined_dof))
print("p-value associated with t_statistic :", p_val)
print(
"#------------------------ Status by manual code -----------------------------#"
)
if (t_stat <= -t_critical) | (t_stat >= t_critical):
print("H0 Rejected")
else:
print("Cannot reject H0")
print(
"#----------- Benchmarking results from Scipy package of the same functionality :-------#"
)
print(stats.ttest_ind(group_1, group_2, alternative=mode, equal_var=False))
elif paired_status == "paired":
print("#------------- PAIRED 2 sample t-test --------------#")
assert len(group_1) == len(
group_2
), "Length of distri not matching" # For paired tests, the lengths of groups must be same
g1_mean = np.mean(group_1)
print("Group 1 mean :", g1_mean)
g2_mean = np.mean(group_2)
print("Group 2 mean :", g2_mean)
n = len(group_1)
print("No of Obs in groups :", n)
d1 = np.sum(np.square(group_1 - group_2))
d2 = np.sum(group_1 - group_2)
s = np.sqrt((d1 - (d2**2 / n)) / (n - 1))
print("S value :", s)
combined_dof = n - 1
t_stat = (g1_mean - g2_mean) / np.sqrt((s**2) / n)
print("t-statistic :", t_stat)
# --------------------------------------------------------------------------------------
        # Conditional statement for two-tailed or one-tailed
if mode == "less":
# Calculating critical t-value
t_critical = t.ppf(alpha, combined_dof)
print("Critical t-value:", t_critical)
# confirm with cdf
p_val = t.cdf(t_stat, combined_dof)
print("p-value associated with t_statistic :", p_val)
print(
"#------------------------ Status by manual code -----------------------------#"
)
if t_stat <= t_critical:
print("H0 Rejected")
else:
print("Cannot reject H0")
elif mode == "greater":
# Calculating critical t-value
t_critical = t.ppf(1 - alpha, combined_dof)
print("Critical t-value:", t_critical)
# confirm with cdf
p_val = 1 - t.cdf(t_stat, combined_dof)
print("p-value associated with t_statistic :", p_val)
print(
"#------------------------ Status by manual code -----------------------------#"
)
if t_stat >= t_critical:
print("H0 Rejected")
else:
print("Cannot reject H0")
elif mode == "two-sided":
# Calculating critical t-value
t_critical = t.ppf(1 - (alpha / 2), combined_dof)
print("Critical t-values are:", -t_critical, "&", t_critical)
p_val = (1 - t.cdf(abs(t_stat), combined_dof)) * 2.0
print("p-value associated with t_statistic :", p_val)
print(
"#------------------------ Status by manual code -----------------------------#"
)
if (t_stat <= -t_critical) | (t_stat >= t_critical):
print("H0 Rejected")
else:
print("Cannot reject H0")
print(
"#----------- Benchmarking results from Scipy package of the same functionality :-------#"
)
print(stats.ttest_rel(group_1, group_2, alternative=mode))
# ----------------------------------------------------------------------------------------------------------
# Invoking the above UDF for parametric two-tailed tests
two_sample_ttest(
group_1=distri_norm_1,
group_2=distri_norm_4,
mode="two-sided",
paired_status="paired",
alpha=0.05,
)
# ### Findings - The t-statistics and p-values from the manual and scipy implementations match (the p-values differ only negligibly)
# ## Two-Tailed Non-Parametric Tests :
# 1. Mann-Whitney U test (unpaired)
# 2. Wilcoxon Signed Rank test (Paired)
# - H0 - The distributions are similar (drawn from same distribution)
# - Ha - The distributions are not similar (drawn from different distributions)
def rank_sum_fn(a, b):  # To execute the rank sum process used in the Mann-Whitney test
print("Group 1 length :", len(a))
print("Group 2 length :", len(b))
# ---------------------------------------------------------------------------------------------------------
df = pd.DataFrame()
df["a"] = a
df = df.append(pd.DataFrame(b, columns=["b"]))
df["combined"] = np.where(
df["a"].isnull() == True,
df["b"],
np.where(df["b"].isnull() == True, df["a"], np.nan),
)
df.sort_values(["combined"], ascending=True, inplace=True)
df["rank"] = df["combined"].rank(method="average", na_option="keep")
# ---------------------------------------------------------------------------------------------------------
rank_sum_a = df[df["a"].isnull() == False]["rank"].sum()
rank_sum_b = df[df["b"].isnull() == False]["rank"].sum()
print("Group 1 rank sum :", rank_sum_a)
print("Group 2 rank sum :", rank_sum_b)
return rank_sum_a, rank_sum_b
def man_whitney_u(
group_1, group_2, mode
): # To execute till test-statistic calculation only
n1 = len(group_1)
n2 = len(group_2)
r1, r2 = rank_sum_fn(group_1, group_2) # Utilising the rank sum UDF defined above
u1 = r1 - n1 * (n1 + 1) / 2 # method 2, step 3
u2 = n1 * n2 - u1
u = max(u1, u2)
print("U statistic :", u)
    # Normal approximation: standardise U before computing the p-value (tie correction still omitted)
    mu_u, sigma_u = n1 * n2 / 2, np.sqrt(n1 * n2 * (n1 + n2 + 1) / 12)
    p = 2 * stats.norm.sf(abs(u - mu_u) / sigma_u)
    # print(p)
print("------- Benchmarking the statistic value from scipy-------")
print(stats.mannwhitneyu(x=group_1, y=group_2, alternative=mode))
return u
man_whitney_u(group_1=distri_norm_1, group_2=distri_norm_2, mode="two-sided")
# ## Wilcoxon Signed Rank Test (yet to be added)
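# Until the manual version is added, a scipy-based sketch for reference (paired groups of equal length;
# the `alternative` keyword assumes a recent scipy):
w_stat, w_p = stats.wilcoxon(distri_norm_1, distri_norm_2, alternative="two-sided")
print("Wilcoxon signed-rank statistic:", w_stat, "p-value:", w_p)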
# # Three or more samples test :
# 1. Parametric :-
# - ANOVA (Only One-way is described here) + Tukey's HSD as post-hoc test
# 2. Non-Parametric :-
# - Kruskal Wallis + Dunn's as post-hoc test
# - Friedman's + Dunn's as post-hoc test
#
# - H0 - The distributions are similar (drawn from same distribution)
# - Ha - The distributions are not similar (drawn from different distributions)
# ## One-Way ANOVA (yet to be added)
# One-Way ANOVA
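# Placeholder sketch while the manual version is pending: scipy's one-way ANOVA on three of the groups above.
f_stat, f_p = stats.f_oneway(distri_norm_1, distri_norm_2, distri_norm_3)
print("One-way ANOVA F-statistic:", f_stat, "p-value:", f_p)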
# ## Kruskal Wallis Omnibus Test (Un-Paired) - for 3 groups only
def rank_sum_fn_3(
a, b, c
):  # UDF to calculate rank sums of 3 groups (an extension of a similar function above)
print("Group 1 length :", len(a))
print("Group 2 length :", len(b))
print("Group 3 length :", len(c))
# ---------------------------------------------------------------------------------------------------------
# Creating a dataframe for ease of simultaneous operations
df = pd.DataFrame()
df["a"] = a
df = df.append(pd.DataFrame(b, columns=["b"]))
df = df.append(pd.DataFrame(c, columns=["c"]))
df["combined"] = np.where(
(df["a"].isnull() == True) & (df["c"].isnull() == True),
df["b"],
np.where(
(df["b"].isnull() == True) & (df["a"].isnull() == True),
df["c"],
np.where(
(df["b"].isnull() == True) & (df["c"].isnull() == True), df["a"], np.nan
),
),
)
df.sort_values(["combined"], ascending=True, inplace=True)
df["rank"] = df["combined"].rank(method="average", na_option="keep")
# ---------------------------------------------------------------------------------------------------------
# Extracting rank sums
rank_sum_a = df[df["a"].isnull() == False]["rank"].sum()
rank_sum_b = df[df["b"].isnull() == False]["rank"].sum()
rank_sum_c = df[df["c"].isnull() == False]["rank"].sum()
print("Group 1 rank sum :", rank_sum_a)
print("Group 2 rank sum :", rank_sum_b)
print("Group 3 rank sum :", rank_sum_c)
return rank_sum_a, rank_sum_b, rank_sum_c
def kruskal_wallis_omnibus(group_1, group_2, group_3, alpha=0.05):
n1 = len(group_1)
n2 = len(group_2)
n3 = len(group_3)
n = n1 + n2 + n3
r1, r2, r3 = rank_sum_fn_3(
group_1, group_2, group_3
) # Calculating the rank-sum of all the groups
cum_sum = ((r1**2) / n1) + ((r2**2) / n2) + ((r3**2) / n3) #
h_stat = 12 / (n * (n + 1)) * cum_sum - 3 * (n + 1)
    h_critical = stats.chi2.ppf(1 - alpha, 2)  # upper-tail critical value; dof for KW is no of groups - 1
p_val = 1.0 - stats.chi2.cdf(h_stat, 2)
print("#----------- Results from manual code ------------#")
if h_stat > h_critical:
print(
"Can reject H0, p-value:",
p_val,
"h_stat:",
h_stat,
"critical_h_stat:",
h_critical,
)
else:
print(
"Cannot reject H0, p-value:",
p_val,
"h_stat:",
h_stat,
"critical_h_stat:",
h_critical,
)
print("#----------- Benchmarking from scipy module --------------#")
stat, p = stats.kruskal(group_1, group_2, group_3)
print(p, stat)
# -------------------------------------------------------------------------------------------------------
# Invoking the above UDF
kruskal_wallis_omnibus(
group_1=distri_norm_1, group_2=distri_norm_2, group_3=distri_norm_3, alpha=0.05
)
# ### Finding : The test statistic & p-value from the manual and scipy implementations match
# ## Friedman's Omnibus Test (Paired) - for 3 groups only
def friedmans_omnibus(group_1, group_2, group_3, alpha=0.05):
assert (
len(group_1) == len(group_2) == len(group_3)
), "The group lengths are dissimilar - Please check"
n = len(group_1)
df = pd.DataFrame()
df["a"] = list(group_1)
df["b"] = list(group_2)
df["c"] = list(group_3)
df = df.rank(axis=1, ascending=True).astype(int)
a_rank_sum = df["a"].sum()
b_rank_sum = df["b"].sum()
c_rank_sum = df["c"].sum()
fm_stat = (12 / (n * 3 * 4)) * (
a_rank_sum**2 + b_rank_sum**2 + c_rank_sum**2
) - (3 * n * 4)
    fm_critical = stats.chi2.ppf(1 - alpha, 2)  # upper-tail critical value; dof for Friedman is no of groups - 1
p_val = 1.0 - stats.chi2.cdf(fm_stat, 2)
print("#----------- Results from manual code ------------#")
if fm_stat > fm_critical:
print(
"Can reject H0, p-value:",
p_val,
"h_stat:",
fm_stat,
"critical_h_stat:",
fm_critical,
)
else:
print(
"Cannot reject H0, p-value:",
p_val,
"h_stat:",
fm_stat,
"critical_h_stat:",
fm_critical,
)
print("#----------- Benchamrking from scipy module --------------#")
stat, p = stats.friedmanchisquare(group_1, group_2, group_3)
print(p, stat)
friedmans_omnibus(distri_norm_1, distri_norm_2, distri_norm_4, alpha=0.05)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/090/69090504.ipynb
| null | null |
[{"Id": 69090504, "ScriptId": 18592897, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4811800, "CreationDate": "07/26/2021 18:18:48", "VersionNumber": 3.0, "Title": "Major Hypothesis Tests From Scratch", "EvaluationDate": "07/26/2021", "IsChange": true, "TotalLines": 553.0, "LinesInsertedFromPrevious": 25.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 528.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 6,078 | 0 | 6,078 | 6,078 |
||
69090546
|
<jupyter_start><jupyter_text>Heart Attack Analysis & Prediction Dataset
## Hone your analytical and ML skills by participating in the tasks of my other datasets, given below.
[Data Science Job Posting on Glassdoor](https://www.kaggle.com/rashikrahmanpritom/data-science-job-posting-on-glassdoor)
[Groceries dataset for Market Basket Analysis(MBA)](https://www.kaggle.com/rashikrahmanpritom/groceries-dataset-for-market-basket-analysismba)
[Dataset for Facial recognition using ML approach](https://www.kaggle.com/rashikrahmanpritom/dataset-for-facial-recognition-using-ml-approach)
[Covid_w/wo_Pneumonia Chest Xray](https://www.kaggle.com/rashikrahmanpritom/covid-wwo-pneumonia-chest-xray)
[Disney Movies 1937-2016 Gross Income](https://www.kaggle.com/rashikrahmanpritom/disney-movies-19372016-total-gross)
[Bollywood Movie data from 2000 to 2019](https://www.kaggle.com/rashikrahmanpritom/bollywood-movie-data-from-2000-to-2019)
[17.7K English song data from 2008-2017](https://www.kaggle.com/rashikrahmanpritom/177k-english-song-data-from-20082017)
## About this dataset
- Age : Age of the patient
- Sex : Sex of the patient
- exang: exercise induced angina (1 = yes; 0 = no)
- ca: number of major vessels (0-3)
- cp : Chest Pain type chest pain type
- Value 1: typical angina
- Value 2: atypical angina
- Value 3: non-anginal pain
- Value 4: asymptomatic
- trtbps : resting blood pressure (in mm Hg)
- chol : cholestoral in mg/dl fetched via BMI sensor
- fbs : (fasting blood sugar > 120 mg/dl) (1 = true; 0 = false)
- rest_ecg : resting electrocardiographic results
- Value 0: normal
- Value 1: having ST-T wave abnormality (T wave inversions and/or ST elevation or depression of > 0.05 mV)
- Value 2: showing probable or definite left ventricular hypertrophy by Estes' criteria
- thalach : maximum heart rate achieved
- target : 0= less chance of heart attack 1= more chance of heart attack
n
Kaggle dataset identifier: heart-attack-analysis-prediction-dataset
<jupyter_code>import pandas as pd
df = pd.read_csv('heart-attack-analysis-prediction-dataset/heart.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 303 entries, 0 to 302
Data columns (total 14 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 age 303 non-null int64
1 sex 303 non-null int64
2 cp 303 non-null int64
3 trtbps 303 non-null int64
4 chol 303 non-null int64
5 fbs 303 non-null int64
6 restecg 303 non-null int64
7 thalachh 303 non-null int64
8 exng 303 non-null int64
9 oldpeak 303 non-null float64
10 slp 303 non-null int64
11 caa 303 non-null int64
12 thall 303 non-null int64
13 output 303 non-null int64
dtypes: float64(1), int64(13)
memory usage: 33.3 KB
<jupyter_text>Examples:
{
"age": 63.0,
"sex": 1.0,
"cp": 3.0,
"trtbps": 145.0,
"chol": 233.0,
"fbs": 1.0,
"restecg": 0.0,
"thalachh": 150.0,
"exng": 0.0,
"oldpeak": 2.3,
"slp": 0.0,
"caa": 0.0,
"thall": 1.0,
"output": 1.0
}
{
"age": 37.0,
"sex": 1.0,
"cp": 2.0,
"trtbps": 130.0,
"chol": 250.0,
"fbs": 0.0,
"restecg": 1.0,
"thalachh": 187.0,
"exng": 0.0,
"oldpeak": 3.5,
"slp": 0.0,
"caa": 0.0,
"thall": 2.0,
"output": 1.0
}
{
"age": 41.0,
"sex": 0.0,
"cp": 1.0,
"trtbps": 130.0,
"chol": 204.0,
"fbs": 0.0,
"restecg": 0.0,
"thalachh": 172.0,
"exng": 0.0,
"oldpeak": 1.4,
"slp": 2.0,
"caa": 0.0,
"thall": 2.0,
"output": 1.0
}
{
"age": 56.0,
"sex": 1.0,
"cp": 1.0,
"trtbps": 120.0,
"chol": 236.0,
"fbs": 0.0,
"restecg": 1.0,
"thalachh": 178.0,
"exng": 0.0,
"oldpeak": 0.8,
"slp": 2.0,
"caa": 0.0,
"thall": 2.0,
"output": 1.0
}
<jupyter_script>#
# EDA ON CHANCES OF ❤️ ATTACK
# A heart attack occurs when an artery supplying your heart with blood and oxygen becomes blocked. Fatty deposits build up over time, forming plaques in your heart's arteries. If a plaque ruptures, a blood clot can form and block your arteries, causing a heart attack.
# TABLE OF CONTENT
# 0 PROLOGUE
# 1 IMPORTING LIBRARIES
# 2 DATA DESCRIPTION AND DATA CLEANING
# 2.1 Import Data
# 2.2 Data types
# 2.3 Missing values
#
# 2.4 Duplicates
# 3 ANALYSIS
#
# 3.1 Uni-Variate Analysis:
#
# 3.2 Bi-Variate Analysis:
#
# 3.3 Multi-Variate Analysis:
# 4 FINAL CONCLUSIONS
# 5 MODELLING
# 0 PROLOGUE
# In this work, an exploratory data analysis has been carried out to understand which factors can affect the chance of a heart attack
# FEATURES:
# AGE - AGE OF THE PATIENT
#
# SEX - SEX OF THE PATIENT , (1:MALE , 0: FEMALE)
# EXANG - EXERCISE INCLUDE ANGIA (1=YES, 0=NO)
#
# CA - NUMBER OF MAJOR VESSELS (0-3)
#
# CP - CHEST PAIN TYPE (Value 1: typical angina,
# Value2: atypical angina,
# Value 3: non-anginal pain,
# Value 4: asymptomatic
#
# TRTBPS - RESTING BLOOD PRESSURE IN (MM|HG)
#
# CHOL - CHOLESTEROL IN (MG|DL) FETCHED VIA BMI SENSOR
#
# FBS - (FASTING BLOOD SUGAR > 120 MG/DL) (1=TRUE, 0=FALSE)
#
# REST-ECG -(RESTING ELECTROCARDIOGRAPHIC RESULTS) Value 0: normal,
# Value 1: having ST-T wave abnormality (T wave inversions and/or ST elevation or depression of > 0.05 mV),
# Value 2: showing probable or definite left ventricular hypertrophy by Estes' criteria
#
# THALACH - MAXIMUM HEART RATE ACHIEVED
# TARGET -0=LESS CHANCE OF HEART ATTACK, 1= MORE CHANCE OF HEART ATTACK
#
# ANSWER TO THE FOLLOWING QUESTIONS ARE GIVEN:
# Does increasing age have any effect on the risk of heart attack?
# Does an increase in cholesterol level have any effect on the risk of heart attack?
# Does an increase in blood pressure have any relation with heart attack?
# If you liked this notebook, please upvote.
# 😊😊😊
# # 1 IMPORTING LIBRARIES
# LIBRARIES:
# Library pandas will be required to work with data in tabular representation.
# Library numpy will be required to round the data in the correlation matrix.
# Library missingno will be required to visualize missing values in the data.
#
# Library matplotlib, seaborn, plotly required for data visualization.
#
import pandas as pd
import numpy as np
import seaborn as sns
import plotly.express as px
import missingno
import matplotlib.pyplot as plt
from plotly.subplots import make_subplots
import plotly.graph_objects as go
import plotly.figure_factory as ff
#
# # 2 DATA DESCRIPTION AND DATA CLEANING
# In this block, the cleaning part will be carried out: data types, missing values, and duplicates.
# 2.1 Import Data
# Reading Data:
df = pd.read_csv("/kaggle/input/heart-attack-analysis-prediction-dataset/heart.csv")
df.head() # Loading the First Five Rows:
# Let's Look The Dimensions Of The Data:
print(f"The Data-Set Contain {df.shape[0]} Rows and {df.shape[1]} Columns")
#
# 2.2 Data Types
# Check Data Types
df.dtypes
# The data contains 13 integer columns and 1 float column.
# 2.3 Missing values
# Let's calculate the percentage of blanks and filled values for all columns.
# loop through the columns and check the missing values
for col in df.columns:
    pct_missing = df[col].isnull().mean()  # fraction of missing values, so the percentage format below is meaningful
print(f"{col} - {pct_missing :.1%}")
# Build a matrix of missing values
missingno.matrix(df, fontsize=16)
plt.show()
# CONCLUSION: The data has no missing values, so no further transformations are required.
# 2.4 Duplicates
# Check The Duplicates In the Data-Set:
df.duplicated().sum()
# There is 1 Duplicate Value Present in the Data-set.
# We will drop the Duplicate value:
df = df.drop_duplicates(keep="first")
# CONCLUSION: Now our data is clean, we can do further analysis.
# 3. Analysis:
# 3.1 Uni-variate Analysis:
plt.figure(figsize=(20, 10))
sns.countplot(x=df["age"])
plt.title("COUNT OF PATIENTS AGE", fontsize=20)
plt.xlabel("AGE", fontsize=20)
plt.ylabel("COUNT", fontsize=20)
plt.show()
sns.displot(df["age"])
plt.title("DISTRIBUTION OF AGE", fontsize=20)
plt.show()
s = df["sex"].value_counts().reset_index()
px.pie(s, names="index", values="sex", title="%AGE OF MALE AND FEMALE PATIENTS:")
c = df["cp"].value_counts().reset_index()
plt.figure(figsize=(20, 10))
sns.barplot(x=c["index"], y=c["cp"])
plt.title("TYPE OF CHEST PAIN WITH NUMBER OF PATIENTS", fontsize=20)
plt.xlabel("TYPE", fontsize=20)
plt.ylabel("COUNT", fontsize=20)
plt.show()
sns.displot(df["trtbps"])
plt.title("DISTRIBUTION OF BLOOD PRESSURE AROUND PATIENTS", fontsize=20)
plt.xlabel("BLOOD PRESSURE", fontsize=20)
plt.ylabel("COUNT", fontsize=20)
plt.show()
sns.displot(df["chol"])
plt.title("DISTRIBUTION OF CHOLESTROL LEVEL AROUND PATIENTS", fontsize=20)
plt.xlabel("CHOLESTROL LEVEL", fontsize=20)
plt.ylabel("COUNT", fontsize=20)
plt.show()
sns.displot(df["thalachh"])
plt.title("DISTRIBUTION OF HEART RATE AROUND PATIENTS", fontsize=20)
plt.xlabel("HEART RATE", fontsize=20)
plt.ylabel("COUNT", fontsize=20)
plt.show()
#
# INSIGHT : Uni-Variate Analysis Of The Data:
# Most of the patients are aged 50-60, with age 56 being the most common.
# Around 68.2% (207) of the patients are male and 31.8% (96) are female.
# Most of the patients have chest pain of type Value 1, i.e. typical angina.
# Most of the patients' blood pressure lies between 130 and 140.
# Most of the patients' cholesterol level lies between 200 and 250.
# Most of the patients' heart rate lies between 155 and 165.
#
# 3.2 Bi-Variate Analysis:
# ### 01. AGE
plt.figure(figsize=(10, 6))
sns.histplot(data=df, x="age", hue="output")
plt.title("DOES AGE EFFECT THE HEART-ATTACK")
plt.show()
v = pd.crosstab(df["age"], df["output"]).reset_index()
v.columns = ["age", "low_risk", "high_risk"]
px.line(v, v["age"], v["high_risk"], title="RISK OF HIGH HEART-ATTACK WITH AGE")
px.line(v, v["age"], v["low_risk"], title="RISK OF LOW HEART-ATTACK WITH AGE")
plt.figure(figsize=(20, 10))
sns.lineplot(y="trtbps", x="age", data=df)
plt.title("BLOOD PRESSURE WITH AGE", fontsize=20)
plt.xlabel("AGE", fontsize=20)
plt.ylabel("BLOOD PRESSURE", fontsize=20)
plt.show()
plt.figure(figsize=(20, 10))
sns.lineplot(y="chol", x="age", data=df)
plt.title("CHOLESTROL LEVEL WITH AGE", fontsize=20)
plt.xlabel("AGE", fontsize=20)
plt.ylabel("CHOLESTROL LEVEL", fontsize=20)
plt.show()
# #### THERE IS A HIGH CHANCE OF AN INCREASE IN CHOLESTEROL LEVEL WITH AN INCREASE IN AGE:
plt.figure(figsize=(20, 10))
sns.lineplot(y="thalachh", x="age", data=df)
plt.title("HEART RATE WITH AGE", fontsize=20)
plt.xlabel("AGE", fontsize=20)
plt.ylabel("HEART RATE", fontsize=20)
plt.show()
# #### AS WE CAN SEE, THE HEART RATE GETS LOWER AS THE AGE OF THE PATIENTS INCREASES:
# INSIGHT : Bi-Variate Analysis Of The Data:
# There is no strong relationship between age and heart attack, so we cannot say that increasing age by itself raises or lowers the chance of a heart attack.
# There is a high chance of an increase in blood pressure with an increase in age.
# There is a high chance of an increase in cholesterol level with an increase in age.
# The maximum heart rate tends to decrease with an increase in age.
#
# 3.3 Multi-Variate Analysis:
plt.figure(figsize=(10, 6))
sns.lineplot(x="age", y="chol", hue="output", data=df)
plt.title("EFFECT OF HEART ATTACK WITH INCREASE IN AGE AND CHOLESTROL")
plt.show()
# ### Q1: DOES AN INCREASE IN BLOOD PRESSURE HAVE ANY EFFECT ON THE RISK OF HEART ATTACK?
plt.figure(figsize=(10, 6))
sns.lineplot(x="age", y="trtbps", hue="output", data=df)
plt.title("EFFECT OF HEART ATTACK WITH INCREASE IN AGE AND BLOOD PRESSURE")
plt.show()
plt.figure(figsize=(10, 6))
sns.lineplot(x="age", y="thalachh", hue="output", data=df)
plt.title("EFFECT OF HEART ATTACK WITH INCREASE IN AGE AND MAXIMUM HEART RATE")
plt.show()
plt.figure(figsize=(20, 6))
sns.heatmap(df.corr(), annot=True, cmap="PuBuGn")
#
# INSIGHT : Multi-Variate Analysis Of The Data:
# Cholesterol level increases with age, but it does not show a strong relationship with the output, so we cannot say that it really affects the risk of a heart attack.
# An increase in blood pressure is associated with a higher risk of heart attack.
# Patients with a higher maximum heart rate have a higher risk of heart attack.
#
# # 4 FINAL CONCLUSIONS
# Age alone shows no strong relationship with heart attack risk.
# A higher maximum heart rate is associated with a higher risk of heart attack.
# Higher blood pressure is associated with a higher risk of heart attack.
# Cholesterol rises with age, but (as noted above) its link to heart attack risk is comparatively weak.
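# A quick numerical check of these conclusions: compare the mean of each feature for the two
# output classes (df still holds the raw, untransformed values at this point; the log
# transform happens below):
print(df.groupby("output")[["age", "trtbps", "chol", "thalachh"]].mean())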
# 5. Modelling:
# ## Data preprocessing:
# ## As we can see, none of the features is highly correlated with the target variable,
# ### so we keep all of the features for modelling.
# ## Before that, let's handle outliers
# using a log transformation
df["age"] = np.log(df.age)
df["trtbps"] = np.log(df.trtbps)
df["chol"] = np.log(df.chol)
df["thalachh"] = np.log(df.thalachh)
print("---Log Transform performed---")
X = df.drop("output", axis=1)
y = df["output"]
# ## 01. KNN
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=5)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
from sklearn.metrics import accuracy_score
from sklearn.neighbors import KNeighborsClassifier
scores = []
for i in range(1, 50):
model = KNeighborsClassifier(n_neighbors=i)
model.fit(X_train, y_train)
scores.append(accuracy_score(y_test, model.predict(X_test)))
plt.figure(figsize=(15, 6))
sns.lineplot(x=np.arange(1, 50), y=scores)
plt.show()
knn = KNeighborsClassifier(n_neighbors=7)
knn.fit(X_train, y_train)
knnpred = knn.predict(X_test)
accuracy_score(y_test, knnpred)
from sklearn import metrics
sns.heatmap(metrics.confusion_matrix(y_test, knnpred), annot=True)
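# Above, n_neighbors was chosen by looking at accuracy on the held-out test split, which is
# slightly optimistic; a more conservative sketch picks k by cross-validation on the training
# data only (the range of k values here is an arbitrary choice):
from sklearn.model_selection import cross_val_score
cv_scores = [
    cross_val_score(KNeighborsClassifier(n_neighbors=k), X_train, y_train, cv=5).mean()
    for k in range(1, 30)
]
best_k = int(np.argmax(cv_scores)) + 1
print("best k by 5-fold CV:", best_k)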
# ## 02.Logistic Regression:
from sklearn.linear_model import LogisticRegression
model = LogisticRegression(random_state=10)
model.fit(X_train, y_train)
y_test_pred = model.predict(X_test)
from sklearn.metrics import accuracy_score
# store the result under a new name so the accuracy_score function is not shadowed
lr_accuracy = accuracy_score(y_test, y_test_pred)
lr_accuracy
# ## 03. Decision Tree
from sklearn.tree import DecisionTreeClassifier
model = DecisionTreeClassifier()
model.fit(X_train, y_train)
dt = model.predict(X_test)
metrics.accuracy_score(y_test, dt)
sns.heatmap(metrics.confusion_matrix(y_test, dt), annot=True)
# ## 04.Random Forest
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier()
model.fit(X_train, y_train)
rf = model.predict(X_test)
metrics.accuracy_score(y_test, rf)
# ## 05. SVM
from sklearn.svm import SVC
model = SVC(kernel="rbf")
model.fit(X_train, y_train)
sv = model.predict(X_test)
metrics.accuracy_score(y_test, sv)
# ## 06.Ada Boost
from sklearn.ensemble import AdaBoostClassifier
model = AdaBoostClassifier(learning_rate=0.15, n_estimators=25)
model.fit(X_train, y_train)
ab = model.predict(X_test)
metrics.accuracy_score(y_test, ab)
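# Since several classifiers were tried above, a small summary table makes the comparison
# explicit; this simply reuses the predictions already computed (knnpred, y_test_pred, dt,
# rf, sv, ab):
summary = pd.DataFrame(
    {
        "model": ["KNN", "Logistic Regression", "Decision Tree", "Random Forest", "SVM", "AdaBoost"],
        "test_accuracy": [
            metrics.accuracy_score(y_test, p)
            for p in [knnpred, y_test_pred, dt, rf, sv, ab]
        ],
    }
).sort_values("test_accuracy", ascending=False)
print(summary)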
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/090/69090546.ipynb
|
heart-attack-analysis-prediction-dataset
|
rashikrahmanpritom
|
[{"Id": 69090546, "ScriptId": 16629853, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6990402, "CreationDate": "07/26/2021 18:19:39", "VersionNumber": 12.0, "Title": "CHANCES OF \ud83d\udc96 ATTACK :", "EvaluationDate": "07/26/2021", "IsChange": true, "TotalLines": 494.0, "LinesInsertedFromPrevious": 95.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 399.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 91862846, "KernelVersionId": 69090546, "SourceDatasetVersionId": 2047221}]
|
[{"Id": 2047221, "DatasetId": 1226038, "DatasourceVersionId": 2087216, "CreatorUserId": 4730101, "LicenseName": "CC0: Public Domain", "CreationDate": "03/22/2021 11:40:59", "VersionNumber": 2.0, "Title": "Heart Attack Analysis & Prediction Dataset", "Slug": "heart-attack-analysis-prediction-dataset", "Subtitle": "A dataset for heart attack classification", "Description": "## Hone your analytical and ML skills by participating in tasks of my other dataset's. Given below.\n\n\n[Data Science Job Posting on Glassdoor](https://www.kaggle.com/rashikrahmanpritom/data-science-job-posting-on-glassdoor)\n\n[Groceries dataset for Market Basket Analysis(MBA)](https://www.kaggle.com/rashikrahmanpritom/groceries-dataset-for-market-basket-analysismba)\n\n[Dataset for Facial recognition using ML approach](https://www.kaggle.com/rashikrahmanpritom/dataset-for-facial-recognition-using-ml-approach)\n\n[Covid_w/wo_Pneumonia Chest Xray](https://www.kaggle.com/rashikrahmanpritom/covid-wwo-pneumonia-chest-xray)\n\n[Disney Movies 1937-2016 Gross Income](https://www.kaggle.com/rashikrahmanpritom/disney-movies-19372016-total-gross)\n\n[Bollywood Movie data from 2000 to 2019](https://www.kaggle.com/rashikrahmanpritom/bollywood-movie-data-from-2000-to-2019)\n\n[17.7K English song data from 2008-2017](https://www.kaggle.com/rashikrahmanpritom/177k-english-song-data-from-20082017)\n\n## About this dataset\n\n- Age : Age of the patient\n\n- Sex : Sex of the patient\n\n- exang: exercise induced angina (1 = yes; 0 = no)\n\n- ca: number of major vessels (0-3)\n\n- cp : Chest Pain type chest pain type\n - Value 1: typical angina\n - Value 2: atypical angina\n - Value 3: non-anginal pain\n - Value 4: asymptomatic\n \n- trtbps : resting blood pressure (in mm Hg)\n- chol : cholestoral in mg/dl fetched via BMI sensor\n- fbs : (fasting blood sugar > 120 mg/dl) (1 = true; 0 = false)\n- rest_ecg : resting electrocardiographic results\n - Value 0: normal\n - Value 1: having ST-T wave abnormality (T wave inversions and/or ST elevation or depression of > 0.05 mV)\n - Value 2: showing probable or definite left ventricular hypertrophy by Estes' criteria\n \n- thalach : maximum heart rate achieved\n- target : 0= less chance of heart attack 1= more chance of heart attack\n\nn", "VersionNotes": "heart csv update", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1226038, "CreatorUserId": 4730101, "OwnerUserId": 4730101.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2047221.0, "CurrentDatasourceVersionId": 2087216.0, "ForumId": 1244179, "Type": 2, "CreationDate": "03/22/2021 08:19:12", "LastActivityDate": "03/22/2021", "TotalViews": 870835, "TotalDownloads": 138216, "TotalVotes": 3197, "TotalKernels": 1050}]
|
[{"Id": 4730101, "UserName": "rashikrahmanpritom", "DisplayName": "Rashik Rahman", "RegisterDate": "03/24/2020", "PerformanceTier": 3}]
|
[{"heart-attack-analysis-prediction-dataset/heart.csv": {"column_names": "[\"age\", \"sex\", \"cp\", \"trtbps\", \"chol\", \"fbs\", \"restecg\", \"thalachh\", \"exng\", \"oldpeak\", \"slp\", \"caa\", \"thall\", \"output\"]", "column_data_types": "{\"age\": \"int64\", \"sex\": \"int64\", \"cp\": \"int64\", \"trtbps\": \"int64\", \"chol\": \"int64\", \"fbs\": \"int64\", \"restecg\": \"int64\", \"thalachh\": \"int64\", \"exng\": \"int64\", \"oldpeak\": \"float64\", \"slp\": \"int64\", \"caa\": \"int64\", \"thall\": \"int64\", \"output\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 303 entries, 0 to 302\nData columns (total 14 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 age 303 non-null int64 \n 1 sex 303 non-null int64 \n 2 cp 303 non-null int64 \n 3 trtbps 303 non-null int64 \n 4 chol 303 non-null int64 \n 5 fbs 303 non-null int64 \n 6 restecg 303 non-null int64 \n 7 thalachh 303 non-null int64 \n 8 exng 303 non-null int64 \n 9 oldpeak 303 non-null float64\n 10 slp 303 non-null int64 \n 11 caa 303 non-null int64 \n 12 thall 303 non-null int64 \n 13 output 303 non-null int64 \ndtypes: float64(1), int64(13)\nmemory usage: 33.3 KB\n", "summary": "{\"age\": {\"count\": 303.0, \"mean\": 54.366336633663366, \"std\": 9.082100989837857, \"min\": 29.0, \"25%\": 47.5, \"50%\": 55.0, \"75%\": 61.0, \"max\": 77.0}, \"sex\": {\"count\": 303.0, \"mean\": 0.6831683168316832, \"std\": 0.46601082333962385, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 1.0}, \"cp\": {\"count\": 303.0, \"mean\": 0.966996699669967, \"std\": 1.0320524894832985, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 2.0, \"max\": 3.0}, \"trtbps\": {\"count\": 303.0, \"mean\": 131.62376237623764, \"std\": 17.5381428135171, \"min\": 94.0, \"25%\": 120.0, \"50%\": 130.0, \"75%\": 140.0, \"max\": 200.0}, \"chol\": {\"count\": 303.0, \"mean\": 246.26402640264027, \"std\": 51.83075098793003, \"min\": 126.0, \"25%\": 211.0, \"50%\": 240.0, \"75%\": 274.5, \"max\": 564.0}, \"fbs\": {\"count\": 303.0, \"mean\": 0.1485148514851485, \"std\": 0.35619787492797644, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"restecg\": {\"count\": 303.0, \"mean\": 0.528052805280528, \"std\": 0.525859596359298, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 2.0}, \"thalachh\": {\"count\": 303.0, \"mean\": 149.64686468646866, \"std\": 22.905161114914094, \"min\": 71.0, \"25%\": 133.5, \"50%\": 153.0, \"75%\": 166.0, \"max\": 202.0}, \"exng\": {\"count\": 303.0, \"mean\": 0.32673267326732675, \"std\": 0.4697944645223165, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 1.0}, \"oldpeak\": {\"count\": 303.0, \"mean\": 1.0396039603960396, \"std\": 1.1610750220686348, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.8, \"75%\": 1.6, \"max\": 6.2}, \"slp\": {\"count\": 303.0, \"mean\": 1.3993399339933994, \"std\": 0.6162261453459619, \"min\": 0.0, \"25%\": 1.0, \"50%\": 1.0, \"75%\": 2.0, \"max\": 2.0}, \"caa\": {\"count\": 303.0, \"mean\": 0.7293729372937293, \"std\": 1.022606364969327, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 4.0}, \"thall\": {\"count\": 303.0, \"mean\": 2.3135313531353137, \"std\": 0.6122765072781409, \"min\": 0.0, \"25%\": 2.0, \"50%\": 2.0, \"75%\": 3.0, \"max\": 3.0}, \"output\": {\"count\": 303.0, \"mean\": 0.5445544554455446, \"std\": 0.4988347841643913, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 1.0}}", "examples": 
"{\"age\":{\"0\":63,\"1\":37,\"2\":41,\"3\":56},\"sex\":{\"0\":1,\"1\":1,\"2\":0,\"3\":1},\"cp\":{\"0\":3,\"1\":2,\"2\":1,\"3\":1},\"trtbps\":{\"0\":145,\"1\":130,\"2\":130,\"3\":120},\"chol\":{\"0\":233,\"1\":250,\"2\":204,\"3\":236},\"fbs\":{\"0\":1,\"1\":0,\"2\":0,\"3\":0},\"restecg\":{\"0\":0,\"1\":1,\"2\":0,\"3\":1},\"thalachh\":{\"0\":150,\"1\":187,\"2\":172,\"3\":178},\"exng\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"oldpeak\":{\"0\":2.3,\"1\":3.5,\"2\":1.4,\"3\":0.8},\"slp\":{\"0\":0,\"1\":0,\"2\":2,\"3\":2},\"caa\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"thall\":{\"0\":1,\"1\":2,\"2\":2,\"3\":2},\"output\":{\"0\":1,\"1\":1,\"2\":1,\"3\":1}}"}}]
| true | 1 |
<start_data_description><data_path>heart-attack-analysis-prediction-dataset/heart.csv:
<column_names>
['age', 'sex', 'cp', 'trtbps', 'chol', 'fbs', 'restecg', 'thalachh', 'exng', 'oldpeak', 'slp', 'caa', 'thall', 'output']
<column_types>
{'age': 'int64', 'sex': 'int64', 'cp': 'int64', 'trtbps': 'int64', 'chol': 'int64', 'fbs': 'int64', 'restecg': 'int64', 'thalachh': 'int64', 'exng': 'int64', 'oldpeak': 'float64', 'slp': 'int64', 'caa': 'int64', 'thall': 'int64', 'output': 'int64'}
<dataframe_Summary>
{'age': {'count': 303.0, 'mean': 54.366336633663366, 'std': 9.082100989837857, 'min': 29.0, '25%': 47.5, '50%': 55.0, '75%': 61.0, 'max': 77.0}, 'sex': {'count': 303.0, 'mean': 0.6831683168316832, 'std': 0.46601082333962385, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 1.0, 'max': 1.0}, 'cp': {'count': 303.0, 'mean': 0.966996699669967, 'std': 1.0320524894832985, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 2.0, 'max': 3.0}, 'trtbps': {'count': 303.0, 'mean': 131.62376237623764, 'std': 17.5381428135171, 'min': 94.0, '25%': 120.0, '50%': 130.0, '75%': 140.0, 'max': 200.0}, 'chol': {'count': 303.0, 'mean': 246.26402640264027, 'std': 51.83075098793003, 'min': 126.0, '25%': 211.0, '50%': 240.0, '75%': 274.5, 'max': 564.0}, 'fbs': {'count': 303.0, 'mean': 0.1485148514851485, 'std': 0.35619787492797644, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'restecg': {'count': 303.0, 'mean': 0.528052805280528, 'std': 0.525859596359298, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 1.0, 'max': 2.0}, 'thalachh': {'count': 303.0, 'mean': 149.64686468646866, 'std': 22.905161114914094, 'min': 71.0, '25%': 133.5, '50%': 153.0, '75%': 166.0, 'max': 202.0}, 'exng': {'count': 303.0, 'mean': 0.32673267326732675, 'std': 0.4697944645223165, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 1.0}, 'oldpeak': {'count': 303.0, 'mean': 1.0396039603960396, 'std': 1.1610750220686348, 'min': 0.0, '25%': 0.0, '50%': 0.8, '75%': 1.6, 'max': 6.2}, 'slp': {'count': 303.0, 'mean': 1.3993399339933994, 'std': 0.6162261453459619, 'min': 0.0, '25%': 1.0, '50%': 1.0, '75%': 2.0, 'max': 2.0}, 'caa': {'count': 303.0, 'mean': 0.7293729372937293, 'std': 1.022606364969327, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 4.0}, 'thall': {'count': 303.0, 'mean': 2.3135313531353137, 'std': 0.6122765072781409, 'min': 0.0, '25%': 2.0, '50%': 2.0, '75%': 3.0, 'max': 3.0}, 'output': {'count': 303.0, 'mean': 0.5445544554455446, 'std': 0.4988347841643913, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 1.0, 'max': 1.0}}
<dataframe_info>
RangeIndex: 303 entries, 0 to 302
Data columns (total 14 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 age 303 non-null int64
1 sex 303 non-null int64
2 cp 303 non-null int64
3 trtbps 303 non-null int64
4 chol 303 non-null int64
5 fbs 303 non-null int64
6 restecg 303 non-null int64
7 thalachh 303 non-null int64
8 exng 303 non-null int64
9 oldpeak 303 non-null float64
10 slp 303 non-null int64
11 caa 303 non-null int64
12 thall 303 non-null int64
13 output 303 non-null int64
dtypes: float64(1), int64(13)
memory usage: 33.3 KB
<some_examples>
{'age': {'0': 63, '1': 37, '2': 41, '3': 56}, 'sex': {'0': 1, '1': 1, '2': 0, '3': 1}, 'cp': {'0': 3, '1': 2, '2': 1, '3': 1}, 'trtbps': {'0': 145, '1': 130, '2': 130, '3': 120}, 'chol': {'0': 233, '1': 250, '2': 204, '3': 236}, 'fbs': {'0': 1, '1': 0, '2': 0, '3': 0}, 'restecg': {'0': 0, '1': 1, '2': 0, '3': 1}, 'thalachh': {'0': 150, '1': 187, '2': 172, '3': 178}, 'exng': {'0': 0, '1': 0, '2': 0, '3': 0}, 'oldpeak': {'0': 2.3, '1': 3.5, '2': 1.4, '3': 0.8}, 'slp': {'0': 0, '1': 0, '2': 2, '3': 2}, 'caa': {'0': 0, '1': 0, '2': 0, '3': 0}, 'thall': {'0': 1, '1': 2, '2': 2, '3': 2}, 'output': {'0': 1, '1': 1, '2': 1, '3': 1}}
<end_description>
| 3,820 | 0 | 5,514 | 3,820 |
69090386
|
# Training an LSTM model with the ULMFiT approach on the original text and then on the reversed text, then ensembling the predictions of both.
# Inspiration source [here](https://www.kaggle.com/gurharkhalsa/backwards-forwards-ulmfit-ensemble).
# ## Import libraries
from typing import *
from pandas.core.frame import DataFrame
from fastai.text.all import *
path = Path("../input")
op_path = Path("/kaggle/working")
train_path = path / "commonlitreadabilityprize/train.csv"
test_path = path / "commonlitreadabilityprize/test.csv"
aug_path = path / "common-lit-datset-with-synonym-replacement/aug_df.csv"
df_train = pd.read_csv(train_path)
df_test = pd.read_csv(test_path)
df_all = pd.concat([df_train, df_test])
df_aug = pd.read_csv(aug_path)
df_aug.head(1)
# ## The backwards model
# Trained on text in reverse order
# ## The dataloader
dls_lm_back = TextDataLoaders.from_df(
df_aug,
text_col="excerpt",
is_lm=True,
valid_pct=0.1,
bs=64,
seq_len=72,
backwards=True,
)
dls_lm_back.show_batch(min_n=3)
# ## The backwards language model
learn_back = language_model_learner(
dls_lm_back,
AWD_LSTM,
drop_mult=2.0,
metrics=[accuracy, Perplexity()],
path=path,
wd=0.5,
).to_fp16()
# ## Training the model
learn_back.path = op_path
learn_back.fit_one_cycle(1, 1e-2)
def show_me_lrs(learn):
suggestions = namedtuple("Suggestions", ["min", "steep", "valley", "slide"])
lr_min, lr_steep, lr_valley, lr_slide = learn.lr_find(
suggest_funcs=(minimum, steep, valley, slide)
)
suggested_lrs = suggestions(lr_min, lr_steep, lr_valley, lr_slide)
print(
f"Minimum/10:\t{lr_min:.2e}\
\nSteepest point:\t{lr_steep:.2e}\
\nLongest valley:\t{lr_valley:.2e}\
\nSlide interval:\t{lr_slide:.2e}"
)
return suggested_lrs
suggested_lrs = show_me_lrs(learn_back)
learn_back.unfreeze()
learn_back.fit_one_cycle(3, suggested_lrs.slide)
# good enough accuracy, let's save it
learn_back.path = Path(".")
learn_back.save_encoder("./back_final_encoder")
# ## Backward Text regressor
data = DataBlock(
blocks=(
TextBlock.from_df(
"excerpt", vocab=dls_lm_back.vocab, seq_len=72, backwards=True
),
RegressionBlock,
),
get_x=ColReader("text"),
get_y=ColReader("target"),
splitter=RandomSubsetSplitter(0.3, 0.1, seed=2),
)
data.summary(df_train)
dls_reg_back = data.dataloaders(df_train, bs=8)
# ## The regression model
learn_reg_back = text_classifier_learner(
dls_reg_back, AWD_LSTM, drop_mult=2.0, opt_func=RAdam, metrics=rmse, wd=0.5
).to_fp16()
learn_reg_back.path = Path(".")
learn_reg_back = learn_reg_back.load_encoder("./back_final_encoder")
# ## Training the backwards regression model
learn_reg_back.fit_one_cycle(1, 1e-2)
suggested_lrs = show_me_lrs(learn_reg_back)
learn_reg_back.freeze_to(-2)
learn_reg_back.fit_one_cycle(3, suggested_lrs.valley)
suggested_lrs = show_me_lrs(learn_reg_back)
learn_reg_back.freeze_to(-3)
learn_reg_back.fit_one_cycle(3, suggested_lrs.slide)
suggested_lrs = show_me_lrs(learn_reg_back)
learn_reg_back.unfreeze()
learn_reg_back.fit_one_cycle(4, suggested_lrs.slide)
learn_reg_back.export("./back_final_model")
# ## Create the forward model
# ## The dataloader
dls_lm_forward = TextDataLoaders.from_df(
df_aug,
text_col="excerpt",
is_lm=True,
valid_pct=0.1,
bs=64,
seq_len=72,
backwards=False,
)
dls_lm_forward.show_batch(min_n=3)
# ## The forward Language model
learn_forward = language_model_learner(
dls_lm_forward,
AWD_LSTM,
drop_mult=2.0,
metrics=[accuracy, Perplexity()],
path=path,
wd=0.5,
).to_fp16()
learn_forward.path = op_path
learn_forward.fit_one_cycle(1, 1e-2)
suggested_lrs = show_me_lrs(learn_forward)
learn_forward.unfreeze()
learn_forward.fit_one_cycle(3, suggested_lrs.slide)
learn_forward.path = Path(".")
learn_forward.save_encoder("./forward_final_encoder")
# ## Forward Text Regressor
data = DataBlock(
blocks=(
TextBlock.from_df(
"excerpt", vocab=dls_lm_forward.vocab, seq_len=72, backwards=False
),
RegressionBlock,
),
get_x=ColReader("text"),
get_y=ColReader("target"),
splitter=RandomSubsetSplitter(0.3, 0.1, seed=2),
)
data.summary(df_train)
# ## The forward dataloader
dls_reg_forward = data.dataloaders(df_train, bs=8)
dls_reg_forward.show_batch()
# ## The regression model
learn_reg_forward = text_classifier_learner(
dls_reg_forward, AWD_LSTM, drop_mult=2.0, opt_func=RAdam, metrics=rmse, wd=0.5
).to_fp16()
learn_reg_forward.path = Path(".")
learn_reg_forward = learn_reg_forward.load_encoder("./forward_final_encoder")
# ## Training forward regression model
learn_reg_forward.fit_one_cycle(1, 1e-2)
suggested_lrs = show_me_lrs(learn_reg_forward)
learn_reg_forward.freeze_to(-2)
learn_reg_forward.fit_one_cycle(3, suggested_lrs.slide)
suggested_lrs = show_me_lrs(learn_reg_forward)
learn_reg_forward.freeze_to(-3)
learn_reg_forward.fit_one_cycle(3, suggested_lrs.slide)
suggested_lrs = show_me_lrs(learn_reg_forward)
learn_reg_forward.unfreeze()
learn_reg_forward.fit_one_cycle(4, suggested_lrs.slide)
learn_reg_forward.export("./forward_final_model")
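# The introduction above promises to ensemble the predictions of the backward and forward
# regressors, but that step is not shown in this notebook. A minimal sketch of one way to do
# it, averaging the two models' predictions on the test excerpts (this assumes both learners
# are still in memory and that test.csv has "id" and "excerpt" columns):
test_dl_back = learn_reg_back.dls.test_dl(df_test)
test_dl_forward = learn_reg_forward.dls.test_dl(df_test)
back_preds, _ = learn_reg_back.get_preds(dl=test_dl_back)
forward_preds, _ = learn_reg_forward.get_preds(dl=test_dl_forward)
ensemble_preds = (back_preds.squeeze() + forward_preds.squeeze()) / 2
submission = pd.DataFrame({"id": df_test["id"], "target": ensemble_preds.numpy()})
submission.to_csv("submission.csv", index=False)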
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/090/69090386.ipynb
| null | null |
[{"Id": 69090386, "ScriptId": 18435731, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1174372, "CreationDate": "07/26/2021 18:16:45", "VersionNumber": 7.0, "Title": "common lit - forward backward training on ulmfit", "EvaluationDate": "07/26/2021", "IsChange": false, "TotalLines": 198.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 198.0, "LinesInsertedFromFork": 137.0, "LinesDeletedFromFork": 199.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 61.0, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 1,902 | 0 | 1,902 | 1,902 |
||
69090533
|
<jupyter_start><jupyter_text>ORL_faces
Kaggle dataset identifier: orl-faces
<jupyter_script>import keras
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout
from keras.optimizers import Adam
from keras.callbacks import TensorBoard
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import accuracy_score
from keras.utils import np_utils
import itertools
# # Step 2
# Load the dataset:
# After loading the dataset you have to normalize every image.
# **Note:** an image is a uint8 matrix of pixels, so before any calculation you need to convert it to float or double.
# load dataset
data = np.load("../input/orl-faces/ORL_faces.npz")
# load the "Train Images"
x_train = data["trainX"]
# normalize every image
x_train = np.array(x_train, dtype="float32") / 255
x_test = data["testX"]
x_test = np.array(x_test, dtype="float32") / 255
# load the Label of Images
y_train = data["trainY"]
y_test = data["testY"]
# show the shapes of the train and test data
print("x_train shape: {}".format(x_train.shape))
print("y_train shape: {}".format(y_train.shape))
print("x_test shape: {}".format(x_test.shape))
# # Step 3
# Split the dataset into training and validation data.
# Validation dataset: this split is used to detect overfitting. If the accuracy on the training set keeps increasing while the accuracy on the validation set stays the same or decreases, the network is overfitting and training should be stopped.
# **Note:** we usually hold out about 30 percent of a dataset for validation, but here we only use 5 percent because the number of images in this dataset is very small.
x_train, x_valid, y_train, y_valid = train_test_split(
x_train,
y_train,
test_size=0.05,
random_state=1234,
)
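# The stopping rule described above can be automated with a Keras callback instead of watching
# the curves by hand; a minimal sketch (the patience value of 10 epochs is an arbitrary choice):
from keras.callbacks import EarlyStopping
early_stop = EarlyStopping(monitor="val_loss", patience=10, restore_best_weights=True)
# it could then be passed to model.fit(...) later via callbacks=[early_stop]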
# # **Step 4**
# To use the CNN we need to reshape the images so that they all share the same (rows, cols, channels) shape.
im_rows = 112
im_cols = 92
batch_size = 512
im_shape = (im_rows, im_cols, 1)
# change the size of images
x_train = x_train.reshape(x_train.shape[0], *im_shape)
x_test = x_test.reshape(x_test.shape[0], *im_shape)
x_valid = x_valid.reshape(x_valid.shape[0], *im_shape)
print("x_train shape: {}".format(y_train.shape[0]))
print("x_test shape: {}".format(y_test.shape))
# # **Step 5**
# Build the CNN model. A CNN has 3 main layer types:
# 1- Convolutional layer
# 2- Pooling layer
# 3- Fully connected layer
# We can build a new CNN architecture by changing the number and position of these layers.
# filters = the number of kernels, i.e. the depth of the output feature map
cnn_model = Sequential(
[
Conv2D(filters=36, kernel_size=7, activation="relu", input_shape=im_shape),
MaxPooling2D(pool_size=2),
Conv2D(filters=54, kernel_size=5, activation="relu", input_shape=im_shape),
MaxPooling2D(pool_size=2),
Flatten(),
Dense(2024, activation="relu"),
Dropout(0.5),
Dense(1024, activation="relu"),
Dropout(0.5),
Dense(512, activation="relu"),
Dropout(0.5),
# 20 is the number of outputs
Dense(20, activation="softmax"),
]
)
cnn_model.compile(
loss="sparse_categorical_crossentropy", #'categorical_crossentropy',
optimizer=Adam(lr=0.0001),
metrics=["accuracy"],
)
#
# Show the model's parameters.
cnn_model.summary()
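# The layer shapes reported by summary() above can be cross-checked by hand; a small sketch
# of the arithmetic, assuming 'valid' padding and stride-1 convolutions:
def conv_out(size, kernel):
    return size - kernel + 1
def pool_out(size, pool=2):
    return size // pool
h, w = 112, 92
h, w = conv_out(h, 7), conv_out(w, 7)  # after Conv2D(36, kernel_size=7) -> (106, 86)
h, w = pool_out(h), pool_out(w)        # after MaxPooling2D(2)           -> (53, 43)
h, w = conv_out(h, 5), conv_out(w, 5)  # after Conv2D(54, kernel_size=5) -> (49, 39)
h, w = pool_out(h), pool_out(w)        # after MaxPooling2D(2)           -> (24, 19)
print("flattened size:", h * w * 54)   # 24 * 19 * 54 = 24624 inputs to the first Dense layer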
# # Step 6
# Train the Model
# Note: You can change the number of epochs
history = cnn_model.fit(
np.array(x_train),
np.array(y_train),
batch_size=512,
epochs=250,
verbose=2,
validation_data=(np.array(x_valid), np.array(y_valid)),
)
# Evaluate the test data
scor = cnn_model.evaluate(np.array(x_test), np.array(y_test), verbose=0)
print("test los {:.4f}".format(scor[0]))
print("test acc {:.4f}".format(scor[1]))
# # Step 7
# plot the result
# list all data in history
print(history.history.keys())
# summarize history for accuracy
plt.plot(history.history["accuracy"])
plt.plot(history.history["val_accuracy"])
plt.title("model accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["train", "test"], loc="upper left")
plt.show()
# summarize history for loss
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train", "test"], loc="upper left")
plt.show()
# # Step 8
# Plot Confusion Matrix
predicted = np.argmax(cnn_model.predict(x_test), axis=-1)
# print(predicted)
# print(y_test)
# predict_classes was removed in newer Keras versions; reuse the argmax predictions above
ynew = predicted
Acc = accuracy_score(y_test, ynew)
print("accuracy : ")
print(Acc)
# /tn, fp, fn, tp = confusion_matrix(np.array(y_test), ynew).ravel()
cnf_matrix = confusion_matrix(np.array(y_test), ynew)
y_test1 = np_utils.to_categorical(y_test, 20)
def plot_confusion_matrix(
cm, classes, normalize=False, title="Confusion matrix", cmap=plt.cm.Blues
):
"""
This function prints and plots the confusion matrix.
Normalization can be applied by setting `normalize=True`.
"""
if normalize:
cm = cm.astype("float") / cm.sum(axis=1)[:, np.newaxis]
# print("Normalized confusion matrix")
else:
print("Confusion matrix, without normalization")
# print(cm)
plt.imshow(cm, interpolation="nearest", cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes, rotation=45)
plt.yticks(tick_marks, classes)
fmt = ".2f" if normalize else "d"
thresh = cm.max() / 2.0
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(
j,
i,
format(cm[i, j], fmt),
horizontalalignment="center",
color="white" if cm[i, j] > thresh else "black",
)
plt.tight_layout()
plt.ylabel("True label")
plt.xlabel("Predicted label")
plt.show()
print("Confusion matrix, without normalization")
print(cnf_matrix)
plt.figure()
plot_confusion_matrix(
    cnf_matrix[0:10, 0:10],  # rows/columns 0-9, matching the class labels below
    classes=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
title="Confusion matrix, without normalization",
)
plt.figure()
plot_confusion_matrix(
    cnf_matrix[10:20, 10:20],  # rows/columns 10-19, matching the class labels below
    classes=[10, 11, 12, 13, 14, 15, 16, 17, 18, 19],
title="Confusion matrix, without normalization",
)
print("Confusion matrix:\n%s" % confusion_matrix(np.array(y_test), ynew))
print(classification_report(np.array(y_test), ynew))
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/090/69090533.ipynb
|
orl-faces
|
sifboudjellal
|
[{"Id": 69090533, "ScriptId": 18855149, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7950058, "CreationDate": "07/26/2021 18:19:18", "VersionNumber": 1.0, "Title": "Face Recognition using CNN", "EvaluationDate": "07/26/2021", "IsChange": true, "TotalLines": 222.0, "LinesInsertedFromPrevious": 222.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
|
[{"Id": 91862826, "KernelVersionId": 69090533, "SourceDatasetVersionId": 2465716}]
|
[{"Id": 2465716, "DatasetId": 1492577, "DatasourceVersionId": 2508166, "CreatorUserId": 7950058, "LicenseName": "Unknown", "CreationDate": "07/26/2021 16:27:36", "VersionNumber": 1.0, "Title": "ORL_faces", "Slug": "orl-faces", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1492577, "CreatorUserId": 7950058, "OwnerUserId": 7950058.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2465716.0, "CurrentDatasourceVersionId": 2508166.0, "ForumId": 1512289, "Type": 2, "CreationDate": "07/26/2021 16:27:36", "LastActivityDate": "07/26/2021", "TotalViews": 1501, "TotalDownloads": 133, "TotalVotes": 0, "TotalKernels": 1}]
|
[{"Id": 7950058, "UserName": "sifboudjellal", "DisplayName": "Sif Boudjellal", "RegisterDate": "07/21/2021", "PerformanceTier": 1}]
|
| false | 0 | 2,076 | 2 | 2,098 | 2,076 |
||
69090971
|
<jupyter_start><jupyter_text>Fashion MNIST
### Context
Fashion-MNIST is a dataset of Zalando's article images—consisting of a training set of 60,000 examples and a test set of 10,000 examples. Each example is a 28x28 grayscale image, associated with a label from 10 classes. Zalando intends Fashion-MNIST to serve as a direct drop-in replacement for the original MNIST dataset for benchmarking machine learning algorithms. It shares the same image size and structure of training and testing splits.
The original MNIST dataset contains a lot of handwritten digits. Members of the AI/ML/Data Science community love this dataset and use it as a benchmark to validate their algorithms. In fact, MNIST is often the first dataset researchers try. "If it doesn't work on MNIST, it won't work at all", they said. "Well, if it does work on MNIST, it may still fail on others."
Zalando seeks to replace the original MNIST dataset
### Content
Each image is 28 pixels in height and 28 pixels in width, for a total of 784 pixels in total. Each pixel has a single pixel-value associated with it, indicating the lightness or darkness of that pixel, with higher numbers meaning darker. This pixel-value is an integer between 0 and 255. The training and test data sets have 785 columns. The first column consists of the class labels (see above), and represents the article of clothing. The rest of the columns contain the pixel-values of the associated image.
- To locate a pixel on the image, suppose that we have decomposed x as x = i * 28 + j, where i and j are integers between 0 and 27. The pixel is located on row i and column j of a 28 x 28 matrix.
- For example, pixel31 indicates the pixel that is in the fourth column from the left, and the second row from the top, as in the ascii-diagram below.
**Labels**
Each training and test example is assigned to one of the following labels:
- 0 T-shirt/top
- 1 Trouser
- 2 Pullover
- 3 Dress
- 4 Coat
- 5 Sandal
- 6 Shirt
- 7 Sneaker
- 8 Bag
- 9 Ankle boot
TL;DR
- Each row is a separate image
- Column 1 is the class label.
- Remaining columns are pixel numbers (784 total).
- Each value is the darkness of the pixel (1 to 255)
Kaggle dataset identifier: fashionmnist
<jupyter_script>from subprocess import check_output
import pandas as pd
import numpy as np
import warnings
warnings.filterwarnings("ignore")
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
import keras
from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout
from keras.optimizers import Adam
from keras.callbacks import TensorBoard
num_classes = 10
epochs = 20
train_df = pd.read_csv("../input/fashionmnist/fashion-mnist_train.csv", sep=",")
test_df = pd.read_csv("../input/fashionmnist/fashion-mnist_test.csv", sep=",")
train_df
test_df
train_data = np.array(train_df, dtype="float32")
test_data = np.array(test_df, dtype="float32")
x_train = train_data[:, 1:] / 255
y_train = train_data[:, 0]
x_test = test_data[:, 1:] / 255
y_test = test_data[:, 0]
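# The dataset description above says pixel x of a row sits at image position (i, j) with
# x = i * 28 + j; a quick check that reshaping a row to 28x28 follows exactly that layout:
row = train_data[0, 1:]                 # pixel values of the first training example
img = row.reshape(28, 28)
i, j = 1, 3
assert img[i, j] == row[i * 28 + j]     # row-major layout, as described
print("label of first training example:", int(train_data[0, 0]))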
x_train, x_validate, y_train, y_validate = train_test_split(
x_train, y_train, test_size=0.2, random_state=12345
)
class_names = [
"T_shirt/top",
"Trouser",
"Pullover",
"Dress",
"Coat",
"Sandal",
"Shirt",
"Sneaker",
"Bag",
"Ankle boot",
]
plt.figure(figsize=(10, 10))
for i in range(36):
plt.subplot(6, 6, i + 1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(x_train[i].reshape((28, 28)))
label_index = int(y_train[i])
plt.title(class_names[label_index])
plt.show()
W_grid = 15
L_grid = 15
fig, axes = plt.subplots(L_grid, W_grid, figsize=(16, 16))
axes = axes.ravel()
n_train = len(train_data)
for i in np.arange(0, W_grid * L_grid):
index = np.random.randint(0, n_train)
axes[i].imshow(train_data[index, 1:].reshape((28, 28)))
labelindex = int(train_data[index, 0])
axes[i].set_title(class_names[labelindex], fontsize=9)
axes[i].axis("off")
plt.subplots_adjust(hspace=0.3)
image_rows = 28
image_cols = 28
batch_size = 4096
image_shape = (image_rows, image_cols, 1)
x_train = x_train.reshape(x_train.shape[0], *image_shape)
x_test = x_test.reshape(x_test.shape[0], *image_shape)
x_validate = x_validate.reshape(x_validate.shape[0], *image_shape)
cnn_model = Sequential(
[
Conv2D(filters=32, kernel_size=3, activation="relu", input_shape=image_shape),
MaxPooling2D(pool_size=2),
Dropout(0.2),
Flatten(),
Dense(32, activation="relu"),
Dense(10, activation="softmax"),
]
)
cnn_model.compile(
loss="sparse_categorical_crossentropy", optimizer=Adam(lr=0.001), metrics=["acc"]
)
history = cnn_model.fit(
x_train,
y_train,
batch_size=4096,
epochs=75,
verbose=1,
validation_data=(x_validate, y_validate),
)
plt.figure(figsize=(10, 10))
plt.subplot(2, 2, 1)
plt.plot(history.history["loss"], label="Kayıp")
plt.plot(history.history["val_loss"], label="Onaylama Kaybı")
plt.legend()
plt.title("Eğitim - Kayıp")
plt.subplot(2, 2, 2)
plt.plot(history.history["acc"], label="Doğruluk")
plt.plot(history.history["val_acc"], label="Onaylama Doğruluğu")
plt.legend()
plt.title("Eğitim - Doğruluk")
score = cnn_model.evaluate(x_test, y_test, verbose=0)
print("Test - Kayıp Oranı : {:.4f}".format(score[0]))
print("Test - Doğruluk Oranı : {:.4f}".format(score[1]))
import matplotlib.pyplot as plt
accuracy = history.history["acc"]
val_accuracy = history.history["val_acc"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs = range(len(accuracy))
plt.plot(epochs, accuracy, "bo", label="Eğitim Doğruluğu")
plt.plot(epochs, val_accuracy, "b", label="Onaylama Doğruluğu")
plt.title("Eğitim ve Onaylama doğruluğu")
plt.legend()
plt.figure()
plt.plot(epochs, loss, "bo", label="Eğitim Kaybı")
plt.plot(epochs, val_loss, "b", label="Onaylama Kaybı")
plt.title("Eğitim ve Onaylama kaybı")
plt.legend()
plt.show()
# predict_classes was removed in newer Keras versions; use argmax over the softmax output instead
predicted_classes = np.argmax(cnn_model.predict(x_test), axis=-1)
y_true = test_df.iloc[:, 0]
from sklearn.metrics import classification_report
target_names = ["Class {}".format(i) for i in range(num_classes)]
print(classification_report(y_true, predicted_classes, target_names=target_names))
L = 7
W = 7
fig, axes = plt.subplots(L, W, figsize=(23, 23))
axes = axes.ravel()
for i in np.arange(0, L * W):
axes[i].imshow(x_test[i].reshape(28, 28))
axes[i].set_title(
f"Tahmini Sınıfı = {predicted_classes[i]:0.1f}\n Orjinal Sınıfı = {y_test[i]:0.1f}"
)
axes[i].axis("off")
plt.subplots_adjust(wspace=0.3)
# # Notebook 69090488: feature_eng1 (Titanic feature engineering + random forest)
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use("seaborn-whitegrid")
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, confusion_matrix, classification_report
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import OneHotEncoder
from sklearn.cluster import KMeans
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
train_data.head()
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
test_data.head()
def substrings_in_string(big_string, substrings):
    # Return the first matching substring, "Unknown" for missing values, NaN otherwise
    if pd.isna(big_string):
        return "Unknown"
    for substring in substrings:
        if big_string.find(substring) != -1:
            return substring
    return np.nan
title_list = [
"Mrs",
"Mr",
"Master",
"Miss",
"Major",
"Rev",
"Dr",
"Ms",
"Mlle",
"Col",
"Capt",
"Mme",
"Countess",
"Don",
"Jonkheer",
]
train_data["Title"] = train_data["Name"].map(
lambda x: substrings_in_string(x, title_list)
)
test_data["Title"] = test_data["Name"].map(
lambda x: substrings_in_string(x, title_list)
)
def replace_titles(x):
title = x["Title"]
if title in ["Don", "Major", "Capt", "Jonkheer", "Rev", "Col"]:
return "Mr"
elif title in ["Countess", "Mme"]:
return "Mrs"
elif title in ["Mlle", "Ms"]:
return "Miss"
elif title == "Dr":
if x["Sex"] == "Male":
return "Mr"
else:
return "Mrs"
else:
return title
train_data["Title"] = train_data.apply(replace_titles, axis=1)
test_data["Title"] = test_data.apply(replace_titles, axis=1)
cabin_list = ["A", "B", "C", "D", "E", "F", "T", "G", "Unknown"]
train_data["Deck"] = (
train_data["Cabin"]
.astype("string")
.map(lambda x: substrings_in_string(x, cabin_list))
)
test_data["Deck"] = (
test_data["Cabin"]
.astype("string")
.map(lambda x: substrings_in_string(x, cabin_list))
)
train_data.isnull().sum()
test_data.isnull().sum()
# train_data["AverageAge"] = train_data.groupby("Title")["Age"].transform("mean")
# avgAge_df=train_data[["Title", "AverageAge"]].drop_duplicates()
# train_data["Age2"]=train_data["Age"]
train_data["Age"] = (
train_data[["Title", "Age"]]
.groupby("Title")
.transform(lambda group: group.fillna(group.mean()))
)
test_data["Age"] = test_data["Age"].fillna(0)
title_dict = {"Mr": 0, "Mrs": 1, "Miss": 2, "Master": 4, "Unknown": 5}
train_data["Title"] = train_data["Title"].apply(
lambda x: title_dict[x] if (x in title_dict.keys()) else 5
)
test_data["Title"] = test_data["Title"].apply(
lambda x: title_dict[x] if (x in title_dict.keys()) else 5
)
deck_dict = {
"A": 0,
"B": 1,
"C": 2,
"D": 3,
"E": 4,
"F": 5,
"T": 6,
"G": 7,
"Unknown": 8,
}
train_data["Deck"] = train_data["Deck"].apply(
lambda x: deck_dict[x] if (x in deck_dict.keys()) else 8
)
test_data["Deck"] = test_data["Deck"].apply(
lambda x: deck_dict[x] if (x in deck_dict.keys()) else 8
)
sex_dict = {"male": 0, "female": 1}
train_data["Sex"] = train_data["Sex"].apply(
lambda x: sex_dict[x] if (x in sex_dict.keys()) else 3
)
test_data["Sex"] = test_data["Sex"].apply(
lambda x: sex_dict[x] if (x in sex_dict.keys()) else 3
)
train_data["Relationships"] = train_data["SibSp"] + train_data["Parch"]
test_data["Relationships"] = test_data["SibSp"] + test_data["Parch"]
train_data["Age*Class"] = train_data["Age"] * train_data["Pclass"]
train_data["Age*Class"] = train_data["Age*Class"].fillna(0)
test_data["Age*Class"] = test_data["Age"] * test_data["Pclass"]
test_data["Age*Class"] = test_data["Age*Class"].fillna(0)
train_data["Sex*Class"] = train_data["Sex"] * train_data["Pclass"]
test_data["Sex*Class"] = test_data["Sex"] * test_data["Pclass"]
train_data["Fare_Per_Person"] = train_data["Fare"] / (train_data["Relationships"] + 1)
test_data["Fare_Per_Person"] = test_data["Fare"] / (test_data["Relationships"] + 1)
X_tmp = train_data.copy()
y_tmp = X_tmp.pop("Survived")
features_all = [
"Pclass",
"Sex",
"Age",
"SibSp",
"Parch",
"Ticket",
"Fare",
"Cabin",
"Embarked",
"Relationships",
"Title",
"Deck",
"Age*Class",
"Sex*Class",
"Fare_Per_Person",
]
# features = ["Pclass", "Sex", "SibSp", "Parch", "Fare", "Embarked", "Age"]
# features = ["Pclass", "Sex", "Fare", "Embarked", "Age", "Relationships"]
# features = ["Pclass", "Sex", "Embarked", "Age", "Relationships", "Title"] #v4
# features = ["Pclass", "Sex", "Embarked", "Age", "Relationships", "Title","Deck", "Age*Class", "Fare_Per_Person"]
features = [
"Pclass",
"Sex",
"Age",
"SibSp",
"Parch",
"Fare",
"Embarked",
"Relationships",
"Title",
"Deck",
"Age*Class",
"Sex*Class",
"Fare_Per_Person",
]
X_tmp = X_tmp[features]
X_train, X_test, y_train, y_test = train_test_split(
X_tmp, y_tmp, test_size=0.2, random_state=1, stratify=y_tmp
)
from sklearn.impute import SimpleImputer
# imputer_age = SimpleImputer(missing_values=np.nan, strategy='mean')
# imputer_age = imputer_age.fit(X_train[['Age']])
# X_train['Age'] = imputer_age.transform(X_train[['Age']])
# X_test['Age'] = imputer_age.transform(X_test[['Age']])
imputer_embarked = SimpleImputer(missing_values=np.nan, strategy="most_frequent")
imputer_embarked = imputer_embarked.fit(X_train[["Embarked"]])
X_train["Embarked"] = imputer_embarked.transform(X_train[["Embarked"]])
X_test["Embarked"] = imputer_embarked.transform(X_test[["Embarked"]])
# encoder_sex = LabelEncoder()
# X_train['Sex'] = encoder_sex.fit_transform(X_train['Sex'].values)
# X_test['Sex'] = encoder_sex.transform(X_test['Sex'].values)
encoder_embarked = LabelEncoder()
X_train["Embarked"] = encoder_embarked.fit_transform(X_train["Embarked"].values)
X_test["Embarked"] = encoder_embarked.transform(X_test["Embarked"].values)
# encoder_title = LabelEncoder()
# X_train['Title'] = encoder_title.fit_transform(X_train['Title'].values)
# X_test['Title'] = encoder_title.transform(X_test['Title'].values)
# Create cluster feature
# cluster_features=["Pclass", "Sex", "Embarked", "Age", "Relationships", "Title"]
# kmeans = KMeans(n_clusters=5)
# X_train["Cluster"] = kmeans.fit_predict(X_train[cluster_features])
# X_train["Cluster"] = X_train["Cluster"].astype("category")
# X_test["Cluster"] = kmeans.predict(X_test[cluster_features])
# X_test["Cluster"] = X_test["Cluster"].astype("category")
X_train
model = RandomForestClassifier(n_estimators=500, max_depth=5, random_state=1)
model.fit(X_train, y_train)
# predictions = model.predict(X_test)
print("Train Accuracy: ", accuracy_score(model.predict(X_train), y_train))
print("Test Accuracy: ", accuracy_score(model.predict(X_test), y_test))
# features = ["Pclass", "Sex", "SibSp", "Parch", "Age", "Cabin", "Embarked", "Ticket", "Fare"]
# features = ["Pclass", "Sex", "SibSp", "Parch", "Ticket", "Fare", "Embarked", "Age"]
# X = train_data.copy()
# y = X.pop("Survived")
# X = X[features]
# # Label encoding for categoricals
# for colname in X.select_dtypes("object"):
# X[colname], _ = X[colname].factorize()
# # All discrete features should now have integer dtypes (double-check this before using MI!)
# discrete_features = X.dtypes == int
# X.nunique()
# from sklearn.feature_selection import mutual_info_classif
# def make_mi_scores(X, y, discrete_features):
# mi_scores = mutual_info_classif(X, y, discrete_features=discrete_features)
# mi_scores = pd.Series(mi_scores, name="MI Scores", index=X.columns)
# mi_scores = mi_scores.sort_values(ascending=False)
# return mi_scores
# mi_scores = make_mi_scores(X, y, discrete_features)
# mi_scores # show a few features with their MI scores
# # Notebook 69090479: EDA process and ML models (water potability)
<jupyter_start><jupyter_text>Water Quality
# Context
`Access to safe drinking-water is essential to health, a basic human right and a component of effective policy for health protection. This is important as a health and development issue at a national, regional and local level. In some regions, it has been shown that investments in water supply and sanitation can yield a net economic benefit, since the reductions in adverse health effects and health care costs outweigh the costs of undertaking the interventions.`
# Content
The water_potability.csv file contains water quality metrics for 3276 different water bodies.
### 1. pH value:
```pH is an important parameter in evaluating the acid–base balance of water. It is also an indicator of the acidic or alkaline condition of the water. WHO recommends a permissible pH range of 6.5 to 8.5. The values in the current investigation were 6.52–6.83, which are within the WHO range.```
### 2. Hardness:
```Hardness is mainly caused by calcium and magnesium salts. These salts are dissolved from geologic deposits through which water travels. The length of time water is in contact with hardness producing material helps determine how much hardness there is in raw water.
Hardness was originally defined as the capacity of water to precipitate soap caused by Calcium and Magnesium.```
### 3. Solids (Total dissolved solids - TDS):
```Water has the ability to dissolve a wide range of inorganic and some organic minerals or salts such as potassium, calcium, sodium, bicarbonates, chlorides, magnesium, sulfates, etc. These minerals can produce an unwanted taste and a diluted color in the appearance of water. This is an important parameter for the use of water. A high TDS value indicates that the water is highly mineralized. The desirable limit for TDS is 500 mg/l and the maximum limit is 1000 mg/l, prescribed for drinking purposes.```
### 4. Chloramines:
```Chlorine and chloramine are the major disinfectants used in public water systems. Chloramines are most commonly formed when ammonia is added to chlorine to treat drinking water. Chlorine levels up to 4 milligrams per liter (mg/L or 4 parts per million (ppm)) are considered safe in drinking water.```
### 5. Sulfate:
```Sulfates are naturally occurring substances that are found in minerals, soil, and rocks. They are present in ambient air, groundwater, plants, and food. The principal commercial use of sulfate is in the chemical industry. Sulfate concentration in seawater is about 2,700 milligrams per liter (mg/L). It ranges from 3 to 30 mg/L in most freshwater supplies, although much higher concentrations (1000 mg/L) are found in some geographic locations. ```
### 6. Conductivity:
```Pure water is not a good conductor of electric current; rather, it is a good insulator. An increase in ion concentration enhances the electrical conductivity of water. Generally, the amount of dissolved solids in water determines the electrical conductivity. Electrical conductivity (EC) actually measures the ionic process of a solution that enables it to transmit current. According to WHO standards, the EC value should not exceed 400 μS/cm.```
### 7. Organic_carbon:
```Total Organic Carbon (TOC) in source waters comes from decaying natural organic matter (NOM) as well as synthetic sources. TOC is a measure of the total amount of carbon in organic compounds in pure water. According to the US EPA, TOC should be < 2 mg/L in treated/drinking water and < 4 mg/L in source water that is used for treatment.```
### 8. Trihalomethanes:
```THMs are chemicals which may be found in water treated with chlorine. The concentration of THMs in drinking water varies according to the level of organic material in the water, the amount of chlorine required to treat the water, and the temperature of the water that is being treated. THM levels up to 80 ppm is considered safe in drinking water.```
### 9. Turbidity:
```The turbidity of water depends on the quantity of solid matter present in the suspended state. It is a measure of the light-scattering properties of water, and the test is used to indicate the quality of waste discharge with respect to colloidal matter. The mean turbidity value obtained for the Wondo Genet Campus (0.98 NTU) is lower than the WHO recommended value of 5.00 NTU.```
### 10. Potability:
```Indicates if water is safe for human consumption where 1 means Potable and 0 means Not potable.```
Kaggle dataset identifier: water-potability
<jupyter_code>import pandas as pd
df = pd.read_csv('water-potability/water_potability.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 3276 entries, 0 to 3275
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 ph 2785 non-null float64
1 Hardness 3276 non-null float64
2 Solids 3276 non-null float64
3 Chloramines 3276 non-null float64
4 Sulfate 2495 non-null float64
5 Conductivity 3276 non-null float64
6 Organic_carbon 3276 non-null float64
7 Trihalomethanes 3114 non-null float64
8 Turbidity 3276 non-null float64
9 Potability 3276 non-null int64
dtypes: float64(9), int64(1)
memory usage: 256.1 KB
<jupyter_text>Examples:
{
"ph": NaN,
"Hardness": 204.8904554713,
"Solids": 20791.318980747,
"Chloramines": 7.3002118732,
"Sulfate": 368.5164413498,
"Conductivity": 564.3086541722,
"Organic_carbon": 10.379783078100001,
"Trihalomethanes": 86.9909704615,
"Turbidity": 2.9631353806,
"Potability": 0.0
}
{
"ph": 3.7160800754,
"Hardness": 129.4229205149,
"Solids": 18630.0578579703,
"Chloramines": 6.6352458839,
"Sulfate": NaN,
"Conductivity": 592.8853591349,
"Organic_carbon": 15.1800131164,
"Trihalomethanes": 56.3290762845,
"Turbidity": 4.5006562749,
"Potability": 0.0
}
{
"ph": 8.0991241893,
"Hardness": 224.2362593936,
"Solids": 19909.5417322924,
"Chloramines": 9.2758836027,
"Sulfate": NaN,
"Conductivity": 418.6062130645,
"Organic_carbon": 16.8686369296,
"Trihalomethanes": 66.4200925118,
"Turbidity": 3.0559337497,
"Potability": 0.0
}
{
"ph": 8.3167658842,
"Hardness": 214.3733940856,
"Solids": 22018.4174407753,
"Chloramines": 8.0593323774,
"Sulfate": 356.8861356431,
"Conductivity": 363.2665161642,
"Organic_carbon": 18.4365244955,
"Trihalomethanes": 100.3416743651,
"Turbidity": 4.6287705368,
"Potability": 0.0
}
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# import subpackage of Matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
# import 'Seaborn'
import seaborn as sns
# to suppress warnings
from warnings import filterwarnings
filterwarnings("ignore")
# display all columns of the dataframe
pd.options.display.max_columns = None
# display all rows of the dataframe
pd.options.display.max_rows = None
# to display the float values upto 6 decimal places
pd.options.display.float_format = "{:.6f}".format
# import train-test split
from sklearn.model_selection import train_test_split
# import StandardScaler to perform scaling
from sklearn.preprocessing import StandardScaler
# import various functions from sklearn
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import StackingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
# import the XGBoost function for classification
from xgboost import XGBClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import KFold
df = pd.read_csv("/kaggle/input/water-potability/water_potability.csv")
df.head()
df.shape
df.info()
df.describe()
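# The dataset description above quotes guideline values (pH 6.5-8.5, chloramines <= 4 mg/L,
# conductivity <= 400 uS/cm, organic carbon < 4 mg/L, trihalomethanes <= 80 ppm,
# turbidity <= 5 NTU). A sketch that counts samples falling inside all of those ranges;
# the Solids column is far above the quoted 1000 mg/l TDS limit, so its units likely
# differ and it is skipped here:
within_guidelines = (
    df["ph"].between(6.5, 8.5)
    & (df["Chloramines"] <= 4)
    & (df["Conductivity"] <= 400)
    & (df["Organic_carbon"] < 4)
    & (df["Trihalomethanes"] <= 80)
    & (df["Turbidity"] <= 5)
)
print(within_guidelines.value_counts())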
# #### EDA
# missing values
df.isnull().sum()
# percentage of missing values
df.isnull().sum() / df.shape[0] * 100
sns.heatmap(df.isnull(), cbar=False)
plt.rcParams["figure.figsize"] = [12, 12]
df.hist()
plt.show()
j = []
skew = []
kurtosis = []
for i in df.columns[:9]:
j.append(i)
skew.append(df[i].skew())
kurtosis.append(df[i].kurt())
skew_kurtosis = pd.DataFrame({"column name": j, "skew": skew, "kurtosis": kurtosis})
skew_kurtosis
k = 1
plt.figure(figsize=(30, 30))
for i in df.columns[:9]:
plt.subplot(5, 3, k)
sns.distplot(df[i])
k += 1
k = 1
plt.figure(figsize=(20, 20))
for i in df.columns[:9]:
if df[i].dtypes != "object":
plt.subplot(4, 4, k)
sns.boxplot(x=df[i])
k += 1
# Since these columns contain outliers, impute with the median:
# unlike the mean, the median is not affected by the presence of outliers
df["ph"] = df["ph"].fillna(df["ph"].median())
df["Sulfate"] = df["Sulfate"].fillna(df["Sulfate"].median())
df["Trihalomethanes"] = df["Trihalomethanes"].fillna(df["Trihalomethanes"].median())
df.isnull().sum()
df.describe()
plt.rcParams["figure.figsize"] = [12, 12]
df.hist()
plt.show()
df.nunique()
print(df["Potability"].value_counts())
sns.pairplot(df)
plt.figure(figsize=(10, 5))
sns.heatmap(df.corr(), annot=True, cmap="YlGnBu")
# Hardness and pH have a high correlation
# Sulfate and Solids have a low correlation
X = df.drop("Potability", axis=1)
y = df["Potability"]
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=20, test_size=0.3
)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# #### Logistic regression
lr = LogisticRegression()
lr.fit(X_train, y_train)
y_pred_lr = lr.predict(X_test)
print(classification_report(y_test, y_pred_lr))
y_act = y_test # True o/p
y_pred = y_pred_lr # model o/p
sns.heatmap(confusion_matrix(y_act, y_pred), annot=True, linewidth=0.1)
cnf_mat = confusion_matrix(y_act, y_pred)
tn = cnf_mat[0, 0]
tp = cnf_mat[1, 1]
fp = cnf_mat[0, 1]
fn = cnf_mat[1, 0]
tn, tp, fp, fn
acc = (tn + tp) / (tn + tp + fn + fp)
acc
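# The block above (classification report, confusion-matrix heatmap, manual accuracy) is
# repeated verbatim for every model below; a small helper would keep it in one place.
# A sketch using the sklearn/seaborn functions already imported in this notebook:
def evaluate_model(fitted_model, X_eval, y_eval):
    y_hat = fitted_model.predict(X_eval)
    print(classification_report(y_eval, y_hat))
    cnf = confusion_matrix(y_eval, y_hat)
    sns.heatmap(cnf, annot=True, linewidth=0.1)
    tn, fp, fn, tp = cnf.ravel()
    return (tn + tp) / (tn + tp + fn + fp)
# Example: evaluate_model(lr, X_test, y_test)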
# #### Decision Tree
dt = DecisionTreeClassifier(random_state=1)
dt.fit(X_train, y_train)
y_pred_dt = dt.predict(X_test)
print(classification_report(y_test, y_pred_dt))
y_act = y_test # True o/p
y_pred = y_pred_dt # model o/p
sns.heatmap(confusion_matrix(y_act, y_pred), annot=True, linewidth=0.1)
cnf_mat = confusion_matrix(y_act, y_pred)
tn = cnf_mat[0, 0]
tp = cnf_mat[1, 1]
fp = cnf_mat[0, 1]
fn = cnf_mat[1, 0]
tn, tp, fp, fn
acc = (tn + tp) / (tn + tp + fn + fp)
acc
# #### Random forest
rf = RandomForestClassifier(random_state=1)
rf.fit(X_train, y_train)
y_pred_rf = rf.predict(X_test)
print(classification_report(y_test, y_pred_rf))
y_act = y_test # True o/p
y_pred = y_pred_rf # model o/p
sns.heatmap(confusion_matrix(y_act, y_pred), annot=True, linewidth=0.1)
cnf_mat = confusion_matrix(y_act, y_pred)
tn = cnf_mat[0, 0]
tp = cnf_mat[1, 1]
fp = cnf_mat[0, 1]
fn = cnf_mat[1, 0]
tn, tp, fp, fn
acc = (tn + tp) / (tn + tp + fn + fp)
acc
# #### XG BOOST
xgb = XGBClassifier(random_state=1)
xgb.fit(X_train, y_train)
y_pred_xgb = xgb.predict(X_test)
print(classification_report(y_test, y_pred_xgb))
y_act = y_test # True o/p
y_pred = y_pred_xgb # model o/p
sns.heatmap(confusion_matrix(y_act, y_pred), annot=True, linewidth=0.1)
cnf_mat = confusion_matrix(y_act, y_pred)
tn = cnf_mat[0, 0]
tp = cnf_mat[1, 1]
fp = cnf_mat[0, 1]
fn = cnf_mat[1, 0]
tn, tp, fp, fn
acc = (tn + tp) / (tn + tp + fn + fp)
acc
# #### ADA BOOST
ada = AdaBoostClassifier(random_state=1)
ada.fit(X_train, y_train)
y_pred_ada = ada.predict(X_test)
print(classification_report(y_test, y_pred_ada))
y_act = y_test # True o/p
y_pred = y_pred_ada # model o/p
sns.heatmap(confusion_matrix(y_act, y_pred), annot=True, linewidth=0.1)
cnf_mat = confusion_matrix(y_act, y_pred)
tn = cnf_mat[0, 0]
tp = cnf_mat[1, 1]
fp = cnf_mat[0, 1]
fn = cnf_mat[1, 0]
tn, tp, fp, fn
acc = (tn + tp) / (tn + tp + fn + fp)
acc
# #### GRADIENT BOOSTING
gra = GradientBoostingClassifier(random_state=1)
gra.fit(X_train, y_train)
y_pred_gra = gra.predict(X_test)
print(classification_report(y_test, y_pred_gra))
y_act = y_test # True o/p
y_pred = y_pred_gra # model o/p
sns.heatmap(confusion_matrix(y_act, y_pred), annot=True, linewidth=0.1)
cnf_mat = confusion_matrix(y_act, y_pred)
tn = cnf_mat[0, 0]
tp = cnf_mat[1, 1]
fp = cnf_mat[0, 1]
fn = cnf_mat[1, 0]
tn, tp, fp, fn
acc = (tn + tp) / (tn + tp + fn + fp)
acc
# Random forest ranks first with accuracy of 0.6673448626653102
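# A single train/test split can be noisy; cross_val_score (imported above but unused so
# far) gives a more stable estimate. A sketch for the random forest on the unscaled X, y:
cv_scores = cross_val_score(
    RandomForestClassifier(random_state=1),
    X,
    y,
    cv=KFold(n_splits=5, shuffle=True, random_state=1),
)
print("CV accuracy: %.3f +/- %.3f" % (cv_scores.mean(), cv_scores.std()))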
# Let's treat the outliers and then see how the models perform
def outlier_treatment(column):
q1 = df[column].quantile(0.25)
q3 = df[column].quantile(0.75)
iqr = q3 - q1
ul = q3 + 1.5 * iqr
ll = q1 - 1.5 * iqr
return ll, ul
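# The helper returns (lower, upper) whisker limits, matching how w_limit[0] and
# w_limit[1] are used below; for example (a sketch):
ll, ul = outlier_treatment("Hardness")
print("Hardness limits:", round(ll, 2), "to", round(ul, 2))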
for i in df.columns[:9]:
w_limit = outlier_treatment(i)
df = df[~((df[i] < w_limit[0]) | (df[i] > w_limit[1]))]
k = 1
plt.figure(figsize=(20, 20))
for i in df.columns[:9]:
if df[i].dtypes != "object":
plt.subplot(4, 4, k)
sns.boxplot(x=df[i])
k += 1
df.shape
df.describe()
X = df.drop("Potability", axis=1)
y = df["Potability"]
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=20, test_size=0.3
)
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# #### Logistic regression
lr = LogisticRegression()
lr.fit(X_train, y_train)
y_pred_lr = lr.predict(X_test)
print(classification_report(y_test, y_pred_lr))
y_act = y_test # True o/p
y_pred = y_pred_lr # model o/p
sns.heatmap(confusion_matrix(y_act, y_pred), annot=True, linewidth=0.1)
cnf_mat = confusion_matrix(y_act, y_pred)
tn = cnf_mat[0, 0]
tp = cnf_mat[1, 1]
fp = cnf_mat[0, 1]
fn = cnf_mat[1, 0]
tn, tp, fp, fn
acc = (tn + tp) / (tn + tp + fn + fp)
acc
# #### Decision Tree
dt = DecisionTreeClassifier(random_state=1)
dt.fit(X_train, y_train)
y_pred_dt = dt.predict(X_test)
print(classification_report(y_test, y_pred_dt))
y_act = y_test # True o/p
y_pred = y_pred_dt # model o/p
sns.heatmap(confusion_matrix(y_act, y_pred), annot=True, linewidth=0.1)
cnf_mat = confusion_matrix(y_act, y_pred)
tn = cnf_mat[0, 0]
tp = cnf_mat[1, 1]
fp = cnf_mat[0, 1]
fn = cnf_mat[1, 0]
tn, tp, fp, fn
acc = (tn + tp) / (tn + tp + fn + fp)
acc
# #### Random forest
rf = RandomForestClassifier(random_state=1)
rf.fit(X_train, y_train)
y_pred_rf = rf.predict(X_test)
print(classification_report(y_test, y_pred_rf))
y_act = y_test # True o/p
y_pred = y_pred_rf # model o/p
sns.heatmap(confusion_matrix(y_act, y_pred), annot=True, linewidth=0.1)
cnf_mat = confusion_matrix(y_act, y_pred)
tn = cnf_mat[0, 0]
tp = cnf_mat[1, 1]
fp = cnf_mat[0, 1]
fn = cnf_mat[1, 0]
tn, tp, fp, fn
acc = (tn + tp) / (tn + tp + fn + fp)
acc
# #### XG BOOST
xgb = XGBClassifier(random_state=1)
xgb.fit(X_train, y_train)
y_pred_xgb = xgb.predict(X_test)
print(classification_report(y_test, y_pred_xgb))
y_act = y_test # True o/p
y_pred = y_pred_xgb # model o/p
sns.heatmap(confusion_matrix(y_act, y_pred), annot=True, linewidth=0.1)
cnf_mat = confusion_matrix(y_act, y_pred)
tn = cnf_mat[0, 0]
tp = cnf_mat[1, 1]
fp = cnf_mat[0, 1]
fn = cnf_mat[1, 0]
tn, tp, fp, fn
acc = (tn + tp) / (tn + tp + fn + fp)
acc
# #### ADA BOOST
ada = AdaBoostClassifier(random_state=1)
ada.fit(X_train, y_train)
y_pred_ada = ada.predict(X_test)
print(classification_report(y_test, y_pred_ada))
y_act = y_test # True o/p
y_pred = y_pred_ada # model o/p
sns.heatmap(confusion_matrix(y_act, y_pred), annot=True, linewidth=0.1)
cnf_mat = confusion_matrix(y_act, y_pred)
tn = cnf_mat[0, 0]
tp = cnf_mat[1, 1]
fp = cnf_mat[0, 1]
fn = cnf_mat[1, 0]
tn, tp, fp, fn
acc = (tn + tp) / (tn + tp + fn + fp)
acc
# #### GRADIENT BOOSTING
gra = GradientBoostingClassifier(random_state=1)
gra.fit(X_train, y_train)
y_pred_gra = gra.predict(X_test)
print(classification_report(y_test, y_pred_gra))
y_act = y_test # True o/p
y_pred = y_pred_gra # model o/p
sns.heatmap(confusion_matrix(y_act, y_pred), annot=True, linewidth=0.1)
cnf_mat = confusion_matrix(y_act, y_pred)
tn = cnf_mat[0, 0]
tp = cnf_mat[1, 1]
fp = cnf_mat[0, 1]
fn = cnf_mat[1, 0]
tn, tp, fp, fn
acc = (tn + tp) / (tn + tp + fn + fp)
acc
# After outlier treatment, random forest again performs best, now with an accuracy of 0.6353383458646616
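# GridSearchCV was imported at the top but never used; tuning the random forest is a
# natural next step. A sketch with a hypothetical parameter grid, assuming the
# post-outlier-removal X_train and y_train from above:
param_grid = {"n_estimators": [100, 300, 500], "max_depth": [None, 5, 10]}
grid = GridSearchCV(
    RandomForestClassifier(random_state=1), param_grid, cv=5, scoring="accuracy"
)
grid.fit(X_train, y_train)
print(grid.best_params_, round(grid.best_score_, 3))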
<start_data_description><data_path>water-potability/water_potability.csv:
<column_names>
['ph', 'Hardness', 'Solids', 'Chloramines', 'Sulfate', 'Conductivity', 'Organic_carbon', 'Trihalomethanes', 'Turbidity', 'Potability']
<column_types>
{'ph': 'float64', 'Hardness': 'float64', 'Solids': 'float64', 'Chloramines': 'float64', 'Sulfate': 'float64', 'Conductivity': 'float64', 'Organic_carbon': 'float64', 'Trihalomethanes': 'float64', 'Turbidity': 'float64', 'Potability': 'int64'}
<dataframe_Summary>
{'ph': {'count': 2785.0, 'mean': 7.080794504276835, 'std': 1.5943195187088104, 'min': 0.0, '25%': 6.09309191422186, '50%': 7.036752103833548, '75%': 8.06206612314847, 'max': 13.999999999999998}, 'Hardness': {'count': 3276.0, 'mean': 196.36949601730151, 'std': 32.879761476294156, 'min': 47.432, '25%': 176.85053787752437, '50%': 196.96762686363076, '75%': 216.66745621487073, 'max': 323.124}, 'Solids': {'count': 3276.0, 'mean': 22014.092526077104, 'std': 8768.570827785927, 'min': 320.942611274359, '25%': 15666.69029696465, '50%': 20927.833606520187, '75%': 27332.762127438615, 'max': 61227.19600771213}, 'Chloramines': {'count': 3276.0, 'mean': 7.122276793425786, 'std': 1.5830848890397096, 'min': 0.3520000000000003, '25%': 6.1274207554913, '50%': 7.130298973883081, '75%': 8.114887032109028, 'max': 13.127000000000002}, 'Sulfate': {'count': 2495.0, 'mean': 333.7757766108135, 'std': 41.416840461672706, 'min': 129.00000000000003, '25%': 307.69949783471964, '50%': 333.073545745888, '75%': 359.9501703847443, 'max': 481.0306423059972}, 'Conductivity': {'count': 3276.0, 'mean': 426.20511068255325, 'std': 80.8240640511118, 'min': 181.483753985146, '25%': 365.7344141184627, '50%': 421.8849682800544, '75%': 481.7923044877282, 'max': 753.3426195583046}, 'Organic_carbon': {'count': 3276.0, 'mean': 14.284970247677318, 'std': 3.308161999126874, 'min': 2.1999999999999886, '25%': 12.065801333613067, '50%': 14.218337937208588, '75%': 16.557651543843434, 'max': 28.30000000000001}, 'Trihalomethanes': {'count': 3114.0, 'mean': 66.39629294676803, 'std': 16.175008422218657, 'min': 0.7379999999999995, '25%': 55.844535620979954, '50%': 66.62248509808484, '75%': 77.33747290873062, 'max': 124.0}, 'Turbidity': {'count': 3276.0, 'mean': 3.966786169791058, 'std': 0.7803824084854124, 'min': 1.45, '25%': 3.439710869612912, '50%': 3.955027562993039, '75%': 4.50031978728511, 'max': 6.739}, 'Potability': {'count': 3276.0, 'mean': 0.3901098901098901, 'std': 0.48784916967025516, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 1.0}}
<dataframe_info>
RangeIndex: 3276 entries, 0 to 3275
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 ph 2785 non-null float64
1 Hardness 3276 non-null float64
2 Solids 3276 non-null float64
3 Chloramines 3276 non-null float64
4 Sulfate 2495 non-null float64
5 Conductivity 3276 non-null float64
6 Organic_carbon 3276 non-null float64
7 Trihalomethanes 3114 non-null float64
8 Turbidity 3276 non-null float64
9 Potability 3276 non-null int64
dtypes: float64(9), int64(1)
memory usage: 256.1 KB
<some_examples>
{'ph': {'0': None, '1': 3.7160800754, '2': 8.0991241893, '3': 8.3167658842}, 'Hardness': {'0': 204.8904554713, '1': 129.4229205149, '2': 224.2362593936, '3': 214.3733940856}, 'Solids': {'0': 20791.318980747, '1': 18630.0578579703, '2': 19909.5417322924, '3': 22018.4174407753}, 'Chloramines': {'0': 7.3002118732, '1': 6.6352458839, '2': 9.2758836027, '3': 8.0593323774}, 'Sulfate': {'0': 368.5164413498, '1': None, '2': None, '3': 356.8861356431}, 'Conductivity': {'0': 564.3086541722, '1': 592.8853591349, '2': 418.6062130645, '3': 363.2665161642}, 'Organic_carbon': {'0': 10.3797830781, '1': 15.1800131164, '2': 16.8686369296, '3': 18.4365244955}, 'Trihalomethanes': {'0': 86.9909704615, '1': 56.3290762845, '2': 66.4200925118, '3': 100.3416743651}, 'Turbidity': {'0': 2.9631353806, '1': 4.5006562749, '2': 3.0559337497, '3': 4.6287705368}, 'Potability': {'0': 0, '1': 0, '2': 0, '3': 0}}
<end_description>
| 4,221 | 1 | 6,498 | 4,221 |
69090604
|
<jupyter_start><jupyter_text>Yelp Dataset
## Context
This dataset is a subset of Yelp's businesses, reviews, and user data. It was originally put together for the Yelp Dataset Challenge which is a chance for students to conduct research or analysis on Yelp's data and share their discoveries. In the most recent dataset you'll find information about businesses across 8 metropolitan areas in the USA and Canada.
## Content<br>
This dataset contains five JSON files and the user agreement.
More information about those files can be found [here](https://www.yelp.com/dataset).
## Code snippet to read the files
in Python, you can read the JSON files like this (using the json and pandas libraries):
```
import json
import pandas as pd
data_file = open("yelp_academic_dataset_checkin.json")
data = []
for line in data_file:
data.append(json.loads(line))
checkin_df = pd.DataFrame(data)
data_file.close()
```
Kaggle dataset identifier: yelp-dataset
<jupyter_script># # **Riconoscimento automatico di una review positiva o negativa**
# La seguente implementazione consistene in un classificatore in grado di distinguere recensioni di attività commerciali come positiva o negativo. Per questo, è stato utilizzato il dataset "Yelp Open Dataset". Inizialmente utilizzeremo un set di dati molto contenuto con 30k di recensioni.
# Le etichette sono bilanciate tra le due classi (positiva e negativa). Le recensioni con un punteggio 3 sono etichettate come positive (valore 1). Le recensioni neutre non sono incluse nei dati etichettati.
#
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import json
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# # Data preprocessing
# After the dataset has been downloaded and extracted from the archive, it is transformed into a form better suited to feeding a machine learning model for training. We start by splitting the review data into 2 *pandas* data frames representing the train (80%) and test (20%) sets: **reviews_train** and **reviews_test**.
# A new column "**label**" is added to the data frames, where positive reviews (stars > 3) take the value "1" and negative reviews (stars <= 3) take the value "0".
from sklearn.model_selection import train_test_split
REVIEWS_LIMIT = 3000
def load_rows(filepath, nrows=None, func=None) -> pd.DataFrame:
with open(filepath) as json_file:
count = 0
objs = []
line = json_file.readline()
while (nrows is None or count < nrows) and line:
count += 1
obj = json.loads(line)
if func != None:
func(obj)
objs.append(obj)
line = json_file.readline()
return pd.DataFrame(objs)
# add the sentiment class of the review
def add_sentiment(obj):
obj["label"] = 1 if obj["stars"] > 3 else 0
reviews = load_rows(
"../input/yelp-dataset/yelp_academic_dataset_review.json",
REVIEWS_LIMIT,
add_sentiment,
)
print("Review objects loaded. Count = {}".format(reviews.shape[0]))
# 80% train, 20% test
reviews_train, reviews_test = train_test_split(reviews, test_size=0.2)
# keep only the text and label columns
reviews_train = reviews_train[["text", "label"]]
reviews_test = reviews_test[["text", "label"]]
display(reviews_train.head(2))
display(reviews_test.head(2))
# with pd.option_context('display.max_colwidth', None):
# display(reviews_train)
# # Data assessment
# To see how balanced positive and negative reviews are, the data needs to be inspected. As the pie chart shows, the reviews are mostly positive. In later steps we will balance the dataset in order to obtain an even distribution of the data
# across all classes, so that errors coming from the different classes,
# i.e. from the majority class and the minority class, carry the same weight.
display(reviews_train.head(2))
reviews.stars.value_counts().plot(kind="pie", autopct="%1.0f%%")
import string
import nltk
from IPython.display import display
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from string import punctuation
from nltk.stem.porter import PorterStemmer
porter = PorterStemmer()
words = set(stopwords.words("english") + list(punctuation))
def remove_punc_stopword(text):
tokens = word_tokenize(text)
out = []
for word in tokens:
lower = word.lower()
if lower not in words and lower.isalpha():
out.append(lower)
    return " ".join(out)  # join the tokens back into a string so CountVectorizer can consume it
reviews_train["text"] = reviews_train["text"].apply(remove_punc_stopword)
reviews_train.head(2)
# with pd.option_context('display.max_colwidth', None):
# display(reviews_train)
# # Text preprocessing
# Machine learning algorithms only work with numeric values, so the text has to be represented with numbers or vectors of numbers. One way to do this is the **Bag-of-words** model, in which a piece of text (document) is represented by a vector of the counts of vocabulary words in that document. This model ignores grammar and word order; all it considers is word frequency.
# * Using **bigrams**, the counts of every combination of *n* vocabulary words that appears in a given document are taken into account.
# * Using **Term Frequency, Inverse Document Frequency** (TF-IDF) we can obtain slightly better results than with plain word counts. TF-IDF measures the importance of a particular word with respect to a document and to the whole corpus.
# ***Term frequency***
# Term frequency measures the count of each word in a document relative to all the words in the same document.
# TF(w) = (number of times the word w appears in a document) / (total number of words in the document)
# ***Inverse document frequency***
# IDF measures the importance of a word, taking into account the frequency of the word across the whole corpus.
# It measures how important a word is for the corpus.
# IDF(w) = log(total number of documents / number of documents containing w)
#
# Finally, to compute TF-IDF we multiply these two factors: TF and IDF.
# **TF-IDF(w) = TF(w) x IDF(w)**
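# A toy, hand-rolled illustration of the two formulas above (not part of the original
# notebook; scikit-learn's TfidfTransformer additionally applies IDF smoothing and L2
# normalisation, so its numbers differ slightly).
import math
toy_corpus = [["great", "food", "great", "service"], ["bad", "service"]]
word = "great"
tf = toy_corpus[0].count(word) / len(toy_corpus[0])  # 2 / 4 = 0.5
idf = math.log(len(toy_corpus) / sum(word in doc for doc in toy_corpus))  # log(2 / 1)
print("tf-idf of 'great' in document 0:", tf * idf)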
# # Text vectorization
# For text vectorization the Scikit-Learn classes CountVectorizer and TfidfTransformer will be used.
# We use these classes to turn our documents into bigram matrices (using both raw counts and tf-idf values). Each row of the matrices represents a document (review) in our dataset and each column holds the value associated with a combination of at most 2 vocabulary words (a bigram).
# **CountVectorizer** has an *ngram_range* parameter that expects a tuple of size 2 controlling which n-grams to include.
# After constructing a CountVectorizer object, we should call its .fit() method with the actual text as the parameter, so that it can learn the required statistics of our collection of documents.
# Then, calling the .transform() method with our collection of documents returns the matrix for the specified n-gram range.
# As the class name suggests, this matrix only contains the counts. To obtain the tf-idf values, the **TfidfTransformer** class has to be used. It has .fit() and .transform() methods that are used similarly to those of CountVectorizer, but they take as input the count matrix obtained in the previous step and return a matrix with tf-idf values.
# We should use .fit() only on the training data; when we want to evaluate the test score, or whenever we want to make a prediction, we should use these objects to transform the data before feeding it into our classifier.
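# A minimal sketch of the API just described (illustrative only, not part of the original
# notebook): fit a bigram CountVectorizer on two tiny documents and inspect the vocabulary
# and the resulting count / tf-idf matrices.
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
toy_docs = ["great food great service", "bad service"]
toy_vec = CountVectorizer(ngram_range=(1, 2))
toy_counts = toy_vec.fit_transform(toy_docs)
print(sorted(toy_vec.vocabulary_))  # unigram and bigram features
print(toy_counts.toarray())  # raw counts per document
print(TfidfTransformer().fit_transform(toy_counts).toarray().round(2))  # tf-idf values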
# Text vectorization
# Bigram Counts
from sklearn.feature_extraction.text import CountVectorizer, TfidfTransformer
from joblib import dump, load # used for saving and loading sklearn objects
bigram_vectorizer = CountVectorizer(ngram_range=(1, 2))
bigram_vectorizer.fit(reviews_train["text"].values)
X_train_bigram = bigram_vectorizer.transform(reviews_train["text"].values)
# Bigram Tf-Idf
bigram_tf_idf_transformer = TfidfTransformer()
bigram_tf_idf_transformer.fit(X_train_bigram)
X_train_bigram_tf_idf = bigram_tf_idf_transformer.transform(X_train_bigram)
y_train = reviews_train["label"].values
# # SGDClassifier
# As the model we will use a linear classifier trained with stochastic gradient descent (**SGDClassifier**).
# First we generate our data in 2 forms: bigram matrices with the counts and with the tf-idf values.
# Then we train and evaluate our model on each of these 2 data representations using SGDClassifier.
# Next, we pick the data representation that led to the best score and tune the hyperparameters of our model on that form of the data, using cross-validation to obtain the best results.
from sklearn.linear_model import SGDClassifier
from sklearn.model_selection import train_test_split
from scipy.sparse import csr_matrix
import numpy as np
def train_and_show_scores(X: csr_matrix, y: np.array, title: str) -> None:
X_train, X_valid, y_train, y_valid = train_test_split(
X, y, train_size=0.75, stratify=y
)
clf = SGDClassifier(class_weight="balanced")
clf.fit(X_train, y_train)
print(X_train.shape)
train_score = clf.score(X_train, y_train)
valid_score = clf.score(X_valid, y_valid)
print(
f"{title}\nTrain score: {round(train_score, 2)} ; Validation score: {round(valid_score, 2)}\n"
)
train_and_show_scores(X_train_bigram, y_train, "Bigram Counts")
train_and_show_scores(X_train_bigram_tf_idf, y_train, "Bigram Tf-Idf")
# # Grid search cross validation
from sklearn.model_selection import RandomizedSearchCV
from sklearn.model_selection import GridSearchCV
from scipy.stats import uniform
X_train = X_train_bigram_tf_idf
# X_train = X_train_bigram
# loss, learning rate, initial learning rate, penalty and alpha
clf = SGDClassifier(random_state=0)
distributions = dict(
loss=["hinge", "log", "modified_huber", "squared_hinge", "perceptron"],
learning_rate=["optimal", "invscaling", "adaptive"],
eta0=uniform(loc=1e-7, scale=1e-2),
penalty=["l1", "l2", "elasticnet"],
alpha=uniform(loc=1e-6, scale=1e-4),
)
random_search_cv = RandomizedSearchCV(
estimator=clf, param_distributions=distributions, cv=5, n_iter=10, verbose=3
)
# params = [{
# loss: ['hinge', 'log', 'modified_huber', 'squared_hinge', 'perceptron'],
# learning_rate: ['optimal', 'invscaling', 'adaptive'],
# eta0: uniform(loc=1e-7, scale=1e-2),
# penalty: ['l1', 'l2', 'elasticnet'],
# alpha: uniform(loc=1e-6, scale=1e-4)
# }]
params = {
"loss": ["hinge", "log", "squared_hinge", "modified_huber"],
"alpha": [0.0001, 0.001, 0.01, 0.1],
"penalty": ["l2", "l1", "none"],
}
grid_search_cv = GridSearchCV(
estimator=clf, cv=5, param_grid=params, scoring="accuracy", verbose=3
)
# random_search_cv.fit(X_train, y_train)
grid_search_cv.fit(X_train, y_train)
print(f"Best params: {grid_search_cv.best_params_}")
print(f"Best score: {grid_search_cv.best_score_}")
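# Optional sketch (not in the original notebook): the full grid of results can be inspected
# as a data frame, which is handy for seeing how close the runner-up configurations are.
cv_results = pd.DataFrame(grid_search_cv.cv_results_)
print(
    cv_results[["params", "mean_test_score", "std_test_score", "rank_test_score"]]
    .sort_values("rank_test_score")
    .head()
)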
# # Saving the classifier with the best hyperparameters
sgd_classifier = grid_search_cv.best_estimator_
os.makedirs("classifiers", exist_ok=True)  # make sure the target folder exists before dumping
dump(grid_search_cv.best_estimator_, "classifiers/sgd_classifier.joblib")
# sgd_classifier = load('classifiers/sgd_classifier.joblib')
# # Test
from sklearn.metrics import plot_precision_recall_curve
X_test = bigram_vectorizer.transform(reviews_test["text"].values)
X_test = bigram_tf_idf_transformer.transform(X_test)
y_test = reviews_test["label"].values
score = sgd_classifier.score(X_test, y_test)
# from sklearn.metrics import precision_recall_fscore_support as score
# precision, recall, fscore, support = score(y_test, predicted)
# print('precision: {}'.format(precision))
# print('recall: {}'.format(recall))
# print('fscore: {}'.format(fscore))
# print('support: {}'.format(support))
print(score)
disp = plot_precision_recall_curve(sgd_classifier, X_test, y_test)
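# Per-class metrics, completing the commented-out block above (a sketch, not in the
# original notebook): explicit predictions are needed for precision_recall_fscore_support.
from sklearn.metrics import precision_recall_fscore_support
predicted = sgd_classifier.predict(X_test)
precision, recall, fscore, support = precision_recall_fscore_support(y_test, predicted)
print("precision: {}".format(precision))
print("recall: {}".format(recall))
print("fscore: {}".format(fscore))
print("support: {}".format(support))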
# # Random Forest
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from scipy.sparse import csr_matrix
import numpy as np
def train_and_show_scores(X: csr_matrix, y: np.array, title: str) -> None:
X_train, X_valid, y_train, y_valid = train_test_split(
X, y, train_size=0.75, stratify=y
)
clf = RandomForestClassifier(class_weight="balanced", n_estimators=100)
clf.fit(X_train, y_train)
print(X_train.shape)
train_score = clf.score(X_train, y_train)
valid_score = clf.score(X_valid, y_valid)
print(
f"{title}\nTrain score: {round(train_score, 2)} ; Validation score: {round(valid_score, 2)}\n"
)
y_train = reviews_train["label"].values  # use the training split so X and y have matching lengths
train_and_show_scores(X_train_bigram, y_train, "Bigram Counts")
train_and_show_scores(X_train_bigram_tf_idf, y_train, "Bigram Tf-Idf")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/090/69090604.ipynb
|
yelp-dataset
| null |
[{"Id": 69090604, "ScriptId": 18421858, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7853574, "CreationDate": "07/26/2021 18:20:47", "VersionNumber": 14.0, "Title": "Yelp - NLP", "EvaluationDate": "07/26/2021", "IsChange": true, "TotalLines": 299.0, "LinesInsertedFromPrevious": 57.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 242.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 91862966, "KernelVersionId": 69090604, "SourceDatasetVersionId": 1989618}]
|
[{"Id": 1989618, "DatasetId": 10100, "DatasourceVersionId": 2028839, "CreatorUserId": 305758, "LicenseName": "Other (specified in description)", "CreationDate": "03/02/2021 23:11:59", "VersionNumber": 3.0, "Title": "Yelp Dataset", "Slug": "yelp-dataset", "Subtitle": "A trove of reviews, businesses, users, tips, and check-in data!", "Description": "## Context\n\nThis dataset is a subset of Yelp's businesses, reviews, and user data. It was originally put together for the Yelp Dataset Challenge which is a chance for students to conduct research or analysis on Yelp's data and share their discoveries. In the most recent dataset you'll find information about businesses across 8 metropolitan areas in the USA and Canada. \n\n## Content<br>\nThis dataset contains five JSON files and the user agreement.\nMore information about those files can be found [here](https://www.yelp.com/dataset).\n\n## Code snippet to read the files\n\nin Python, you can read the JSON files like this (using the json and pandas libraries):\n\n```\nimport json\nimport pandas as pd\ndata_file = open(\"yelp_academic_dataset_checkin.json\")\ndata = []\nfor line in data_file:\n data.append(json.loads(line))\ncheckin_df = pd.DataFrame(data)\ndata_file.close()\n\n```", "VersionNotes": "we changed the metropolitan areas in this new version", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 10100, "CreatorUserId": 484516, "OwnerUserId": NaN, "OwnerOrganizationId": 1029.0, "CurrentDatasetVersionId": 3316532.0, "CurrentDatasourceVersionId": 3367434.0, "ForumId": 17447, "Type": 2, "CreationDate": "01/17/2018 17:27:37", "LastActivityDate": "02/06/2018", "TotalViews": 757633, "TotalDownloads": 114046, "TotalVotes": 1577, "TotalKernels": 237}]
| null |
| false | 0 | 4,020 | 0 | 4,277 | 4,020 |
||
69090163
|
<jupyter_start><jupyter_text>Indian Stock Market EOD Data(1990 onwards)
### Context
In order to validate various trading strategies and to come up with new trading strategies, historical data is a must.
Once historical data is available, there are various backtesting tools such as "[Backtesting.py](https://pypi.org/project/Backtesting/)" and technical analysis tools such as "[ta](https://technical-analysis-library-in-python.readthedocs.io/en/latest/index.html)" which help users to easily validate their strategies and generate extensive reports.
Please upvote for better reach and wider collaboration, if you find the dataset useful
### Content
This dataset contains detailed data for all indices(Eg: NIFTY, BANKNIFTY etc) including volatility index VIX and stocks listed on NSE since 1990
For indices, it has end of day (EOD) Open, High, Low, Close, Volume (OHLCV) and Turnover data
For stocks, it has EOD [OHLC](https://en.wikipedia.org/wiki/Open-high-low-close_chart#:~:text=An%20open%2Dhigh%2Dlow%2D,one%20day%20or%20one%20hour.), previous close, last price and [Vwap](https://www.investopedia.com/terms/v/vwap.asp#:~:text=The%20volume%20weighted%20average%20price%20(VWAP)%20is%20a%20trading%20benchmark,TradingView.) (Volume weighted average price) data
The data is structured into the below folder hierarchy
1. data\_YearStart\_YearEnd (Depicts the time period of the data)
1. index\_data (contains csv files for all indices including VIX)
--Contains OHLCV data for the indices in files named as "indexCode.csv"
--It also contains PE,PB and dividend data for the indices in a files named as "indexCode\_pe\_pb.csv"
1. stock\_data (contains csv files for all stocks listed on NSE)
--Contains OHLCV and VWap data for the stocks in files named as "stockCode.csv"
Spaces and "&" in filenames are replaced with "_" and "\_N\_" respectively
For Intraday index data please refer [Indian Stock Market Index Intraday Data(2008-2020)](https://www.kaggle.com/nishanthsalian/indian-stock-index-1minute-data-2008-2020) dataset
Kaggle dataset identifier: indian-stock-index-eod-data1990-onwards
<jupyter_code>import pandas as pd
df = pd.read_csv('indian-stock-index-eod-data1990-onwards/data_1990_2020/index_data/NIFTY_50.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 7387 entries, 0 to 7386
Data columns (total 7 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Date 7387 non-null object
1 Open 7387 non-null float64
2 High 7387 non-null float64
3 Low 7387 non-null float64
4 Close 7387 non-null float64
5 Volume 5974 non-null float64
6 Turnover 5974 non-null float64
dtypes: float64(6), object(1)
memory usage: 404.1+ KB
<jupyter_text>Examples:
{
"Date": "1990-07-03 00:00:00",
"Open": 0,
"High": 0,
"Low": 0,
"Close": 279.02,
"Volume": NaN,
"Turnover": NaN
}
{
"Date": "1990-07-05 00:00:00",
"Open": 0,
"High": 0,
"Low": 0,
"Close": 284.04,
"Volume": NaN,
"Turnover": NaN
}
{
"Date": "1990-07-06 00:00:00",
"Open": 0,
"High": 0,
"Low": 0,
"Close": 289.04,
"Volume": NaN,
"Turnover": NaN
}
{
"Date": "1990-07-09 00:00:00",
"Open": 0,
"High": 0,
"Low": 0,
"Close": 289.69,
"Volume": NaN,
"Turnover": NaN
}
<jupyter_script>import pandas as pd
nifty_data = pd.read_csv(
"/kaggle/input/indian-stock-index-eod-data1990-onwards/data_1990_2020/index_data/NIFTY_50.csv"
)
nifty_data.tail()
nifty_data["Date"] = pd.to_datetime(nifty_data["Date"])
nifty_data = nifty_data[nifty_data.Date > "1990-12-01"]
nifty_data.tail()
nifty_data.loc[nifty_data["High"] == 0, ["Open", "High", "Low"]] = nifty_data[
nifty_data["High"] == 0
].Close
nifty_data
def get_diffTimeFrame(data, timeframe):
    # resample daily OHLC to the given timeframe; Range% = high-low range relative to the mid price
    ohlc_dict = {"Open": "first", "High": "max", "Low": "min", "Close": "last"}
    result = data.resample(timeframe, on="Date").apply(ohlc_dict)
    result["Range%"] = (
        (result["High"] - result["Low"]) * 100 * 2 / (result["Low"] + result["High"])
    )
    return result
def get_diffTimeFramePos(data, timeframe):
    # upside range: how far High moved above Open, as a percentage of Open
    ohlc_dict = {"Open": "first", "High": "max", "Low": "min", "Close": "last"}
    result = data.resample(timeframe, on="Date").apply(ohlc_dict)
    result["Range%"] = (result["High"] - result["Open"]) * 100 / result["Open"]
    return result
def get_diffTimeFrameNeg(data, timeframe):
    # downside range: how far Low moved below Open, as a (negative) percentage of Open
    ohlc_dict = {"Open": "first", "High": "max", "Low": "min", "Close": "last"}
    result = data.resample(timeframe, on="Date").apply(ohlc_dict)
    result["Range%"] = (result["Low"] - result["Open"]) * 100 / result["Open"]
    return result
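# Illustrative sketch (not part of the original notebook): what the weekly resampling does,
# shown on a tiny synthetic OHLC frame so the aggregation rules are easy to verify.
toy_ohlc = pd.DataFrame(
    {
        "Date": pd.to_datetime(["2021-01-04", "2021-01-05", "2021-01-06"]),
        "Open": [100.0, 102.0, 101.0],
        "High": [103.0, 104.0, 102.0],
        "Low": [99.0, 101.0, 100.0],
        "Close": [102.0, 101.0, 101.5],
    }
)
print(get_diffTimeFrame(toy_ohlc, "W"))  # one row: Open=first, High=max, Low=min, Close=last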
nifty_weekly = get_diffTimeFrame(nifty_data, "W")
nifty_weekly.tail(12)
nifty_weekly_pos = get_diffTimeFramePos(nifty_data, "W")
nifty_weekly_neg = get_diffTimeFrameNeg(nifty_data, "W")
# nifty weekly signed range distribution
nifty_weekly_signed = nifty_weekly_pos["Range%"]
nifty_weekly_signed = nifty_weekly_signed.append(
nifty_weekly_neg["Range%"], ignore_index=True
)
nifty_weekly_signed.hist(figsize=[12, 8], bins=95)
# nifty weekly range distribution
nifty_weekly["Range%"].hist(figsize=[12, 8], bins=95)
nifty_monthly = get_diffTimeFrame(nifty_data, "M")
nifty_monthly.tail(12)
nifty_monthly_pos = get_diffTimeFramePos(nifty_data, "M")
nifty_monthly_neg = get_diffTimeFrameNeg(nifty_data, "M")
# nifty monthly signed range distribution
nifty_monthly_signed = nifty_monthly_pos["Range%"]
nifty_monthly_signed = nifty_monthly_signed.append(
nifty_monthly_neg["Range%"], ignore_index=True
)
nifty_monthly_signed.hist(figsize=[12, 8], bins=95)
# nifty monthly range distribution
nifty_monthly["Range%"].hist(figsize=[12, 8], bins=55)
nifty_yearly = get_diffTimeFrame(nifty_data, "12M")
nifty_yearly
nifty_yearly_pos = get_diffTimeFramePos(nifty_data, "12M")
nifty_yearly_neg = get_diffTimeFrameNeg(nifty_data, "12M")
# nifty yearly signed range distribution
nifty_yearly_signed = nifty_yearly_pos["Range%"]
nifty_yearly_signed = nifty_yearly_signed.append(
nifty_yearly_neg["Range%"], ignore_index=True
)
nifty_yearly_signed.hist(figsize=[12, 8], bins=30)
# Nifty yearly range distribution
nifty_yearly["Range%"].hist(figsize=[12, 8], bins=15)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/090/69090163.ipynb
|
indian-stock-index-eod-data1990-onwards
|
nishanthsalian
|
[{"Id": 69090163, "ScriptId": 18821268, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 695758, "CreationDate": "07/26/2021 18:12:57", "VersionNumber": 7.0, "Title": "Nifty price range Distribution", "EvaluationDate": "07/26/2021", "IsChange": true, "TotalLines": 68.0, "LinesInsertedFromPrevious": 33.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 35.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 91862186, "KernelVersionId": 69090163, "SourceDatasetVersionId": 1844796}]
|
[{"Id": 1844796, "DatasetId": 1095729, "DatasourceVersionId": 1882513, "CreatorUserId": 695758, "LicenseName": "CC BY-NC-SA 4.0", "CreationDate": "01/14/2021 11:40:22", "VersionNumber": 6.0, "Title": "Indian Stock Market EOD Data(1990 onwards)", "Slug": "indian-stock-index-eod-data1990-onwards", "Subtitle": "OHLCV and more EOD data for all Indian stocks, VIX, indices on NSE, since 1990", "Description": "### Context \n\nIn-order to validate various trading strategies and to come up with new trading strategies, historical data is a must.\nOnce historical data is available, there are various backtesting tools such as \"[Backtesting.py](https://pypi.org/project/Backtesting/)\" and technical anlaysis tools such as \"[ta](https://technical-analysis-library-in-python.readthedocs.io/en/latest/index.html)\" which helps users to easily validate their strategies and generate extensive reports.\n\nPlease upvote for better reach and wider collaboration, if you find the dataset useful\n\n### Content\nThis dataset contains detailed data for all indices(Eg: NIFTY, BANKNIFTY etc) including volatility index VIX and stocks listed on NSE since 1990\nFor indices, it has end of day (EOD) Open, High, Low, Close, Volume (OHLCV) and Turnover data\nFor stocks, it has EOD [OHLC](https://en.wikipedia.org/wiki/Open-high-low-close_chart#:~:text=An%20open%2Dhigh%2Dlow%2D,one%20day%20or%20one%20hour.), previous close, last price and [Vwap](https://www.investopedia.com/terms/v/vwap.asp#:~:text=The%20volume%20weighted%20average%20price%20(VWAP)%20is%20a%20trading%20benchmark,TradingView.) (Volume weighted average price) data\n\nThe data is structured into the below folder hierarchy\n1. data\\_YearStart\\_YearEnd (Depicts the time period of the data)\n 1. index\\_data (contains csv files for all indices including VIX)\n --Contains OHLCV data for the indices in files named as \"indexCode.csv\"\n --It also contains PE,PB and dividend data for the indices in a files named as \"indexCode\\_pe\\_pb.csv\"\n 1. stock\\_data (contains csv files for all stocks listed on NSE)\n --Contains OHLCV and VWap data for the stocks in files named as \"stockCode.csv\"\n\nSpaces and \"&\" in filenames are replaced with \"_\" and \"\\_N\\_\" respectively\n\nFor Intraday index data please refer [Indian Stock Market Index Intraday Data(2008-2020)](https://www.kaggle.com/nishanthsalian/indian-stock-index-1minute-data-2008-2020) dataset\n\n### Acknowledgements\n\nThis data is sourced from [nse](http://www.nseindia.com/) using [nsepy](https://nsepy.xyz/) and [nsetools](https://pypi.org/project/nsetools) \nThe data is unprocessed and retained as obtained from the source.\n\nPhoto Credits: [Jason Leung](https://unsplash.com/@ninjason) on [Unsplash](https://unsplash.com/photos/SAYzxuS1O3M)", "VersionNotes": "adding 2021 data", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1095729, "CreatorUserId": 695758, "OwnerUserId": 695758.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1844796.0, "CurrentDatasourceVersionId": 1882513.0, "ForumId": 1112899, "Type": 2, "CreationDate": "01/13/2021 17:14:59", "LastActivityDate": "01/13/2021", "TotalViews": 8473, "TotalDownloads": 916, "TotalVotes": 35, "TotalKernels": 5}]
|
[{"Id": 695758, "UserName": "nishanthsalian", "DisplayName": "Nishanth", "RegisterDate": "08/25/2016", "PerformanceTier": 2}]
|
|
[{"indian-stock-index-eod-data1990-onwards/data_1990_2020/index_data/NIFTY_50.csv": {"column_names": "[\"Date\", \"Open\", \"High\", \"Low\", \"Close\", \"Volume\", \"Turnover\"]", "column_data_types": "{\"Date\": \"object\", \"Open\": \"float64\", \"High\": \"float64\", \"Low\": \"float64\", \"Close\": \"float64\", \"Volume\": \"float64\", \"Turnover\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 7387 entries, 0 to 7386\nData columns (total 7 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Date 7387 non-null object \n 1 Open 7387 non-null float64\n 2 High 7387 non-null float64\n 3 Low 7387 non-null float64\n 4 Close 7387 non-null float64\n 5 Volume 5974 non-null float64\n 6 Turnover 5974 non-null float64\ndtypes: float64(6), object(1)\nmemory usage: 404.1+ KB\n", "summary": "{\"Open\": {\"count\": 7387.0, \"mean\": 3977.6719683227293, \"std\": 3644.7258139608725, \"min\": 0.0, \"25%\": 1034.15, \"50%\": 2764.6, \"75%\": 6061.1, \"max\": 13980.9}, \"High\": {\"count\": 7387.0, \"mean\": 4004.895894138351, \"std\": 3660.286601412386, \"min\": 0.0, \"25%\": 1042.25, \"50%\": 2806.5, \"75%\": 6084.125, \"max\": 14024.85}, \"Low\": {\"count\": 7387.0, \"mean\": 3945.570018952213, \"std\": 3621.2563600718418, \"min\": 0.0, \"25%\": 1024.925, \"50%\": 2708.45, \"75%\": 6016.675, \"max\": 13936.45}, \"Close\": {\"count\": 7387.0, \"mean\": 4101.46544199269, \"std\": 3515.5412069767563, \"min\": 279.02, \"25%\": 1076.025, \"50%\": 2770.5, \"75%\": 6051.799999999999, \"max\": 13981.95}, \"Volume\": {\"count\": 5974.0, \"mean\": 163639661.2815534, \"std\": 161207460.1923021, \"min\": 1394931.0, \"25%\": 58875190.75, \"50%\": 125612476.0, \"75%\": 194308129.75, \"max\": 1811564187.0}, \"Turnover\": {\"count\": 5974.0, \"mean\": 68668851205.22263, \"std\": 71252574712.59055, \"min\": 401200000.0, \"25%\": 20746200000.0, \"50%\": 52069250000.0, \"75%\": 83440900000.0, \"max\": 785229299999.9999}}", "examples": "{\"Date\":{\"0\":\"1990-07-03\",\"1\":\"1990-07-05\",\"2\":\"1990-07-06\",\"3\":\"1990-07-09\"},\"Open\":{\"0\":0.0,\"1\":0.0,\"2\":0.0,\"3\":0.0},\"High\":{\"0\":0.0,\"1\":0.0,\"2\":0.0,\"3\":0.0},\"Low\":{\"0\":0.0,\"1\":0.0,\"2\":0.0,\"3\":0.0},\"Close\":{\"0\":279.02,\"1\":284.04,\"2\":289.04,\"3\":289.69},\"Volume\":{\"0\":null,\"1\":null,\"2\":null,\"3\":null},\"Turnover\":{\"0\":null,\"1\":null,\"2\":null,\"3\":null}}"}}]
| true | 1 |
<start_data_description><data_path>indian-stock-index-eod-data1990-onwards/data_1990_2020/index_data/NIFTY_50.csv:
<column_names>
['Date', 'Open', 'High', 'Low', 'Close', 'Volume', 'Turnover']
<column_types>
{'Date': 'object', 'Open': 'float64', 'High': 'float64', 'Low': 'float64', 'Close': 'float64', 'Volume': 'float64', 'Turnover': 'float64'}
<dataframe_Summary>
{'Open': {'count': 7387.0, 'mean': 3977.6719683227293, 'std': 3644.7258139608725, 'min': 0.0, '25%': 1034.15, '50%': 2764.6, '75%': 6061.1, 'max': 13980.9}, 'High': {'count': 7387.0, 'mean': 4004.895894138351, 'std': 3660.286601412386, 'min': 0.0, '25%': 1042.25, '50%': 2806.5, '75%': 6084.125, 'max': 14024.85}, 'Low': {'count': 7387.0, 'mean': 3945.570018952213, 'std': 3621.2563600718418, 'min': 0.0, '25%': 1024.925, '50%': 2708.45, '75%': 6016.675, 'max': 13936.45}, 'Close': {'count': 7387.0, 'mean': 4101.46544199269, 'std': 3515.5412069767563, 'min': 279.02, '25%': 1076.025, '50%': 2770.5, '75%': 6051.799999999999, 'max': 13981.95}, 'Volume': {'count': 5974.0, 'mean': 163639661.2815534, 'std': 161207460.1923021, 'min': 1394931.0, '25%': 58875190.75, '50%': 125612476.0, '75%': 194308129.75, 'max': 1811564187.0}, 'Turnover': {'count': 5974.0, 'mean': 68668851205.22263, 'std': 71252574712.59055, 'min': 401200000.0, '25%': 20746200000.0, '50%': 52069250000.0, '75%': 83440900000.0, 'max': 785229299999.9999}}
<dataframe_info>
RangeIndex: 7387 entries, 0 to 7386
Data columns (total 7 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Date 7387 non-null object
1 Open 7387 non-null float64
2 High 7387 non-null float64
3 Low 7387 non-null float64
4 Close 7387 non-null float64
5 Volume 5974 non-null float64
6 Turnover 5974 non-null float64
dtypes: float64(6), object(1)
memory usage: 404.1+ KB
<some_examples>
{'Date': {'0': '1990-07-03', '1': '1990-07-05', '2': '1990-07-06', '3': '1990-07-09'}, 'Open': {'0': 0.0, '1': 0.0, '2': 0.0, '3': 0.0}, 'High': {'0': 0.0, '1': 0.0, '2': 0.0, '3': 0.0}, 'Low': {'0': 0.0, '1': 0.0, '2': 0.0, '3': 0.0}, 'Close': {'0': 279.02, '1': 284.04, '2': 289.04, '3': 289.69}, 'Volume': {'0': None, '1': None, '2': None, '3': None}, 'Turnover': {'0': None, '1': None, '2': None, '3': None}}
<end_description>
| 1,088 | 0 | 2,293 | 1,088 |
69104433
|
<jupyter_start><jupyter_text>japan-trade-statistics3
Kaggle dataset identifier: japantradestatistics3
<jupyter_script># # 品目別の統計 (パイナップル )
# - [パイナップル 輸出 080430000](https://www.customs.go.jp/yusyutu/2011/data/print_e201101j_08.htm)
# - [パイナップル 輸入 080430 生鮮 010 乾燥 090](https://www.customs.go.jp/tariff/2021_4/data/print_j_08.htm)
# ### 最新のHSコード [輸出](https://www.customs.go.jp/yusyutu/2021_1/index.htm) [輸入](https://www.customs.go.jp/tariff/2021_4/index.htm)
# ### [貿易統計の検索](https://www.customs.go.jp/toukei/srch/index.htm)
#
import sys
import os
import re
# pandas,sqlite
import pandas as pd
import numpy as np
import pandas.io.sql as psql
import sqlite3
from datetime import datetime as dt
import time
# data retrieval
import json
import inspect
import requests
import codecs
import plotly
import plotly.graph_objects as go
show_tables = "select tbl_name from sqlite_master where type = 'table'"
desc = "PRAGMA table_info([{table}])"
conn = sqlite3.connect(":memory:")
cursor = conn.cursor()
# os.listdir('../input/japantradestatistics3')
# plotly charts sometimes fail to render on Kaggle, so run this workaround (something of a charm)
plotly.offline.init_notebook_mode(connected=True)
# hs code,country,
try:
attach = 'attach "../input/japantradestatistics3/codes4.db" as code'
cursor.execute(attach)
except:
pass
# load the HS code tables (hs2/hs4/hs6/hs9, Japanese and English) as pandas data frames
tmpl = "{hs}_{lang}_df = pd.read_sql('select * from code.{hs}_{lang}',conn)"
for hs in ["hs2", "hs4", "hs6", "hs9"]:
    for lang in ["jpn", "eng"]:
        exec(tmpl.format(hs=hs, lang=lang))
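# For readability, this is what one iteration of the exec() template above expands to
# (illustrative sketch, not in the original notebook; it simply re-reads one table):
hs9_eng_df = pd.read_sql("select * from code.hs9_eng", conn)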
# country table: country_eng,country_jpn
country_eng_df = pd.read_sql("select * from code.country_eng", conn)
country_eng_df["Country"] = country_eng_df["Country"].apply(str)
country_jpn_df = pd.read_sql("select * from code.country_jpn", conn)
country_jpn_df["Country"] = country_jpn_df["Country"].apply(str)
# attach the year-based database so it can be queried
try:
attach = 'attach "../input/japantradestatistics3/y_from_1988.db" as y_1988'
cursor.execute(attach)
except:
pass
y_table = "y_from_1988"
try:
attach = 'attach "../input/japantradestatistics3/ym_2019_2021.db" as ym_2019'
cursor.execute(attach)
except:
pass
show_tables = "select tbl_name from ym_2019.sqlite_master where type = 'table'"
pd.read_sql(show_tables, conn)
# latest country / colour assignments
country_color_list = [
["105", "China", "gold"],
["304", "USA", "red"],
["103", "SouthKorea", "brown"],
["106", "Taiwan", "cyan"],
["601", "Australia", "green"],
["111", "Thai", "violet"],
["213", "Germany", "grey"],
["110", "VietNum", "crimson"],
["108", "HongKong", "orangered"],
["112", "Singapore", "lightblue"],
["147", "UAE", "black"],
["137", "SaudiArabia", "lightgreen"],
["118", "Indonesia", "darkorange"],
["113", "Malaysia", "yellow"],
["205", "UK", "darkblue"],
["224", "Russia", "pink"],
["117", "Philipin", "olive"],
["302", "Canada", "blue"],
["210", "France", "indigo"],
["305", "Mexico", "tan"],
["141", "Oman", "moccasin"],
["312", "Panama", "dimgrey"],
["606", "New_Zealand", "palevioletred"],
["203", "Sweden", "slateblue"],
["223", "Poland", "darkviolet"],
["407", "Peru", "honeydew"],
["220", "Italy", "springgreen"],
["215", "Switzerland", "blue"],
["208", "Belgium", "darkolivegreen"],
["225", "Austria", "mediumpurple"],
["140", "Qatar", "lightgray"],
["409", "Chile", "maroon"],
["234", "Turkey", "orchid"],
["206", "Ireland", "navajowhite"],
["207", "Netherlands", "darkslateblue"],
["138", "Kuwait", "wheat"],
["123", "India", "beige"],
["551", "South_Africa", "papayawhip"],
["218", "Spain", "cornsilk"],
["410", "Brazil", "teal"],
["116", "Brunei", "seashell"],
["120", "Cambodia", "floralwhite"],
["122", "Myanmar", "darkmagenta"],
["124", "Pakistan", "firebrick"],
["125", "Sri_Lanka", "navy"],
["127", "Bangladesh", "gainsboro"],
["133", "Iran", "darkcyan"],
["134", "Iraq", "cornflowerblue"],
["135", "Bahrain", "yellowgreen"],
["143", "Israel", "peru"],
["153", "Kazakhstan", "darkturquoise"],
["202", "Norway", "cadetblue"],
["204", "Denmark", "palegreen"],
["222", "Finland", "darkseagreen"],
["227", "Hungary", "paleturquoise"],
["230", "Greece", "gray"],
["231", "Romania", "ivory"],
["238", "Ukraine", "lightpink"],
["245", "Czech_Republic", "whitesmoke"],
["311", "Cost_rica", "azure"],
["315", "Bahamas", "lightseagreen"],
["324", "Puerto_Rico_(USA)", "lightskyblue"],
["401", "Colombia", "mediumslateblue"],
["406", "Ecuador", "chartreuse"],
["413", "Argentina", "lightslategrey"],
["506", "Egypt", "mediumaquamarine"],
["515", "Liberia", "slategray"],
["541", "Kenya", "mediumblue"],
["602", "Papua_New_Guinea", "magenta"],
["625", "Marshall", "darkgray"],
]
colors = {x[0]: [x[1], x[2]] for x in country_color_list}
# display the colours assigned to each country
"""
fig = go.Figure()
fig.update_layout(width=1080,height=1080,paper_bgcolor='gold')
for i in range(len(country_color_list)):
xs = divmod(i,5)[1]
ys = divmod(i,5)[0]
fig.add_annotation(x=-0.4 + 1.2*xs, y=3.8-0.2*ys,
showarrow=False,
bgcolor=country_color_list[i][2],
text=country_color_list[i][2],
font={'family':'MS Pゴシック','size':18, 'color':"#ffffff"},
width=150,
height=30
)
fig.show()
# remaining colours
nokori_colors = list(set(color_all) - set([x[2] for x in country_color_list]))
nokori_colors
"""
""
# extract by HS code; the yearly data only carries hs9 codes
y_table = "y_from_1988"
hs = "hs9"
# hscode = '081190210'  frozen / cooked pineapple
hscode = "080430010"  # fresh pineapple
sql = """
select Year,exp_imp,Country,Value,Q1,Q2 from {y_table}
where {hs} = '{hscode}'
order by Year
"""[
1:-1
]
# data by year, export/import flag and country
y_df = pd.read_sql(sql.format(hs=hs, hscode=hscode, y_table=y_table), conn)
y_df["Q1"] = y_df["Q1"].astype(float)
y_df["Q2"] = y_df["Q2"].astype(float)
y_df["Value"] = y_df["Value"].astype(float)
# year-month data
ym_table = "ym_2019_2021"
sql = """
select ym,exp_imp,Country,Value,Q1,Q2 from {ym_table}
where {hs} = '{hscode}'
order by ym
"""[
1:-1
]
# data by year-month, export/import flag and country
ym_df = pd.read_sql(sql.format(hs=hs, hscode=hscode, ym_table=ym_table), conn)
ym_df["ym"] = ym_df["ym"].astype(str)
ym_df["ym"] = pd.to_datetime(ym_df["ym"], format="%Y%m")  # convert to year-month (treated as the 1st of the month)
ym_df["Value"] = ym_df["Value"].astype(float)
ym_df["Q1"] = ym_df["Q1"].astype(float)
ym_df["Q2"] = ym_df["Q2"].astype(float)
print(y_df.head())
print(ym_df.head())
# aggregate the totals
# yearly basis; reference: GD Freak https://jp.gdfreak.com/public/detail/jp010090001210161q26/1
# aggregate by year and export/import flag
y_xdf = (
y_df[["Year", "exp_imp", "Value", "Q1", "Q2"]]
.groupby(["Year", "exp_imp"], as_index=False)
.sum(["Value", "Q1", "Q2"])
)
text = """
go.Scatter(x = y_xdf.query("exp_imp=={exp_imp}")["Year"]
,y = y_xdf.query("exp_imp=={exp_imp}")["Q2"]/10000000,name="{name}",line=dict(width=5))
"""[
1:-1
]
fig = go.Figure()
# fig.add_trace(eval(text.format(exp_imp=1, name='Exports')))  # exports do not exist under this HS code, so they are not shown
fig.add_trace(eval(text.format(exp_imp=2, name="Imports")))
fig.update_layout(
    width=128,
    title={"text": "Import volume"},
    xaxis=dict(
        title=dict(text="Year", font=dict(size=12)), tickfont=dict(size=12), type="date"
    ),
    xaxis_title="Year",
    yaxis_title="10,000 tonnes",
)
fig.show()
# Assuming one pineapple weighs about 1 kg, Japanese people eat roughly 1.5 pineapples a year.
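# A rough sanity check of that claim (a sketch; the population figure of about 126 million is an
# assumption, not taken from this notebook):
#   yearly_kg = float(y_xdf.query("exp_imp==2")["Q2"].max())  # largest yearly import volume in kg
#   yearly_kg / 126_000_000                                   # ≈ pineapples per person at ~1 kg each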
# Aggregate by year-month and export/import flag
ym_xdf = (
ym_df[["ym", "exp_imp", "Value", "Q1", "Q2"]]
.groupby(["ym", "exp_imp"], as_index=False)
.sum(["Value", "Q1", "Q2"])
)
text = """
go.Scatter(x = ym_xdf.query("exp_imp=={exp_imp}")["ym"]
,y = ym_xdf.query("exp_imp=={exp_imp}")["Q2"]/10000000,name="{name}",line=dict(width=5))
"""[
1:-1
]
fig = go.Figure()
fig.add_trace(eval(text.format(exp_imp=2, name="Imports")))
fig.update_layout(
    width=128,
    title={"text": "Monthly import quantity"},
    xaxis_title="Year-month",
    yaxis_title="Quantity (10,000 tonnes)",
)
fig.show()
# Breakdown of 2019 imports, processed with pandas
head_num = 10
exp_imp = 2
year = 2019
text = """
xdf = y_df.query("Year=={year} & exp_imp=={exp_imp}")
xdf = xdf[['Country','Q2']].groupby('Country',as_index=False).sum('Value')
xdf = xdf.sort_values('Q2',ascending=False).head({head_num})
xdf = pd.merge(xdf,country_eng_df,on='Country')
xdf = xdf.sort_values('Q2')
xdf['name'] = xdf['Country_name'] + ': ' + xdf['Country'] + ' '
country_list =list(xdf['Country'])
"""
exec(text.format(year=year, exp_imp=exp_imp, head_num=10))
fig = go.Figure(go.Bar(x=xdf["Q2"] / 10000000, y=xdf["name"], orientation="h"))
fig.show()
# Breakdown by country for the latest year-months
head_num = 10
exp_imp = 2
ym = "2021-05"
text = """
xdf = ym_df.query("ym=='{ym}' & exp_imp=={exp_imp}")
xdf = xdf[['Country','Q2']].groupby('Country',as_index=False).sum()
xdf = xdf.sort_values('Q2',ascending=False).head({head_num})
xdf['Country'] = xdf['Country'].astype(str)
xdf = pd.merge(xdf,country_eng_df,on='Country')
xdf = xdf.sort_values('Q2')
xdf['name'] = xdf['Country_name'] + ': ' + xdf['Country'] + ' '
country_list =list(xdf['Country'])
"""
for ym in ["2021-04", "2020-04"]:
exec(text.format(ym=ym, exp_imp=exp_imp, head_num=10))
fig = go.Figure(go.Bar(x=xdf["Q2"] / 10000000, y=xdf["name"], orientation="h"))
fig.update_layout(title=ym)
fig.show()
# # Taiwan's trajectory compared with the Philippines (codes: 117=Philippines, 106=Taiwan, 118=Indonesia, 311=Costa Rica)
country_list = ["117", "106", "118", "311"]
exp_imp = 2
text = """
go.Scatter(x=y_df.query("Country=='{country}' & exp_imp=={exp_imp}")['Year'],
y=y_df.query("Country=='{country}' & exp_imp=={exp_imp}")['Q2'],
name='{name}',line=dict(color='{color}',width=3))
"""[
1:-1
]
fig = go.Figure()
for country in country_list:
tr = eval(
text.format(
country=country,
exp_imp=exp_imp,
name=colors[country][0] + " " + country,
color=colors[country][1],
)
)
fig.add_trace(tr)
fig.show()
country_list = ["117", "106", "118", "311"]
exp_imp = 2
text = """
go.Scatter(x=y_df.query("Country=='{country}' & exp_imp=={exp_imp}")['Year'],
y=y_df.query("Country=='{country}' & exp_imp=={exp_imp}")['Q2'],
name='{name}',line=dict(color='{color}'),stackgroup='one')
"""[
1:-1
]
fig = go.Figure()
for country in country_list:
tr = eval(
text.format(
country=country,
exp_imp=exp_imp,
name=colors[country][0] + " " + country,
color=colors[country][1],
)
)
fig.add_trace(tr)
fig.show()
xdf = ym_df.query("exp_imp==2 & ym >'2020-01-01'")
text = """
go.Scatter(x=x,y=y,name='{name}',line=dict(width=5,color='{color}'))
"""[
1:-1
]
country_list = ["106", "117"]
exp_imp = 2
fig = go.Figure()
for country in country_list:
x = xdf[xdf["Country"] == int(country)]["ym"]
y = xdf[xdf["Country"] == int(country)]["Q2"] / 10000000
tr = eval(
text.format(
country=country,
exp_imp=exp_imp,
name=colors[country][0],
color=colors[country][1],
)
)
fig.add_trace(tr)
fig.show()
text = """
go.Scatter(x=ym_df.query("Country=='{country}' & exp_imp=={exp_imp} & ym >'2020-01-01'")['ym'],
y=ym_df.query("Country=='{country}' & exp_imp=={exp_imp} & ym >'2020-01-01'")['Q2']/10000000,
name='{name}',line=dict(width=5,color='{color}'))
"""[
1:-1
]
country_list = ["106"]
exp_imp = 2
fig = go.Figure()
for country in country_list:
tr = eval(
text.format(
country=country,
exp_imp=exp_imp,
name=colors[country][0],
color=colors[country][1],
)
)
fig.add_trace(tr)
fig.show()
"""
ym:
1: 単一年月
3: 年内の複数月
4: 年内累計
5: 年度内累計
品目の指定:
1: 参照指定
2: 品目コード指定
3: 範囲指定
国の指定:
1: 全対象指定
2: 地理圏指定
3: 経済圏指定
4: 州指定
5: 国参照指定
6: 国コード指定
ページ指定(M)
01: 品別国別表
03: 国別品別表
01:
01:
01:
01:
"""
ym_method = "1"
exp_imp = "2"
year = "2021"
month = "04"
text = """
https://www.customs.go.jp/toukei/srch/index.htm?M=01&P=1,{exp_imp},,,,,,,,{ym_method},0,{year},0,{month},{month1},2,{hscode},,,,,,,,,,6,{Country},,,,,,,,,,,,,,,,,,,,,page_num
https://www.customs.go.jp/toukei/srch/index.htm?M=01&P=1,2,,,,,,,,1,0,2021,0,4,0,2,080430010,,,,,,,,,,6,106,,,,,,,,,,,,,,,,,,,,,20
https://www.customs.go.jp/toukei/srch/index.htm?M=01&P=1,2,,,,,,,,4,1,2021,0,0,0,2,080430010,,,,,,,,,,6,106,,,,,,,,,,,,,,,,,,,,,20
"""
# [taiwan 2104 ](https://www.customs.go.jp/toukei/srch/index.htm?M=01&P=1,2,,,,,,,,1,0,2021,0,4,0,2,080430010,,,,,,,,,,6,106,,,,,,,,,,,,,,,,,,,,,20)
# Quantity (kg)
# 2021.4 106 5807759
# 2021.4 117 14365226
# 2020.4 106 783732
# 2020.4 117 12758782
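# Quick arithmetic behind the annotations further below (a sketch based on the figures above; the
# population figure of about 126 million is an assumption, not taken from this notebook):
#   5807759 / 783732 ≈ 7.4         -> Taiwan's April imports grew about 7.4x year over year
#   5807759 kg ≈ 5.8 million pineapples at roughly 1 kg each
#   126_000_000 / 5_807_759 ≈ 22   -> roughly 1 in 20 Japanese people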
xdf = ym_df.query("exp_imp==2 & ym >'2020-01-01'")
text = """
go.Scatter(x=x,y=y,name='{name}',line=dict(width=5,color='{color}'))
"""[
1:-1
]
fig = go.Figure()  # initialise the figure
fig.update_layout(
width=1080,
height=1080,
paper_bgcolor="rgba(248,148,172,1)",
yaxis_title="数量 万トン",
margin_t=300,
legend=dict(x=0, font_size=18, bgcolor="#eee"),
xaxis=dict(dtick="M1", tickformat="%m\n%Y", ticklabelmode="period"),
)
fig.add_layout_image(
dict(
source="https://zanjibar.github.io/flag_of_japan.jpg?",
xref="paper",
yref="paper",
x=0.03,
y=1.3,
sizex=0.1,
sizey=0.1,
xanchor="right",
yanchor="bottom",
)
)
"""
"""
fig.update_layout(
title={"text": "", "font_size": 24, "x": 0.1, "xanchor": "left", "yanchor": "top"}
)
country_list = ["106", "117"]
for country in country_list:
x = xdf[xdf["Country"] == int(country)]["ym"]
y = xdf[xdf["Country"] == int(country)]["Q2"] / 10000000
tr = eval(
text.format(
country=country,
exp_imp=exp_imp,
name=colors[country][0],
color=colors[country][1],
)
)
fig.add_trace(tr)
# 'text': "台湾からの生鮮パイナップルは、2021年4月には約580万個輸入です。<br>おおよそ20人に1人の日本人が台湾のパイナップルを食べた計算です。<br>昨年は78万個なので約140人に1人",
fig.add_annotation(
x=0.08,
y=1.38,
text=" 日本人の3% が台湾を応援 ",
xref="paper",
bgcolor="white",
yref="paper",
font_size=32,
align="left",
showarrow=False,
)
fig.add_annotation(
x=0.05,
y=1.25,
text="貿易統計を読み解く - 台湾からの生鮮パイナップルの輸入が急増 <br>2020年1月から2021年5月までの生鮮パイナップル(080430010)<br>フィリピン(1位)と台湾の輸入量(キログラム)の比較",
xref="paper",
yref="paper",
font_size=24,
align="left",
showarrow=False,
)
fig.add_annotation(
x=0.05,
y=1.1,
text="個数換算で、約400万個分の輸入が増えています。単純計算ですが、日本人の3%が購入<br>したというのはそれほどはずれていないでしょう。",
xref="paper",
yref="paper",
font_size=20,
align="left",
showarrow=False,
)
fig.add_annotation(
x=0.1,
y=0.4,
text="1 台湾からのパイナップル輸入は、前年同月比(4月) 7.4倍 <br>2 1位のフィリピンの3分1まで迫る <br>3 フィリピンからのパイナップル輸入も増えている",
xref="paper",
yref="paper",
align="left",
font_size=24,
showarrow=False,
)
fig.add_annotation(
x=0.17,
y=0.82,
text="2020年4月 フィリピンからの輸入は、<br>1.2万トン",
arrowhead=1,
xref="paper",
yref="paper",
showarrow=True,
)
fig.add_annotation(
x=0.88,
y=0.92,
text="2021年4月 フィリピンからの輸入は、<br>1.4万トン",
arrowhead=1,
xref="paper",
yref="paper",
showarrow=True,
)
fig.add_annotation(
x=0.17,
y=0.11,
text="2020年4月 台湾からの輸入は、783トン",
arrowhead=1,
xref="paper",
yref="paper",
showarrow=True,
)
fig.add_annotation(
x=0.88,
y=0.43,
text="2021年4月 台湾からの輸入は、5808トン",
arrowhead=1,
xref="paper",
yref="paper",
showarrow=True,
)
fig.show()
fig.write_image("sg.jpeg")
# # Appendix (the list of color names assigned to countries above)
color_all = [
"aliceblue",
"antiquewhite",
"aqua",
"aquamarine",
"azure",
"beige",
"bisque",
"black",
"blanchedalmond",
"blue",
"blueviolet",
"brown",
"burlywood",
"cadetblue",
"chartreuse",
"chocolate",
"coral",
"cornflowerblue",
"cornsilk",
"crimson",
"cyan",
"darkblue",
"darkcyan",
"darkgoldenrod",
"darkgray",
"darkgrey",
"darkgreen",
"darkkhaki",
"darkmagenta",
"darkolivegreen",
"darkorange",
"darkorchid",
"darkred",
"darksalmon",
"darkseagreen",
"darkslateblue",
"darkslategray",
"darkslategrey",
"darkturquoise",
"darkviolet",
"deeppink",
"deepskyblue",
"dimgray",
"dimgrey",
"dodgerblue",
"firebrick",
"floralwhite",
"forestgreen",
"fuchsia",
"gainsboro",
"ghostwhite",
"gold",
"goldenrod",
"gray",
"grey",
"green",
"greenyellow",
"honeydew",
"hotpink",
"indianred",
"indigo",
"ivory",
"khaki",
"lavender",
"lavenderblush",
"lawngreen",
"lemonchiffon",
"lightblue",
"lightcoral",
"lightcyan",
"lightgoldenrodyellow",
"lightgray",
"lightgrey",
"lightgreen",
"lightpink",
"lightsalmon",
"lightseagreen",
"lightskyblue",
"lightslategray",
"lightslategrey",
"lightsteelblue",
"lightyellow",
"lime",
"limegreen",
"linen",
"magenta",
"maroon",
"mediumaquamarine",
"mediumblue",
"mediumorchid",
"mediumpurple",
"mediumseagreen",
"mediumslateblue",
"mediumspringgreen",
"mediumturquoise",
"mediumvioletred",
"midnightblue",
"mintcream",
"mistyrose",
"moccasin",
"navajowhite",
"navy",
"oldlace",
"olive",
"olivedrab",
"orange",
"orangered",
"orchid",
"palegoldenrod",
"palegreen",
"paleturquoise",
"palevioletred",
"papayawhip",
"peachpuff",
"peru",
"pink",
"plum",
"powderblue",
"purple",
"red",
"rosybrown",
"royalblue",
"saddlebrown",
"salmon",
"sandybrown",
"seagreen",
"seashell",
"sienna",
"silver",
"skyblue",
"slateblue",
"slategray",
"slategrey",
"snow",
"springgreen",
"steelblue",
"tan",
"teal",
"thistle",
"tomato",
"turquoise",
"violet",
"wheat",
"white",
"whitesmoke",
"yellow",
"yellowgreen",
]
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/104/69104433.ipynb
|
japantradestatistics3
|
zanjibar
|
[{"Id": 69104433, "ScriptId": 18494711, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 647691, "CreationDate": "07/26/2021 23:43:19", "VersionNumber": 6.0, "Title": "hscode-lesson-pineapple", "EvaluationDate": "07/26/2021", "IsChange": true, "TotalLines": 682.0, "LinesInsertedFromPrevious": 8.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 674.0, "LinesInsertedFromFork": 314.0, "LinesDeletedFromFork": 72.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 368.0, "TotalVotes": 0}]
|
[{"Id": 91890680, "KernelVersionId": 69104433, "SourceDatasetVersionId": 2378358}]
|
[{"Id": 2378358, "DatasetId": 700516, "DatasourceVersionId": 2420240, "CreatorUserId": 647691, "LicenseName": "CC BY-SA 4.0", "CreationDate": "06/29/2021 01:15:28", "VersionNumber": 18.0, "Title": "japan-trade-statistics3", "Slug": "japantradestatistics3", "Subtitle": "Japan trade statistics Latest data", "Description": NaN, "VersionNotes": "add 2021.5 data", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 700516, "CreatorUserId": 647691, "OwnerUserId": 647691.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 6250236.0, "CurrentDatasourceVersionId": 6329998.0, "ForumId": 715190, "Type": 2, "CreationDate": "06/08/2020 07:03:18", "LastActivityDate": "06/08/2020", "TotalViews": 3882, "TotalDownloads": 98, "TotalVotes": 2, "TotalKernels": 20}]
|
[{"Id": 647691, "UserName": "zanjibar", "DisplayName": "TadashiNagao", "RegisterDate": "06/23/2016", "PerformanceTier": 1}]
|
| false | 0 | 7,683 | 0 | 7,708 | 7,683 |
||
69104572
|
import pandas as pd
import seaborn as sns
import plotly.express as xp
import plotly.graph_objects as go
import numpy as np
from datetime import datetime
import missingno
import yaml
from collections import Counter
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.model_selection import train_test_split, GridSearchCV, ShuffleSplit
from sklearn.manifold import TSNE
from sklearn.linear_model import RidgeClassifier
from sklearn.impute import SimpleImputer
train_data = pd.read_csv("../input/mymusicalprefrences/train.csv")
test_data = pd.read_csv("../input/mymusicalprefrences/test.csv")
dataset = pd.concat([train_data, test_data]).reset_index(drop=True)
dataset.columns = [i.strip() for i in dataset.columns]
print(dataset.size)
dataset.head()
# Copying the helper code for splitting '|'-separated features into one-hot columns
def split_to_onehot(df, col):
"""
This method converts features separated by '|' into one-hot vectors.
    Additionally it drops unnecessary values, i.e. those which are present only in
    the test set / train set or which have only one value.
"""
    # Getting all unique genre values.
unique = []
for i in df.index:
unique.extend(df.loc[i, col].split("|"))
if "" in unique:
unique.remove("")
unique = list(set(unique))
# Putting values into binary form
onehot = df.loc[:, ["Category"]]
onehot[unique] = np.zeros((len(unique),), dtype=np.int8)
for i in df.index:
g = set(df.loc[i, col].split("|"))
for j in g:
if j != "":
onehot.loc[i, j] = 1
# Dropping unnecessary values
_a = onehot.groupby("Category").sum()
only_one = list(_a.sum()[_a.sum() == 1].index)
only_train = list(_a.loc["none"][_a.loc["none"] == 0].index)
only_test = list(
_a.loc[["like", "dislike"]].sum()[_a.loc[["like", "dislike"]].sum() == 0].index
)
_a = set(only_one + only_train + only_test)
onehot = onehot.drop(_a, axis=1)
return onehot
def onehot_to_tsne2(df, title):
"""
This method converts one-hot representation into two tsne values.
Such operation is needed to shrink the dimentionality of the dataset
"""
onehot = df.drop("Category", axis=1)
embedding = TSNE(n_components=2, init="pca")
embedded = embedding.fit_transform(onehot)
embedded = pd.DataFrame(embedded, columns=[f"{title}_tsne1", f"{title}_tsne2"])
return embedded
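# Note: plot_commulative_onehot below references a `palette` variable that is not defined anywhere
# in this notebook. A minimal assumption-based fix is to reuse one of plotly's built-in qualitative
# palettes (plotly.express is already imported as xp):
palette = xp.colors.qualitative.Plotly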
def plot_commulative_onehot(onehot):
"""
    Method for plotting cumulative values of the one-hot feature representation
"""
_df = onehot.groupby("Category").sum()
fig = go.Figure()
for i in range(len(_df.index)):
k = _df.index[i]
x, y = [], []
for g in _df.columns:
if _df.loc[k, g] != 0:
x.append(g)
y.append(_df.loc[k, g])
fig.add_trace(go.Bar(x=x, y=y, name=k, marker=dict(color=palette[i])))
fig.show()
# The method label_fix_category is used to simplify the Category column.
# In this part the category column is simplified to values 0, 1, 2
print(dataset["Category"].unique())
dataset["Category"] = (
dataset["Category"].fillna("none").replace({0: "dislike", 1: "like"})
)
dataset["Category"].unique()
def label_fix_category(value):
if value == "dislike":
return 0
elif value == "like":
return 1
elif value == "none":
return 2
dataset["Category"] = dataset["Category"].apply(label_fix_category)
dataset["Category"].unique()
# Now I'm simplifying the Version column by replacing string values with integer values
print(dataset["Version"])
print(dataset["Version"].count())
# only 129 rows have a value (!= NaN)
# replace NaN with the string "NA"
dataset["Version"] = dataset["Version"].fillna("NA")
label_encoder = LabelEncoder()
dataset.Version = label_encoder.fit_transform(dataset.Version)
dataset["Version"].unique()
# Now I'm simplifying the Album_type column in the same way, replacing string values with integer values
dataset["Album_type"].unique()
print(dataset["Album_type"].count())
# 212 rows have a value (!= NaN)
# replace NaN with the string "NA"
dataset["Album_type"] = dataset["Album_type"].fillna("NA")
label_encoder = LabelEncoder()
dataset.Album_type = label_encoder.fit_transform(dataset.Album_type)
dataset["Album_type"].unique()
print(dataset["Album_type"].count())
# now we can use all the samples
# For the Key column I decided to take all possible values, create a new column for each value, and finally drop the original Key column
dataset[list(set(dataset["Key"].values))] = (
OneHotEncoder().fit_transform(dataset[["Key"]]).toarray()
)
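# Caveat (not from the original notebook): OneHotEncoder orders its output columns by sorted
# category, which is not guaranteed to match the arbitrary ordering of list(set(...)) above, so
# the new column names may end up shuffled relative to the encoded values. A safer sketch:
#   enc = OneHotEncoder()
#   dataset[list(enc.fit(dataset[["Key"]]).categories_[0])] = enc.transform(dataset[["Key"]]).toarray()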
# dataset = dataset.drop("Key", axis=1)
print(dataset["Key"].unique())
print(dataset["Key"].count())
# all rows have a value (!= NaN)
label_encoder = LabelEncoder()
dataset.Key = label_encoder.fit_transform(dataset.Key)
dataset = dataset.drop("Key", axis=1)
dataset.columns
print(dataset["Vocal"].unique())
print(dataset["Vocal"].count())
ganres_onehot = split_to_onehot(dataset, "Artists_Genres")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/104/69104572.ipynb
| null | null |
[{"Id": 69104572, "ScriptId": 18858339, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7932867, "CreationDate": "07/26/2021 23:46:54", "VersionNumber": 4.0, "Title": "notebook9e486434af", "EvaluationDate": "07/26/2021", "IsChange": true, "TotalLines": 156.0, "LinesInsertedFromPrevious": 4.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 152.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
| null | null | null | null |
| false | 0 | 1,466 | 3 | 1,466 | 1,466 |
||
69104005
|
# Desired output size.
RESIZED_WIDTH, RESIZED_HEIGHT = 384, 576
OUTPUT_FORMAT = "jpg"
OUTPUT_DIR = "384_576"
# # Imports
import glob
import joblib
import numpy as np
import PIL
from PIL import Image
import pydicom
import tqdm
import os
import zipfile
# # Get images paths
data_dir = "../input/plant-pathology-2021-fgvc8"
train_dir = "train_images"
train_paths = glob.glob(f"{data_dir}/{train_dir}/*.jpg")
test_dir = "test_images"
test_paths = glob.glob(f"{data_dir}/{test_dir}/*.jpg")
len(train_paths), len(test_paths)
# # Preprocess all data
# Resize every train and test image to the target size and save it under OUTPUT_DIR,
# keeping the original filenames.
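# The output directories are never created in the original notebook, so img.save below would raise
# FileNotFoundError; a minimal fix, assuming the OUTPUT_DIR layout used below:
os.makedirs(os.path.join(OUTPUT_DIR, train_dir), exist_ok=True)
os.makedirs(os.path.join(OUTPUT_DIR, test_dir), exist_ok=True)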
train_num = len(train_paths)
for i, train_path in enumerate(train_paths):
filename = train_path.split("/")[-1]
img = Image.open(train_path)
img = img.resize((RESIZED_WIDTH, RESIZED_HEIGHT))
img.save(os.path.join(OUTPUT_DIR, train_dir, filename))
print(f"Successfully process {i}th image of {train_num} images")
test_num = len(test_paths)
for i, test_path in enumerate(test_paths):
filename = test_path.split("/")[-1]
img = Image.open(test_path)
img = img.resize((RESIZED_WIDTH, RESIZED_HEIGHT))
img.save(os.path.join(OUTPUT_DIR, test_dir, filename))
print(f"Successfully process {i}th image of {test_num} images")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/104/69104005.ipynb
| null | null |
[{"Id": 69104005, "ScriptId": 17044921, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5599343, "CreationDate": "07/26/2021 23:29:17", "VersionNumber": 4.0, "Title": "Prepare dataset (resizing and saving as png)", "EvaluationDate": "07/26/2021", "IsChange": true, "TotalLines": 70.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 69.0, "LinesInsertedFromFork": 32.0, "LinesDeletedFromFork": 105.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 38.0, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 445 | 0 | 445 | 445 |
||
69104810
|
# # Metrics Processor
# Processes metrics into tables grouped by type and ID with a full statistical description.
# ## Step 1:
# Compile the list of files into a dictionary of datasets with people, days, and threads metadata
metrics_dir = "/kaggle/input/metrics"
import pandas as pd
import numpy as np
import os
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
from IPython.core import display as ICD
# ## Functions
# Load the datasets into a list of dictionaries containing dataset metadata and a scoped dataframe of the data within the dataset
def load_datasets():
datasets = []
for dirname, _, filenames in os.walk(metrics_dir):
splt = dirname.split("/")[-1].split("-")
if not dirname.endswith("metrics"):
people = int(splt[0])
days = int(splt[1])
dataset = {
"people": people,
"days": days,
"threads": {},
}
for filename in filenames:
if "csv" in filename:
threads = int(filename.split(".")[0].replace("metrics", ""))
path = os.path.join(dirname, filename)
frame = pd.read_csv(path)
frame["Days"] = days
frame["Dataset Size"] = people
frame["Concurrent Threads"] = threads
thread = {"path": path, "df": frame}
dataset["threads"][threads] = thread
dfs = []
for thread, data in dataset["threads"].items():
dfs.append(data["df"])
dataset["df"] = pd.concat(dfs)
datasets.append(dataset)
return datasets
# Group a dataframe by dataset size, number of concurrent threads, and device type
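# Note: process() simply overwrites df.columns with a fixed list of seven names, so it assumes the
# incoming frame has exactly those columns in that order: the five raw metrics-CSV columns
# (identifier, type, elapsed time, category, description) plus the Dataset Size and Concurrent
# Threads columns added in load_datasets (Days is dropped by the caller before this is used).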
def process(df):
df.columns = [
"Identifier",
"Type",
"Time Elapsed",
"Category",
"Description",
"Dataset Size",
"Concurrent Threads",
]
return (
df[df["Category"] == "request"]
.groupby(["Dataset Size", "Concurrent Threads", "Type"])
.agg(
**{
"Count": ("Time Elapsed", "count"),
"Mean": ("Time Elapsed", "mean"),
"Total": ("Time Elapsed", "sum"),
"Standard Deviation": ("Time Elapsed", "std"),
}
)
)
# Same as above, but groups by 'Days' instead of device type
def process2(df):
df.columns = [
"Identifier",
"Time Elapsed",
"Category",
"Description",
"Days",
"Dataset Size",
"Concurrent Threads",
]
return (
df[df["Category"] == "request"]
.groupby(["Days", "Dataset Size", "Concurrent Threads"])
.agg(
**{
"Count": ("Time Elapsed", "count"),
"Mean": ("Time Elapsed", "mean"),
"Total": ("Time Elapsed", "sum"),
"Standard Deviation": ("Time Elapsed", "std"),
}
)
)
colormap = {
100: "#3A3D3B",
200: "#E84F5E",
500: "#B3CCBA",
750: "#2EACB3",
1000: "#F7F0DD",
}
# Generate a 3D plot on the given x, y, and z axes
def plot3d(df, title=None, x_label=None, y_label=None, z_label=None):
"""matplotlib.use("pgf")
matplotlib.rcParams.update({
"pgf.texsystem": "pdflatex",
'font.family': 'serif',
'text.usetex': True,
'pgf.rcfonts': False,
})"""
# thickness of the bars
dx, dy = 1, 50
# prepare 3d axes
fig = plt.figure(figsize=(10, 6))
ax = Axes3D(fig, auto_add_to_figure=False)
fig.add_axes(ax)
# set up positions for the bars
xpos = df.Days.ravel()
ypos = df["Dataset Size"].ravel()
# set the ticks in the middle of the bars
ax.set_xticks(df.Days.unique())
ax.set_yticks(df["Dataset Size"].unique())
color = [colormap[size] for size in df["Dataset Size"].ravel()]
    # the bars start from altitude 0
zpos = np.zeros(len(xpos))
# the bars' heights
dz = df.Mean.ravel()
bottom = min(df["Dataset Size"].unique())
top = max(df["Dataset Size"].unique())
# plot
ax.bar3d(xpos, ypos, zpos, dx, dy, dz, color=color)
if title is not None:
plt.title(title)
if x_label is not None:
ax.set_xlabel(x_label)
if x_label is not None:
ax.set_ylabel(y_label)
if x_label is not None:
ax.set_zlabel(z_label)
plt.savefig(title + ".pgf", bbox_inches="tight")
plt.show()
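# Note (an assumption, not stated in the original): plt.savefig(title + ".pgf") in plot3d relies on
# matplotlib's pgf backend, which generally requires a working LaTeX installation; changing the
# extension to ".png" avoids that dependency.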
# ## Processing
# Load the data
datasets = load_datasets()
# setup variables with a single combined table
combined = pd.concat([d["df"] for d in datasets])
# the combined table, grouped by days
groups = sorted(combined.Days.unique())
days = {}
for group in groups:
days[group] = combined[combined["Days"] == group]
# print each of the grouped-by-days tables
for day, df in days.items():
print(f"Days: {day}")
ICD.display(process(df.drop("Days", axis=1)))
print(process(df.drop("Days", axis=1)).to_latex())
# filter the combined table by device type and thread count, then aggregate with process2 and reset the index so the grouping columns are accessible as regular columns
omega = combined[combined["type"] == "omega"]
thread1 = omega[omega["Concurrent Threads"] == 1]
thread10 = omega[omega["Concurrent Threads"] == 10]
thread100 = omega[omega["Concurrent Threads"] == 100]
thread1 = process2(thread1.drop("type", axis=1)).reset_index()
thread10 = process2(thread10.drop("type", axis=1)).reset_index()
thread100 = process2(thread100.drop("type", axis=1)).reset_index()
# plot days x dataset size x mean request time, separately for 1, 10, and 100 concurrent threads
plot3d(
thread1,
title="Mean Request Time (1 Thread)",
x_label="Dataset Simulation Duration (Days)",
y_label="Dataset Size (People)",
z_label="Mean Request Time (Seconds)",
)
plot3d(
thread10,
title="Mean Request Time (10 Threads)",
x_label="Dataset Simulation Duration (Days)",
y_label="Dataset Size (People)",
z_label="Mean Request Time (Seconds)",
)
plot3d(
thread100,
title="Mean Request Time (100 Threads)",
x_label="Dataset Simulation Duration (Days)",
y_label="Dataset Size (People)",
z_label="Mean Request Time (Seconds)",
)
# Chunks of combined table to put into paper as LaTeX
first = combined[
(combined["Days"] == 28)
& (combined["Dataset Size"] == 200)
& (combined["type"] == "omega")
]
second = combined[
(combined["Days"] == 28)
& (combined["Dataset Size"] == 500)
& (combined["type"] == "omega")
]
third = combined[
(combined["Days"] == 14)
& (combined["Dataset Size"] == 500)
& (combined["type"] == "omega")
]
fourth = combined[
(combined["Days"] == 14)
& (combined["Dataset Size"] == 200)
& (combined["type"] == "omega")
]
main = pd.concat([first, second, third, fourth])
print(process2(main.drop("type", axis=1)).round(decimals=3).to_latex())
omega
import seaborn as sns
g = sns.catplot(
x="Days",
y="value",
hue="Concurrent Threads",
data=combined,
col="Dataset Size",
row="type",
row_order=["theta", "alpha", "beta", "omega"],
kind="point",
)
g.fig.subplots_adjust(top=0.9)
g.fig.suptitle("Experiment Results", fontsize=16)
[plt.setp(ax.get_xticklabels(), rotation=45) for ax in g.axes.flat]
# plt.savefig("experiment.png", bbox_inches='tight')
plt.show()
# Got it
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/104/69104810.ipynb
| null | null |
[{"Id": 69104810, "ScriptId": 18449366, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7203566, "CreationDate": "07/26/2021 23:54:09", "VersionNumber": 11.0, "Title": "Metrics Tables", "EvaluationDate": "07/26/2021", "IsChange": true, "TotalLines": 257.0, "LinesInsertedFromPrevious": 21.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 236.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 2,130 | 0 | 2,130 | 2,130 |
||
69104407
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Pytorch stuff
import torch
import torch.nn.functional as F
from torch.utils.data import random_split, DataLoader, Dataset
from torch import nn
# Visualisation
import tqdm
import matplotlib.pyplot as plt
from PIL import Image
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Loading the Training Data
df = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
df
# Use dataframe to get x and y values for training
x = torch.tensor(df.loc[:, "pixel0":"pixel783"].values)
y = torch.tensor(df.loc[:, "label"].values)
imgx = np.array(x.reshape((-1, 28, 28)), dtype=np.uint8)
img = Image.fromarray(imgx[3], mode="L")
img
# # Creating a baseline model
# A simple baseline model computes the average image (the per-pixel mean brightness) for each digit, then classifies a new image as the digit whose average image it is closest to (smallest mean absolute pixel difference).
# We will make this model general enough to work with an arbitrary set of labels.
# group the training rows by their digit label
labels = list(set(df["label"])) # gives enumeration for the labels
def label_map(index):
return labels[index]
sorted_data = []
for label in labels:
sorted_data.append(
torch.tensor(df.loc[df["label"] == label].loc[:, "pixel0":"pixel783"].values)
)
# A nice way to display tensors
def display_tns(t):
ldf = pd.DataFrame(np.array(t.reshape(28, 28)))
return ldf.style.set_properties(**{"font-size": "5pt"}).background_gradient(
"Greys", axis=None
)
# Generates a heatmap for each of the digits
averages = torch.tensor(list(map(lambda x: np.array(x).mean((0)), sorted_data)))
display_tns(averages[0])
# We define the prediction functions and measure the model's accuracy
def classification_losses(s):
if len(s.shape) == 2:
t = s
else:
t = s.unsqueeze_(0)
losses = torch.empty(0, t.shape[0])
for average in averages:
losses = torch.cat((losses, (t - average).abs().mean((-1)).unsqueeze_(0)), 0)
return losses
def predict(t):
return torch.tensor(
[label_map(a) for a in torch.argmin(classification_losses(t), dim=0)]
)
classification_losses(x).shape, predict(x), predict(x[1])
accuracy = torch.tensor([a == b for (a, b) in zip(predict(x), y)]).float().mean()
accuracy
# So now we have a baseline model with which to compare our future modelling attempts.
# We can also use this model to make a submission.
testdf = pd.read_csv("/kaggle/input/digit-recognizer/test.csv")
testx = torch.tensor(testdf.loc[:, "pixel0":"pixel783"].values, dtype=torch.float)
predictions_baseline = predict(testx)
data_baseline = {
"ImageID": np.arange(1, len(predictions_baseline) + 1),
"Label": predictions_baseline,
}
submission_baseline = pd.DataFrame(data_baseline, columns=["ImageID", "Label"])
submission_baseline.to_csv("baseline.csv", index=False)
submission_baseline
# # Using machine learning to do better
# First we will use a simple neural network to try to improve on the baseline's accuracy of roughly 0.65. Let's start by preparing the data, splitting it into training and validation sets.
# Typically Dataset would be taken from a saved file rather than stored dynamically, but this is just an experiment
class CustomDatasetMNIST(Dataset):
def __init__(self, data_tensor):
self.x = data_tensor[:, 1:]
self.y = data_tensor[:, 0]
def __len__(self):
return len(self.y)
def __getitem__(self, idx):
x = self.x[idx] / 255 # So all data points lie in [0,1]
y = int(self.y[idx])
return x, y
train, valid = random_split(
CustomDatasetMNIST(torch.tensor(df.values, dtype=torch.float)), [36000, 6000]
)
train_loader = DataLoader(train, batch_size=32)
valid_loader = DataLoader(valid, batch_size=32)
criterion = nn.CrossEntropyLoss()
# First is going to be a very simple neural network, with two hidden layers using RELU as a non-linear function.
class SimpleNetworkMNIST(nn.Module):
def __init__(self):
super(SimpleNetworkMNIST, self).__init__()
self.l1 = nn.Linear(784, 400)
self.l2 = nn.Linear(400, 100)
self.l3 = nn.Linear(100, 10)
def forward(self, x):
x = F.normalize(x)
x = F.relu(self.l1(x))
x = F.relu(self.l2(x))
x = self.l3(x)
return x
simple_model = SimpleNetworkMNIST()
# We also want to describe the loss function we will be using and the optimisation method. For our loss function we will use cross-entropy loss, which is standard for multi-label classification, and for our optimiser, stochastic gradient descent
simple_optimiser = torch.optim.SGD(
simple_model.parameters(), lr=0.1
) # lr denotes a learning rate
# Then we can train the model, while keeping an eye on the metrics:
epochs = 2
for epoch in range(epochs):
# Train over the epoch
simple_model.train()
running_loss = 0.0
for xs, ys in train_loader:
simple_optimiser.zero_grad()
prediction_ys = simple_model(xs)
loss = criterion(prediction_ys, ys)
loss.backward()
simple_optimiser.step() # Note this works as applying loss.backward works on the parameters of the model which is the argument for the optimiser
running_loss += loss.item()
train_loss = running_loss / len(train_loader)
# Get the validation loss
simple_model.eval()
running_loss = 0.0
correct = 0
total = 0
with torch.no_grad():
for xs, ys in valid_loader:
prediction_ys = simple_model(xs)
loss = criterion(prediction_ys, ys)
running_loss += loss.item()
total += ys.size(0)
correct += (prediction_ys.max(1)[1]).eq(ys).sum().item()
valid_loss = running_loss / len(valid_loader)
accuracy = 100.0 * correct / total
print(
f"Epoch: {epoch+1}/{epochs}:\nTraining loss: {train_loss}, Validation loss: {valid_loss}, Accuracy: {accuracy}"
)
# Obviously this model could be trained for longer and with more sophisticated methods, which we will implement later, but let's generate a submission with this initial model (although take note that this model has a much higher accuracy than that of the baseline model showing us that the use of machine learning has been useful).
simple_model.eval()
predictions_simple = simple_model(testx).max(1)[1]
data_simple = {
"ImageID": np.arange(1, len(predictions_simple) + 1),
"Label": predictions_simple,
}
submission_simple = pd.DataFrame(data_simple, columns=["ImageID", "Label"])
submission_simple.to_csv("simple.csv", index=False)
submission_simple
# # Model Interpretation
# Although we have metrics as we train our AI, we might want to look at some of the details of the AI. We can use visualisation methods such as:
# * Confusion matrices
# * Top losses
# Below we can generate a confusion matrix by tallying the times when data with some label was predicted as some label
confusion_matrix = np.zeros((len(labels), len(labels)), dtype=np.uint16)
MNISTdata = torch.tensor(df.values, dtype=torch.float)
simple_model_out = simple_model(MNISTdata[:, 1:])
targets = MNISTdata[:, 0]
# Could potentially also exclusively consider validation data to account for potential overfitting
for prediction, actual in zip(simple_model_out.max(1)[1], targets):
confusion_matrix[prediction, actual.int()] += 1
confusion_matrix
# We can display this information a bit more clearly using matplotlib:
fig, ax = plt.subplots(figsize=(10, 10))
max_val = len(labels)
for i in range(max_val):
for j in range(max_val):
ax.text(i, j, str(confusion_matrix[i, j]), va="center", ha="center")
plt.title("Confusion Matrix")
plt.xlabel("Predicted digit")
plt.ylabel("Actual digit")
plt.xticks(np.arange(0, max_val), labels)
plt.yticks(np.arange(0, max_val), labels)
ax.tick_params(grid_alpha=0)
ax.set_xlim(-0.5, max_val - 0.5)
ax.set_ylim(-0.5, max_val - 0.5)
ax.imshow(confusion_matrix, cmap=plt.cm.Blues, origin="lower", aspect="equal")
ax.grid()
# Looking at this data we see some really interesting and expected flaws in our model, for example it frequently mixes up (2,7) and (4,9) as they are similar looking. We can also see its accuracy in very different looking digits, such as between (3,4) and (2,5). We can also take some of the predictions that resulted in the highest losses to inspect what aspects of these particular examples make them so hard to classify.
top_no = 5
sorted_losses, indices = nn.CrossEntropyLoss(reduction="none")(
simple_model_out, targets.long()
).sort(descending=True)
for i in range(top_no):
loss, actual, predicted = (
sorted_losses[i],
targets[indices[i]].int(),
simple_model_out[indices[i]].max(0)[1],
)
data = MNISTdata[indices[i], 1:]
plt.figure()
plt.title(f"Loss: {loss}, Predicted: {predicted}, Actual: {actual}")
plt.imshow(np.array(data.reshape(28, 28)), cmap=plt.cm.gray)
# As we can see in these top losses are examples of particularly poorly written digits, and in each case (except maybe that 3), you can see what the model predicted in the image. Depending on how this model is trained you may also find erroneous data (there is a 7 mislabelled as a 4 in this dataset), and this highlights a use of this visualisation; removing bad data to aid in the AI's learning process.
# # Improving our initial model
# So we're now going to try improving the model by using a convolutional neural network. The main benefit of this is that the AI can now use the spatial aspects of the image, which will aid in it finding patterns in images (and with some tricks we can see what these patterns are later on).
# The nn.Conv2D class has parameters:
# * **in_channels** - number of input planes
# * **out_channels** - number of output planes
# * **kernel_size** - size of convolution (also specifies number of weights and biases)
# * **stride** - int or tuple describing how far the convolution is shifted over for each evaluation of the convolution
# * **padding** - empty pixels around the image (aids in evaluating pixels around the perimeter)
# * **dilation** - describes distance between evaluation points in the kernel (e.g. dilation = 2 on a 3x3 kernel means evaluation of the 9 friends spread out in a 5x5 area)
# * **groups** - splits connections between input and output layers (both must be divisible by groups value, e.g. groups = 2 is similar to having 2 convolution layers in tandem)
# * **padding_mode** - how the padding pixels are filled, can be one of a few options, default 'zeros'
# * **device**
# * **dtype**
class ConvolutionMNIST(nn.Module):
def __init__(self):
super(ConvolutionMNIST, self).__init__()
self.cnn_layers = nn.Sequential(
nn.Conv2d(1, 4, kernel_size=3, padding=1),
# BatchNorm normalises the data over the features (planes) seperately over the batch and pixels
nn.BatchNorm2d(4),
nn.ReLU(),
# Takes maximum value in a 2 by 2 area of convolution output
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(4, 4, kernel_size=3, padding=1),
nn.BatchNorm2d(4),
nn.ReLU(),
)
# The size of the output planes above is 14 by 14 as size is preserved by conv layer with padding but halved by max pooling
self.l1 = nn.Linear(4 * 14 * 14, 10)
def forward(self, x):
x = x.reshape((-1, 1, 28, 28))
x = self.cnn_layers(x)
x = torch.flatten(x, 1)
x = self.l1(x)
return x
conv_model = ConvolutionMNIST()
os.path.exists("/kaggle//////working")
# Just so we don't have to repeat code, let's define a function to train the models, we'll also add the ability to save models after epochs where the validation losses improved.
def train_model(
model,
epochs,
train_loader,
valid_loader,
optimiser,
criterion,
print_output=True,
save_models=False,
model_path=None,
save_as="model",
):
if save_models:
if not model_path:
raise ValueError("If save_models is True then model_path must be set")
if not os.path.exists(model_path):
os.mkdir(model_path)
train_losses = []
valid_losses = []
accuracies = []
for epoch in range(epochs):
model.train()
running_loss = 0.0
# Some variables have been renamed for clarity
for data, targets in train_loader:
optimiser.zero_grad()
scores = model(data)
loss = criterion(scores, targets)
loss.backward()
optimiser.step() # Note this works as applying loss.backward works on the parameters of the model which is the argument for the optimiser
running_loss += loss.item()
train_loss = running_loss / len(train_loader)
model.eval()
running_loss = 0.0
correct = 0
total = 0
with torch.no_grad():
for data, targets in valid_loader:
scores = model(data)
loss = criterion(scores, targets)
running_loss += loss.item()
total += targets.size(0)
correct += (scores.max(1)[1]).eq(targets).sum().item()
valid_loss = running_loss / len(valid_loader)
accuracy = 100.0 * correct / total
train_losses.append(train_loss)
valid_losses.append(valid_loss)
accuracies.append(accuracy)
if print_output:
print(
f"Epoch: {epoch+1}/{epochs}:\nTraining loss: {train_loss}, Validation loss: {valid_loss}, Accuracy: {accuracy}"
)
if save_models:
torch.save(model.state_dict(), model_path + f"/{save_as}_e{epoch+1}")
# For display
return train_losses, valid_losses, accuracies
conv_optimiser = torch.optim.SGD(conv_model.parameters(), lr=0.01)
train_model(conv_model, 2, train_loader, valid_loader, conv_optimiser, criterion)
# Using our trained model we can generate a submission:
conv_model.eval()
predictions_conv = conv_model(testx).max(1)[1]
data_conv = {
"ImageID": np.arange(1, len(predictions_conv) + 1),
"Label": predictions_conv,
}
submission_conv = pd.DataFrame(data_conv, columns=["ImageID", "Label"])
submission_conv.to_csv("conv.csv", index=False)
submission_conv
# We can see though that this results in a similar result to what we were getting previously. We can improve the model further by adding more layers to the model:
class DeepConvolutionMNIST(nn.Module):
def __init__(self):
super(DeepConvolutionMNIST, self).__init__()
self.cnn_layers = nn.Sequential(
nn.Conv2d(1, 16, kernel_size=3, padding=1),
nn.BatchNorm2d(16),
nn.ReLU(),
nn.Conv2d(16, 16, kernel_size=3, padding=1),
nn.BatchNorm2d(16),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(16, 8, kernel_size=3, padding=1),
nn.BatchNorm2d(8),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(8, 4, kernel_size=3, padding=1),
nn.BatchNorm2d(4),
nn.ReLU(),
)
self.l1 = nn.Linear(4 * 7 * 7, 10)
def forward(self, x):
x = x.reshape((-1, 1, 28, 28))
x = self.cnn_layers(x)
x = torch.flatten(x, 1)
x = self.l1(x)
return x
deepconv_model = DeepConvolutionMNIST()
deepconv_optimiser = torch.optim.SGD(deepconv_model.parameters(), lr=0.01)
train_model(
deepconv_model, 2, train_loader, valid_loader, deepconv_optimiser, criterion
)
deepconv_model.eval()
predictions_deepconv = deepconv_model(testx).max(1)[1]
data_deepconv = {
"ImageID": np.arange(1, len(predictions_deepconv) + 1),
"Label": predictions_deepconv,
}
submission_deepconv = pd.DataFrame(data_deepconv, columns=["ImageID", "Label"])
submission_deepconv.to_csv("deepconv.csv", index=False)
submission_deepconv
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/104/69104407.ipynb
| null | null |
[{"Id": 69104407, "ScriptId": 18584222, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6175774, "CreationDate": "07/26/2021 23:42:30", "VersionNumber": 6.0, "Title": "Using MNIST to walk through AI basics", "EvaluationDate": "07/26/2021", "IsChange": true, "TotalLines": 396.0, "LinesInsertedFromPrevious": 2.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 394.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Pytorch stuff
import torch
import torch.nn.functional as F
from torch.utils.data import random_split, DataLoader, Dataset
from torch import nn
# Visualisation
import tqdm
import matplotlib.pyplot as plt
from PIL import Image
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Loading the Training Data
df = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
df
# Use dataframe to get x and y values for training
x = torch.tensor(df.loc[:, "pixel0":"pixel783"].values)
y = torch.tensor(df.loc[:, "label"].values)
imgx = np.array(x.reshape((-1, 28, 28)), dtype=np.uint8)
img = Image.fromarray(imgx[3], mode="L")
img
# # Creating a baseline model
# A simple baseline model would be taking the average pixel brightness for each digit, then compare an image's average brightness to recorded values to classify the image.
# We will make this model general enough to account for varying numbers of choices for different digits.
# sorts data into different labels
labels = list(set(df["label"])) # gives enumeration for the labels
def label_map(index):
return labels[index]
sorted_data = []
for label in labels:
sorted_data.append(
torch.tensor(df.loc[df["label"] == label].loc[:, "pixel0":"pixel783"].values)
)
# A nice way to display tensors
def display_tns(t):
ldf = pd.DataFrame(np.array(t.reshape(28, 28)))
return ldf.style.set_properties(**{"font-size": "5pt"}).background_gradient(
"Greys", axis=None
)
# Generates a heatmap for each of the digits
averages = torch.tensor(list(map(lambda x: np.array(x).mean((0)), sorted_data)))
display_tns(averages[0])
# We define the prediction functions and determine the models accuracy
def classification_losses(s):
if len(s.shape) == 2:
t = s
else:
t = s.unsqueeze_(0)
losses = torch.empty(0, t.shape[0])
for average in averages:
losses = torch.cat((losses, (t - average).abs().mean((-1)).unsqueeze_(0)), 0)
return losses
def predict(t):
return torch.tensor(
[label_map(a) for a in torch.argmin(classification_losses(t), dim=0)]
)
classification_losses(x).shape, predict(x), predict(x[1])
accuracy = torch.tensor([a == b for (a, b) in zip(predict(x), y)]).float().mean()
accuracy
# So now we have a baseline model with which to compare our future modelling attempts.
# We can also use this model to make a submission.
testdf = pd.read_csv("/kaggle/input/digit-recognizer/test.csv")
testx = torch.tensor(testdf.loc[:, "pixel0":"pixel783"].values, dtype=torch.float)
predictions_baseline = predict(testx)
data_baseline = {
"ImageID": np.arange(1, len(predictions_baseline) + 1),
"Label": predictions_baseline,
}
submission_baseline = pd.DataFrame(data_baseline, columns=["ImageID", "Label"])
submission_baseline.to_csv("baseline.csv", index=False)
submission_baseline
# # Using machine learning to do better
# Firstly we're gonna use a simple neural network to try and improve on the 0.65 or so accuracy. Let's first prepare the data by splitting it into validation and training data.
# Typically Dataset would be taken from a saved file rather than stored dynamically, but this is just an experiment
class CustomDatasetMNIST(Dataset):
def __init__(self, data_tensor):
self.x = data_tensor[:, 1:]
self.y = data_tensor[:, 0]
def __len__(self):
return len(self.y)
def __getitem__(self, idx):
x = self.x[idx] / 255 # So all data points lie in [0,1]
y = int(self.y[idx])
return x, y
train, valid = random_split(
CustomDatasetMNIST(torch.tensor(df.values, dtype=torch.float)), [36000, 6000]
)
train_loader = DataLoader(train, batch_size=32)
valid_loader = DataLoader(valid, batch_size=32)
criterion = nn.CrossEntropyLoss()
# First is going to be a very simple neural network, with two hidden layers using RELU as a non-linear function.
class SimpleNetworkMNIST(nn.Module):
def __init__(self):
super(SimpleNetworkMNIST, self).__init__()
self.l1 = nn.Linear(784, 400)
self.l2 = nn.Linear(400, 100)
self.l3 = nn.Linear(100, 10)
def forward(self, x):
x = F.normalize(x)
x = F.relu(self.l1(x))
x = F.relu(self.l2(x))
x = self.l3(x)
return x
simple_model = SimpleNetworkMNIST()
# We also want to describe the loss function we will be using and the optimisation method. For our loss function we will use cross-entropy loss, which is standard for multi-label classification, and for our optimiser, stochastic gradient descent
simple_optimiser = torch.optim.SGD(
simple_model.parameters(), lr=0.1
) # lr denotes a learning rate
# Then we can train the model, while keeping an eye on the metrics:
epochs = 2
for epoch in range(epochs):
# Train over the epoch
simple_model.train()
running_loss = 0.0
for xs, ys in train_loader:
simple_optimiser.zero_grad()
prediction_ys = simple_model(xs)
loss = criterion(prediction_ys, ys)
loss.backward()
simple_optimiser.step() # Note this works as applying loss.backward works on the parameters of the model which is the argument for the optimiser
running_loss += loss.item()
train_loss = running_loss / len(train_loader)
# Get the validation loss
simple_model.eval()
running_loss = 0.0
correct = 0
total = 0
with torch.no_grad():
for xs, ys in valid_loader:
prediction_ys = simple_model(xs)
loss = criterion(prediction_ys, ys)
running_loss += loss.item()
total += ys.size(0)
correct += (prediction_ys.max(1)[1]).eq(ys).sum().item()
valid_loss = running_loss / len(valid_loader)
accuracy = 100.0 * correct / total
print(
f"Epoch: {epoch+1}/{epochs}:\nTraining loss: {train_loss}, Validation loss: {valid_loss}, Accuracy: {accuracy}"
)
# Obviously this model could be trained for longer and with more sophisticated methods, which we will implement later, but let's generate a submission with this initial model (although take note that this model has a much higher accuracy than that of the baseline model showing us that the use of machine learning has been useful).
simple_model.eval()
predictions_simple = simple_model(testx).max(1)[1]
data_simple = {
"ImageID": np.arange(1, len(predictions_simple) + 1),
"Label": predictions_simple,
}
submission_simple = pd.DataFrame(data_simple, columns=["ImageID", "Label"])
submission_simple.to_csv("simple.csv", index=False)
submission_simple
# # Model Interpretation
# Although we have metrics as we train our AI, we might want to look at some of the details of the AI. We can use visualisation methods such as:
# * Confusion matrices
# * Top losses
# Below we can generate a confusion matrix by tallying the times when data with some label was predicted as some label
confusion_matrix = np.zeros((len(labels), len(labels)), dtype=np.uint16)
MNISTdata = torch.tensor(df.values, dtype=torch.float)
simple_model_out = simple_model(MNISTdata[:, 1:])
targets = MNISTdata[:, 0]
# Could potentially also exclusively consider validation data to account for potential overfitting
for prediction, actual in zip(simple_model_out.max(1)[1], targets):
confusion_matrix[prediction, actual.int()] += 1
confusion_matrix
# We can display this information a bit more clearly using matplotlib:
fig, ax = plt.subplots(figsize=(10, 10))
max_val = len(labels)
for i in range(max_val):
for j in range(max_val):
ax.text(i, j, str(confusion_matrix[i, j]), va="center", ha="center")
plt.title("Confusion Matrix")
plt.xlabel("Predicted digit")
plt.ylabel("Actual digit")
plt.xticks(np.arange(0, max_val), labels)
plt.yticks(np.arange(0, max_val), labels)
ax.tick_params(grid_alpha=0)
ax.set_xlim(-0.5, max_val - 0.5)
ax.set_ylim(-0.5, max_val - 0.5)
ax.imshow(confusion_matrix, cmap=plt.cm.Blues, origin="lower", aspect="equal")
ax.grid()
# Looking at this data we see some really interesting and expected flaws in our model, for example it frequently mixes up (2,7) and (4,9) as they are similar looking. We can also see its accuracy in very different looking digits, such as between (3,4) and (2,5). We can also take some of the predictions that resulted in the highest losses to inspect what aspects of these particular examples make them so hard to classify.
top_no = 5
sorted_losses, indices = nn.CrossEntropyLoss(reduction="none")(
simple_model_out, targets.long()
).sort(descending=True)
for i in range(top_no):
loss, actual, predicted = (
sorted_losses[i],
targets[indices[i]].int(),
simple_model_out[indices[i]].max(0)[1],
)
data = MNISTdata[indices[i], 1:]
plt.figure()
plt.title(f"Loss: {loss}, Predicted: {predicted}, Actual: {actual}")
plt.imshow(np.array(data.reshape(28, 28)), cmap=plt.cm.gray)
# As we can see in these top losses are examples of particularly poorly written digits, and in each case (except maybe that 3), you can see what the model predicted in the image. Depending on how this model is trained you may also find erroneous data (there is a 7 mislabelled as a 4 in this dataset), and this highlights a use of this visualisation; removing bad data to aid in the AI's learning process.
# # Improving our initial model
# So we're now going to try improving the model by using a convolutional neural network. The main benefit of this is that the AI can now use the spatial aspects of the image, which will aid in it finding patterns in images (and with some tricks we can see what these patterns are later on).
# The nn.Conv2D class has parameters:
# * **in_channels** - number of input planes
# * **out_channels** - number of output planes
# * **kernel_size** - size of convolution (also specifies number of weights and biases)
# * **stride** - int or tuple describing how far the convolution is shifted over for each evaluation of the convolution
# * **padding** - empty pixels around the image (aids in evaluating pixels around the perimeter)
# * **dilation** - describes distance between evaluation points in the kernel (e.g. dilation = 2 on a 3x3 kernel means evaluation of the 9 friends spread out in a 5x5 area)
# * **groups** - splits connections between input and output layers (both must be divisible by groups value, e.g. groups = 2 is similar to having 2 convolution layers in tandem)
# * **padding_mode** - how the padding pixels are filled, can be one of a few options, default 'zeros'
# * **device**
# * **dtype**
class ConvolutionMNIST(nn.Module):
def __init__(self):
super(ConvolutionMNIST, self).__init__()
self.cnn_layers = nn.Sequential(
nn.Conv2d(1, 4, kernel_size=3, padding=1),
# BatchNorm normalises the data over the features (planes) seperately over the batch and pixels
nn.BatchNorm2d(4),
nn.ReLU(),
# Takes maximum value in a 2 by 2 area of convolution output
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(4, 4, kernel_size=3, padding=1),
nn.BatchNorm2d(4),
nn.ReLU(),
)
# The size of the output planes above is 14 by 14 as size is preserved by conv layer with padding but halved by max pooling
self.l1 = nn.Linear(4 * 14 * 14, 10)
def forward(self, x):
x = x.reshape((-1, 1, 28, 28))
x = self.cnn_layers(x)
x = torch.flatten(x, 1)
x = self.l1(x)
return x
conv_model = ConvolutionMNIST()
os.path.exists("/kaggle//////working")
# Just so we don't have to repeat code, let's define a function to train the models, we'll also add the ability to save models after epochs where the validation losses improved.
def train_model(
model,
epochs,
train_loader,
valid_loader,
optimiser,
criterion,
print_output=True,
save_models=False,
model_path=None,
save_as="model",
):
if save_models:
if not model_path:
raise ValueError("If save_models is True then model_path must be set")
if not os.path.exists(model_path):
os.mkdir(model_path)
train_losses = []
valid_losses = []
accuracies = []
for epoch in range(epochs):
model.train()
running_loss = 0.0
# Some variables have been renamed for clarity
for data, targets in train_loader:
optimiser.zero_grad()
scores = model(data)
loss = criterion(scores, targets)
loss.backward()
optimiser.step() # Note this works as applying loss.backward works on the parameters of the model which is the argument for the optimiser
running_loss += loss.item()
train_loss = running_loss / len(train_loader)
model.eval()
running_loss = 0.0
correct = 0
total = 0
with torch.no_grad():
for data, targets in valid_loader:
scores = model(data)
loss = criterion(scores, targets)
running_loss += loss.item()
total += targets.size(0)
correct += (scores.max(1)[1]).eq(targets).sum().item()
valid_loss = running_loss / len(valid_loader)
accuracy = 100.0 * correct / total
train_losses.append(train_loss)
valid_losses.append(valid_loss)
accuracies.append(accuracy)
if print_output:
print(
f"Epoch: {epoch+1}/{epochs}:\nTraining loss: {train_loss}, Validation loss: {valid_loss}, Accuracy: {accuracy}"
)
if save_models:
torch.save(model.state_dict(), model_path + f"/{save_as}_e{epoch+1}")
# For display
return train_losses, valid_losses, accuracies
conv_optimiser = torch.optim.SGD(conv_model.parameters(), lr=0.01)
train_model(conv_model, 2, train_loader, valid_loader, conv_optimiser, criterion)
# Using our trained model we can generate a submission:
conv_model.eval()
predictions_conv = conv_model(testx).max(1)[1]
data_conv = {
"ImageID": np.arange(1, len(predictions_conv) + 1),
"Label": predictions_conv,
}
submission_conv = pd.DataFrame(data_conv, columns=["ImageID", "Label"])
submission_conv.to_csv("conv.csv", index=False)
submission_conv
# We can see though that this results in a similar result to what we were getting previously. We can improve the model further by adding more layers to the model:
class DeepConvolutionMNIST(nn.Module):
def __init__(self):
super(DeepConvolutionMNIST, self).__init__()
self.cnn_layers = nn.Sequential(
nn.Conv2d(1, 16, kernel_size=3, padding=1),
nn.BatchNorm2d(16),
nn.ReLU(),
nn.Conv2d(16, 16, kernel_size=3, padding=1),
nn.BatchNorm2d(16),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(16, 8, kernel_size=3, padding=1),
nn.BatchNorm2d(8),
nn.ReLU(),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.Conv2d(8, 4, kernel_size=3, padding=1),
nn.BatchNorm2d(4),
nn.ReLU(),
)
self.l1 = nn.Linear(4 * 7 * 7, 10)
def forward(self, x):
x = x.reshape((-1, 1, 28, 28))
x = self.cnn_layers(x)
x = torch.flatten(x, 1)
x = self.l1(x)
return x
deepconv_model = DeepConvolutionMNIST()
deepconv_optimiser = torch.optim.SGD(deepconv_model.parameters(), lr=0.01)
train_model(
deepconv_model, 2, train_loader, valid_loader, deepconv_optimiser, criterion
)
deepconv_model.eval()
predictions_deepconv = deepconv_model(testx).max(1)[1]
data_deepconv = {
"ImageID": np.arange(1, len(predictions_deepconv) + 1),
"Label": predictions_deepconv,
}
submission_deepconv = pd.DataFrame(data_deepconv, columns=["ImageID", "Label"])
submission_deepconv.to_csv("deepconv.csv", index=False)
submission_deepconv
| false | 0 | 4,751 | 0 | 4,751 | 4,751 |
# # Titanic Classification Project
# ## Latest Submission Score: Top 24% (14218/57176)
# ### The full project code and resources can be found on [GitHub](https://github.com/hassangaber/KaggleTitanic).
# ## **Contents**
# 1. [ ] Understanding The Problem & Provided Datasets
# 2. [ ] Exploratory & Statistical Data Analysis: Histograms, Logistic Regression
# 3. [ ] Data Wrangling:
# 4. [ ] Learning Model: Naive
# 5. [ ] Results & Tuning
# ### The Problem and Provided Data/Features
# Using datasets of passenger information from the Titanic (name, age, ticket price, etc.), predict who will survive the Titanic tragedy and who will not. This is a binary classification machine learning problem: the model outputs True if the passenger survives and False if the passenger dies.
# The provided data includes three datasets: `test.csv`, `train.csv`, and `gender_submission.csv`. These can all be found in `~/KaggleTitanic/data`.
# #### `gender_submission.csv`
# Contains two features, PassengerId and the Survived boolean. This is an example dataset showing how to structure the model's predictions. This particular example shows all females surviving and all males dying; a minimal illustration of the required layout follows the field list below.
# * `PassengerId` from `test.csv` indicating the identity of a passenger
# * `Survived` is a boolean indicating `1` for survived and `0` for died
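# For illustration only (a hedged sketch, not part of the original analysis): the two-column
# layout that `gender_submission.csv` demonstrates and that any submission file must follow.
# The PassengerId and Survived values below are placeholders, not real entries from the test set.
import pandas as pd

example_submission = pd.DataFrame({"PassengerId": [1, 2, 3], "Survived": [0, 1, 0]})
example_submission.to_csv("example_submission.csv", index=False)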
# #### `train.csv`
# This is the dataset used to train the model. It includes 11 different features (the dictionary is provided below). There are 891 out of 1309 passengers in this dataset.
# #### `test.csv`
# This is the dataset to test our predictions based on our model trained using `train.csv`. It will show us the accuracy of our model. It includes the exact same features as `train.csv`.
# The development of this project will be staged in a Jupyter notebook and move to python files in the final stages.
#
# Importing all Libraries
import os
import numpy as np
import pandas as pd
import math
import statistics as stat
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
# Loading all datasets and defining directories
for dirname, _, filenames in os.walk("../input/titanic"):
for filename in filenames:
print(os.path.join(dirname, filename))
train_data = pd.read_csv("../input/c/titanic/train.csv")
test_data = pd.read_csv("../input/c/titanic/test.csv")
# Add missing column to properly join both datasets
test_data["Survived"] = np.NaN
test_train_data = pd.concat([train_data, test_data])
# The dataset's intrinsic features
train_data.columns
# ## **Exploratory Data Analysis**
# Here, trends in the data will be recognized and certain relationships will be used to build a better predictor for the test data.
# Exploratory Data Analysis
# Exploring Gender and death rate
men_death = train_data.loc[train_data.Sex == "male"]["Survived"]
men_death_rate = 1 - sum(men_death) / len(men_death)
women_death = train_data.loc[train_data.Sex == "female"]["Survived"]
women_death_rate = 1 - sum(women_death) / len(women_death)
X = ["Man", "Woman"]
Y = [men_death_rate, women_death_rate]
fig = plt.figure()
ax = fig.add_axes([0, 0, 1.1, 1])
ax.bar(X, Y)
plt.ylabel("Death rate")
plt.xlabel("Gender")
print("Men death rate (%):", men_death_rate)
print("Women death rate (%):", women_death_rate)
plt.show()
# Socioeconomic status and death rate
# Their socioeconomic status should imply their priority on the ship (higher class people are usually taken care of)
# Looking at percentage death by class [1,2,3] (1st class being the highest class)
Xarr = []
Yarr = []
for x in range(1, 4):
classdeath = train_data.loc[train_data.Pclass == x]["Survived"]
classdeath_rate = 1 - sum(classdeath) / len(classdeath)
Xarr.append(x)
Yarr.append(classdeath_rate)
print(f"class {x} (%): {classdeath_rate}")
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1])
Xarr = [int(x) for x in Xarr]
ax.bar(Xarr, Yarr)
plt.ylabel("Death rate")
plt.xlabel("Class (Decsending)")
plt.show()
# Good practice to split data labels into qualitative and quantitative variables before processing distributions
df_quant = train_data[["Age", "SibSp", "Parch", "Fare"]]
df_qual = train_data[["Survived", "Pclass", "Sex", "Ticket", "Cabin", "Embarked"]]
# distributions for quantitative labels
for x in df_quant.columns:
plt.hist(df_quant[x])
plt.title(x)
plt.show()
# Show the average value of each quantitative feature grouped by survival outcome
pd.pivot_table(train_data, index="Survived", values=["Age", "SibSp", "Parch", "Fare"])
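# As a first pass at the "Learning Model" step listed in the contents, here is a minimal
# logistic-regression baseline using the libraries imported above. This is only a sketch:
# the chosen feature subset, the median Age imputation, and the simple binary Sex encoding
# are assumptions for illustration, not the tuned model behind the submission score.
baseline = train_data[["Survived", "Pclass", "Sex", "Age", "SibSp", "Parch", "Fare"]].copy()
baseline["Sex"] = (baseline["Sex"] == "female").astype(int)  # 1 = female, 0 = male
baseline["Age"] = baseline["Age"].fillna(baseline["Age"].median())  # impute missing ages
X = baseline.drop(columns="Survived")
y = baseline["Survived"]
X_tr, X_val, y_tr, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
clf = LogisticRegression(max_iter=1000)
clf.fit(X_tr, y_tr)
print("Validation accuracy:", accuracy_score(y_val, clf.predict(X_val)))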
U.S. Hospital Overall Star Ratings 2016-2020
### Context
Every year, all U.S. hospitals that accept payments from Medicare and Medicaid must submit quality data to The Centers for Medicare and Medicaid Services (CMS). CMS' [Hospital Compare](https://www.medicare.gov/care-compare/?providerType=Hospital&redirect=true) program is a consumer-oriented website that provides information on "the quality of care hospitals are providing to their patients." CMS releases this quality data publicly in order to encourage hospitals to improve their quality and to help consumers make better decisions about which providers they visit.
"Hospital Compare provides data on over 4,000 Medicare-certified hospitals, including acute care hospitals, critical access hospitals (CAHs), children’s hospitals, Veterans Health Administration (VHA) Medical Centers, and hospital outpatient departments"
The Centers for Medicare & Medicaid Services (CMS) uses a five-star quality rating system to measure the experiences Medicare beneficiaries have with their health plan and health care system — the Star Rating Program. Health plans are rated on a scale of 1 to 5 stars, with 5 being the highest.
### Content
| Dataset Rows | Dataset Columns |
| --- | --- |
| 25082 | 29 |
* Includes the most recent Hospital General Information.csv data for each archive year found on CMS' [archive site](https://data.cms.gov/provider-data/archived-data/hospitals). ***Years: 2016-2020***
| Column Name | Data Type | Description |
| --- | --- | -- |
| Facility ID | Char(6) | Facility Medicare ID |
| Facility Name | Char(72) | Name of the facility |
| Address | Char(51) | Facility street address |
| City | Char(20) | Facility City |
| State | Char(2) | Facility State |
| ZIP Code | Num(8) | Facility ZIP Code |
| County Name | Char(25) | Facility County |
| Phone Number | Char(14) | Facility Phone Number |
| Hospital Type | Char(34) | What type of facility is it? |
| Hospital Ownership | Char(43) | What type of ownership does the facility have? |
| Emergency Services | Char(3) | Does the facility have emergency services Yes/No? |
| Meets criteria for promoting interoperability of EHRs | Char(1) | Does facility meet government EHR standard Yes/No? |
| Hospital overall rating | Char(13) | Hospital Overall Star Rating 1=Worst; 5=Best. Aggregate measure of all other measures |
| Hospital overall rating footnote | Num(8) | |
| Mortality national comparison | Char(28) | Facility overall performance on mortality measures compared to other facilities |
| Mortality national comparison footnote | Num(8) | |
| Safety of care national comparison | Char(28) | Facility overall performance on safety measures compared to other facilities |
| Safety of care national comparison footnote | Num(8) | |
| Readmission national comparison | Char(28) | Facility overall performance on readmission measures compared to other facilities |
| Readmission national comparison footnote | Num(8) | |
| Patient experience national comparison | Char(28) | Facility overall performance on pat. exp. measures compared to other facilities |
| Patient experience national comparison footnote | Char(8) | |
| Effectiveness of care national comparison | Char(28) | Facility overall performance on effect. of care measures compared to other facilities |
| Effectiveness of care national comparison footnote | Char(8) | |
| Timeliness of care national comparison | Char(28) | Facility overall performance on timeliness of care measures compared to other facilities |
| Timeliness of care national comparison footnote| Char(8) | |
| Efficient use of medical imaging national comparison | Char(28) | Facility overall performance on efficient use measures compared to other facilities |
| Efficient use of medical imaging national comparison footnote | Char(8) | |
| Year | Char(4) | cms data release year |
Kaggle dataset identifier: us-hospital-overall-star-ratings-20162020
# ## The purpose of this notebook is to demonstrate how to load the *U.S. Hospital Overall Star Ratings 2016-2020* dataset followed by a simple exploration of the data to get a sense of the data's characteristics.
# ## Dataset Context
# Every year, all U.S. hospitals that accept payments from Medicare and Medicaid must submit quality data to The Centers for Medicare and Medicaid Services (CMS). CMS' Hospital Compare program is a consumer-oriented website that provides information on "the quality of care hospitals are providing to their patients." CMS releases this quality data publicly in order to encourage hospitals to improve their quality and to help consumers make better decisions about which providers they visit.
# "Hospital Compare provides data on over 4,000 Medicare-certified hospitals, including acute care hospitals, critical access hospitals (CAHs), children’s hospitals, Veterans Health Administration (VHA) Medical Centers, and hospital outpatient departments"
# The Centers for Medicare & Medicaid Services (CMS) uses a five-star quality rating system to measure the experiences Medicare beneficiaries have with their health plan and health care system — the Star Rating Program. Health plans are rated on a scale of 1 to 5 stars, with 5 being the highest.
# ## Acknowledgements
# A similar dataset called *Hospital General Information* was previously uploaded to Kaggle. However, that dataset only includes data from one year (2017). I was inspired by this dataset to go a little further and try to add a time dimension. This dataset includes a union of Hospital General Information for the years 2016-2020.
# Thanks to CMS for releasing this dataset publicly to help consumers find better hospitals and make better-informed decisions.
# All Hospital Compare websites are publicly accessible. As works of the U.S. government, Hospital Compare data are in the public domain and permission is not required to reuse them. An attribution to the agency as the source is appreciated. Your materials, however, should not give the false impression of government endorsement of your commercial products or services.
# ## Data File Format:
# The data was saved in .xlsx format. I will use Pandas read_excel to open the file and read in as a Pandas dataframe.
# ### First Step: Import Python Libraries
import sys
import subprocess
import pkg_resources
# check if a package is already installed. If not, install it.
required = {"xlrd", "openpyxl", "numpy", "pandas", "matplotlib", "seaborn"}
installed = {pkg.key for pkg in pkg_resources.working_set}
missing = required - installed
if missing:
python = sys.executable
subprocess.check_call(
[python, "-m", "pip", "install", *missing], stdout=subprocess.DEVNULL
)
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib as mpl # popular visualization library for Python
import matplotlib.pyplot as plt
import seaborn as sns # popular visualization library for better-looking Python charts
import os # used to work with the operating system: Files, directories, etc.
# ### Iterate over the Kaggle input folder and read all Excel files. The data should be stored in the 'Data' worksheet.
# In this case, there is only one data file, so the loop should only iterate once
dataset_list = []
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
# read in the excel file. Data should be stored in worksheet named "Data"
df = pd.read_excel(
os.path.join(dirname, filename), sheet_name="Data", engine="openpyxl"
)
# append dataset to the above container list. In this case, there is only one.
dataset_list.append(df)
# union all the dataframes together
hosp_df = pd.concat(dataset_list)
hosp_df.shape
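# A small follow-on exploration (a sketch, not part of the original loading step): tally the
# overall star rating by release year. The column names `Hospital overall rating` and `Year`
# come from the dataset description above.
rating_by_year = (
    hosp_df.groupby(["Year", "Hospital overall rating"]).size().unstack(fill_value=0)
)
print(rating_by_year)
# Bar chart of the rating distribution in the most recent release year
latest_year = hosp_df["Year"].max()
ax = (
    hosp_df.loc[hosp_df["Year"] == latest_year, "Hospital overall rating"]
    .value_counts()
    .sort_index()
    .plot(kind="bar")
)
ax.set_xlabel("Hospital overall rating")
ax.set_ylabel("Number of facilities")
ax.set_title(f"Overall star ratings, {latest_year}")
plt.show()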
RAPIDS Kaggle Utils
Kaggle dataset identifier: rapids-kaggle-utils
import cupy as cp
import cudf
import cuml
import glob
from tqdm import tqdm
path = "/kaggle/input/optiver-realized-volatility-prediction"
trade_train_path = glob.glob(f"{path}/trade_train.parquet/*/*.parquet")
book_train_path = glob.glob(f"{path}/book_train.parquet/*/*.parquet")
# Helper used throughout: load one stock's parquet partition into a cudf DataFrame
read_stock_id = lambda f: cudf.read_parquet(f)
# Scan every stock's book file and record the largest / smallest pivoted
# (time_id x seconds_in_bucket) grid shapes
def get_minmax_shape(book_train_path):
for k, v in enumerate(book_train_path):
shape = (
read_stock_id(v)[["time_id", "seconds_in_bucket"]]
.pivot(["time_id"], "seconds_in_bucket")
.shape
)
if k == 0:
max_len = shape[0]
max_width = shape[1]
min_len = shape[0]
min_width = shape[1]
if k > 0:
if shape[0] > max_len:
max_len = shape[0]
if shape[1] > max_width:
max_width = shape[1]
if shape[0] < min_len:
min_len = shape[0]
if shape[1] < min_width:
min_width = shape[1]
return {"max": (max_len, max_width), "min": (min_len, min_width)}
get_minmax_shape(book_train_path)
from numba import cuda, float32, float64
import math
def cu_std_transform(x, y_out):
    # Population standard deviation of each group, broadcast back to every row.
    # acc[0] accumulates the group sum, acc[1] the sum of squared deviations from the mean.
    acc = cuda.shared.array(2, dtype=float64)
    acc[0] = 0
    acc[1] = 0
    cuda.syncthreads()
    for i in range(cuda.threadIdx.x, len(x), cuda.blockDim.x):
        cuda.atomic.add(acc, 0, x[i])
    cuda.syncthreads()
    mean = acc[0] / len(x)
    for i in range(cuda.threadIdx.x, len(x), cuda.blockDim.x):
        cuda.atomic.add(acc, 1, (x[i] - mean) ** 2)
    cuda.syncthreads()
    for i in range(cuda.threadIdx.x, len(x), cuda.blockDim.x):
        y_out[i] = (acc[1] / len(x)) ** 0.5
def cu_mean_transform(x, y_out):
res = cuda.shared.array(1, dtype=float32)
res[0] = 0
cuda.syncthreads()
for i in range(cuda.threadIdx.x, len(x), cuda.blockDim.x):
cuda.atomic.add(res, 0, x[i])
cuda.syncthreads()
for i in range(cuda.threadIdx.x, len(x), cuda.blockDim.x):
y_out[i] = res[0] / len(x)
def get_cu_shift_transform(shift_by, null_val=-1):
def cu_shift_transform(x, y_out):
for i in range(cuda.threadIdx.x, len(x), cuda.blockDim.x):
y_out[i] = null_val
if 0 <= i - shift_by < len(x):
y_out[i] = x[i - shift_by]
return cu_shift_transform
# import cu_utils.transform as cutran
import numpy as np

# dense (time_id, seconds_in_bucket) index, used to align and forward-fill
# the sparse book snapshots of a single stock
time_id_map = (
    read_stock_id(book_train_path[0])[["time_id", "seconds_in_bucket"]]
    .pivot(["time_id"], "seconds_in_bucket")["time_id"]
    .stack(dropna=False)
    .to_frame()
    .drop(0, 1)
    .reset_index()
    .rename({0: "time_id", 1: "seconds_in_bucket"}, axis=1)
    .set_index(["time_id", "seconds_in_bucket"])
)
# sample a single time bucket, align it on the dense index and forward-fill gaps
data = (
    read_stock_id(book_train_path[0])
    .query("time_id==5")
    .set_index(["time_id", "seconds_in_bucket"])
)
data = time_id_map.join(data, how="inner", sort=True)
data = cudf.DataFrame(data.to_pandas().fillna(method="ffill")).reset_index()
means = lambda col: data.groupby("time_id", method="cudf").apply_grouped(
cu_mean_transform,
    incols={col: "x"},
outcols={"y_out": cp.float32},
tpb=32,
)["y_out"]
stds = lambda col: data.groupby("time_id", method="cudf").apply_grouped(
cu_std_transform,
    incols={col: "x"},
outcols={"y_out": cp.float32},
tpb=32,
)["y_out"]
standardize = lambda col: (data[col] - means(col)) / stds(col)
def diff_log_wap_shifted(n, b):
logwap = lambda bp, ap, bz, az: (((bp * az) + (ap * bz)) / (bz + az)).log()
cols = [
b[f"{col}{n}"] for col in ["bid_price", "ask_price", "bid_size", "ask_size"]
]
b[f"log_wap{n}"] = logwap(*cols)
log_wap_shifted = b.groupby(["time_id"], method="cudf").apply_grouped(
get_cu_shift_transform(shift_by=1, null_val=np.NaN),
incols={f"log_wap{n}": "x"},
outcols={"y_out": cp.float32},
tpb=32,
)["y_out"]
return (b[f"log_wap{n}"] - log_wap_shifted) ** 2
def get_logret(b):
b[f"logret1"] = diff_log_wap_shifted(1, b)
b[f"logret2"] = diff_log_wap_shifted(2, b)
return b
def getvol(f):
b = cudf.read_parquet(f)
b = read_stock_id(f).set_index(["time_id", "seconds_in_bucket"])
b = time_id_map.join(b, how="left", sort=True)
b = cudf.DataFrame(b.to_pandas().fillna(method="ffill")).reset_index()
# for col in b.columns:
# if col not in ['time_id', 'seconds_in_bucket']:
# b[col] = standardize(col)
b["stock_id"] = f.split("/")[-2].split("=")[-1]
b = get_logret(b)
b = b.query(f"logret1==logret1")
b = b.groupby(["stock_id", "time_id"]).agg(
{"logret1": {"sum", "std"}, "logret2": {"sum", "std"}, "time_id": "count"}
)
b.columns = b.columns.map("_".join)
sum_cols = [i for i in b.columns if "sum" in i]
b[[f"vol{k+1}" for k, _ in enumerate(sum_cols)]] = b[sum_cols] ** 0.5
b["volrate"] = b.vol1 / b.vol2
return b.reset_index()
vols = cudf.concat([getvol(f) for f in book_train_path]).set_index(
["stock_id", "time_id"]
)
vols
from cuml.metrics.regression import r2_score
rmspe_lambda = lambda y_true, y_pred: np.mean(((y_true - y_pred) / y_true) ** 2) ** 0.5
train_path = glob.glob(f"{path}/train.csv")[0]
train = cudf.read_csv(train_path).set_index(["stock_id", "time_id"])
features = ["vol1", "vol2", "logret1_std", "logret2_std", "volrate", "time_id_count"]
m = train.join(vols.fillna(0), how="inner")[["target"] + features].reset_index()
rr = round(r2_score(m.target, m.vol1), 3)
rmspe = round(rmspe_lambda(m.target, m.vol1), 3)
print(rr, rmspe)
# cutran.cu_min_transform()
# for i in book_train_path:
# t = read_stock_id(i)
# t_mean_trans = t.groupby('time_id', method='cudf').apply_grouped(
# cutran.cu_mean_transform,
# incols={f'seconds_in_bucket': 'x'},
# outcols={'y_out': cp.float32},
# tpb=32
# )['y_out']
# if len(t[t_min_trans!=0]) > 0:
# print(i)
# x = t[t_min_trans!=0]
# break
import xgboost as xgb
from xgboost.sklearn import XGBRegressor
from sklearn.model_selection import GridSearchCV
xgbreg = XGBRegressor(tree_method="gpu_hist", gpu_id=0)
parameters = {
"predictor": ["gpu_predictor"],
"objective": ["reg:squarederror", "reg:logistic", "reg:squaredlogerror"],
"eval_metric": ["rmse"],
"booster": ["gblinear"],
"subsample": [0.75, 0.5, 0.25],
"n_estimators": [750, 1000],
}
xgb_grid = GridSearchCV(xgbreg, parameters, cv=3, verbose=False, n_jobs=-1)
# x_train_full = cp.array(m[['stock_id', 'time_id']+features].to_gpu_matrix()).get()
# y_train_full = cp.array(m[['target']].to_gpu_matrix()).get()
# xgb_grid.fit(x_train_full, y_train_full)
# print(xgb_grid.best_score_)
# print(xgb_grid.best_params_)
# %cd /kaggle/working
# df.to_csv("submission.csv", index=False, columns=["row_id", "target"])
# cudf.read_csv("submission.csv")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/104/69104281.ipynb
|
rapids-kaggle-utils
|
aerdem4
|
[{"Id": 69104281, "ScriptId": 18457145, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3948414, "CreationDate": "07/26/2021 23:38:21", "VersionNumber": 2.0, "Title": "Egg Croissant", "EvaluationDate": "07/26/2021", "IsChange": true, "TotalLines": 211.0, "LinesInsertedFromPrevious": 132.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 79.0, "LinesInsertedFromFork": 197.0, "LinesDeletedFromFork": 135.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 14.0, "TotalVotes": 0}]
|
[{"Id": 91890291, "KernelVersionId": 69104281, "SourceDatasetVersionId": 2399210}]
|
[{"Id": 2399210, "DatasetId": 1450749, "DatasourceVersionId": 2441232, "CreatorUserId": 471945, "LicenseName": "Unknown", "CreationDate": "07/06/2021 07:13:36", "VersionNumber": 1.0, "Title": "RAPIDS Kaggle Utils", "Slug": "rapids-kaggle-utils", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1450749, "CreatorUserId": 471945, "OwnerUserId": 471945.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2399210.0, "CurrentDatasourceVersionId": 2441232.0, "ForumId": 1470296, "Type": 2, "CreationDate": "07/06/2021 07:13:36", "LastActivityDate": "07/06/2021", "TotalViews": 1547, "TotalDownloads": 47, "TotalVotes": 6, "TotalKernels": 11}]
|
[{"Id": 471945, "UserName": "aerdem4", "DisplayName": "Ahmet Erdem", "RegisterDate": "11/22/2015", "PerformanceTier": 4}]
|
| false | 0 | 2,597 | 0 | 2,624 | 2,597 |
||
69980089
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Train.csv will contain the details of a subset of the passengers on board (891 to be exact) and importantly, will reveal whether they survived or not, also known as the “ground truth”.
# The `test.csv` dataset contains similar information but does not disclose the “ground truth” for each passenger. It’s your job to predict these outcomes.
# Using the patterns you find in the train.csv data, predict whether the other 418 passengers on board (found in test.csv) survived.
# ## **Evaluation**
# **Goal**
# It is your job to predict if a passenger survived the sinking of the Titanic or not.
# For each PassengerId in the test set, you must predict a 0 or 1 value for the Survived variable.
# **Metric**
# Your score is the percentage of passengers you correctly predict. This is known as accuracy.
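# As a small illustration of the metric (a hedged sketch with made-up labels,
# not competition data): accuracy is just the fraction of matching predictions.
_true_example = np.array([0, 1, 1, 0])
_pred_example = np.array([0, 1, 0, 0])
print("example accuracy:", (_true_example == _pred_example).mean())  # 3/4 = 0.75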
# **Submission File Format**
# You should submit a csv file with exactly 418 entries plus a header row. Your submission will show an error if you have extra columns (beyond PassengerId and Survived) or rows.
# The file should have exactly 2 columns:
# PassengerId (sorted in any order)
# Survived (contains your binary predictions: 1 for survived, 0 for deceased)
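# A minimal sketch of the required layout, using made-up PassengerIds and
# predictions (the real test IDs and model outputs appear later in this notebook):
_submission_example = pd.DataFrame(
    {"PassengerId": [892, 893, 894], "Survived": [0, 1, 0]}
)
print(_submission_example)
# the full 418-row frame would then be written with:
# _submission_example.to_csv("submission.csv", index=False)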
# ## **STEPS**
# 1. Understanding the data: look at the independent variables X and the dependent variable Y (survived or not).
# 2. Finding important independent variables: how X relates to Y.
# 3. Data cleaning: find missing values and outliers (train set and test set).
# 4. Modeling (Random Forest, Decision Tree, K-nearest neighbors?)
# * 1. Check normality / homoscedasticity / linearity / absence of correlated errors.
# * 2. If Y is not normally distributed, apply a transformation.
# 5. Model evaluation and improving the model
#
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib as mpl
import seaborn as sns
from scipy import stats
import scipy.stats as stats
# ## **Understanding data**
# read train set and test set
train = pd.read_csv("../input/titanic/train.csv")
test = pd.read_csv("../input/titanic/test.csv")
# 
# **Variable Notes**
# pclass: A proxy for socio-economic status (SES)
# 1st = Upper
# 2nd = Middle
# 3rd = Lower
# age: Age is fractional if less than 1. If the age is estimated, it is in the form of xx.5
# sibsp: The dataset defines family relations in this way...
# Sibling = brother, sister, stepbrother, stepsister
# Spouse = husband, wife (mistresses and fiancés were ignored)
# parch: The dataset defines family relations in this way...
# Parent = mother, father
# Child = daughter, son, stepdaughter, stepson
# Some children travelled only with a nanny, therefore parch=0 for them.
train.head()
# Ticket number and cabin number seem unimportant; we could drop them.
train.isnull().sum()
# handle missing values in Age and Embarked
# fill missing values in Age with the mean
train["Age"] = train["Age"].fillna(train["Age"].mean())
# fill missing values in Embarked with the mode
train["Embarked"] = train["Embarked"].fillna(train["Embarked"].mode()[0])
train = train.drop(["Cabin"], axis=1)
print(train.isnull().sum())
train.head()
# no missing values now
# encode the categorical variables Sex and Embarked
train["Sex_code"] = train["Sex"].map({"female": 1, "male": 0}).astype("int")
train["Embarked_code"] = train["Embarked"].map({"S": 0, "C": 1, "Q": 2}).astype("int")
train.head()
# ## **Pclass vs Survived**
sns.countplot(train["Pclass"], hue=train["Survived"])
plt.title("Pclass vs Survived")
# ## **Age vs Survived**
data = pd.concat([train["Age"], train["Survived"]], axis=1)
boxplot, ax = plt.subplots(figsize=(15, 10))
boxplot = sns.boxplot(x="Survived", y="Age", data=data)
plt.title("Age vs Survived")
# ## **Gender vs Survived**
sns.countplot(train["Sex"], hue=train["Survived"])
# ## **Embarked vs Survived**
sns.countplot(train["Embarked"], hue=train["Survived"])
# ## **Fare vs Survived**
data = pd.concat([train["Fare"], train["Survived"]], axis=1)
boxplot, ax = plt.subplots(figsize=(15, 10))
boxplot = sns.boxplot(x="Survived", y="Fare", data=data)
plt.title("Fare vs Survived")
# ## **Finding Important Features and Building Random Forest model**
X = train[["Pclass", "Age", "SibSp", "Parch", "Fare", "Sex_code", "Embarked_code"]]
y = train["Survived"]
# Import train_test_split function
from sklearn.model_selection import train_test_split
# Split dataset into training set and test set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3
) # 70% training and 30% test
from sklearn.ensemble import RandomForestClassifier
# Create a Random Forest classifier
clf = RandomForestClassifier(n_estimators=100)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# Import scikit-learn metrics module for accuracy calculation
from sklearn import metrics
# Model Accuracy, how often is the classifier correct?
print("Accuracy:", metrics.accuracy_score(y_test, y_pred))
import pandas as pd
feature_importances = pd.DataFrame(
clf.feature_importances_, index=X_train.columns, columns=["importance"]
).sort_values("importance", ascending=False)
print(feature_importances)
# remove some less important variables (Parch and Embarked)
X = train[["Pclass", "Age", "SibSp", "Fare", "Sex_code"]]
y = train["Survived"]
# Split dataset into training set and test set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3
) # 70% training and 30% test
# Create a Random Forest classifier
clf = RandomForestClassifier(n_estimators=100)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# Import scikit-learn metrics module for accuracy calculation
from sklearn import metrics
# Model Accuracy, how often is the classifier correct?
print("Accuracy:", metrics.accuracy_score(y_test, y_pred))
# remove some less important variables (Pclass and SibSp)
X = train[["Age", "Fare", "Sex_code"]]
y = train["Survived"]
# Split dataset into training set and test set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3
) # 70% training and 30% test
# Create a Random Forest classifier
clf = RandomForestClassifier(n_estimators=100)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# Import scikit-learn metrics module for accuracy calculation
from sklearn import metrics
# Model Accuracy, how often is the classifier correct?
print("Accuracy:", metrics.accuracy_score(y_test, y_pred))
X = train[["Age", "Fare", "Sex_code", "Pclass"]]
y = train["Survived"]
# Split dataset into training set and test set
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3
) # 70% training and 30% test
# Create a Random Forest classifier
clf = RandomForestClassifier(n_estimators=100)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# Import scikit-learn metrics module for accuracy calculation
from sklearn import metrics
# Model Accuracy, how often is the classifier correct?
print("Accuracy:", metrics.accuracy_score(y_test, y_pred))
test.head()
# encode the categorical variables Sex and Embarked in the test set
test["Sex_code"] = test["Sex"].map({"female": 1, "male": 0}).astype("int")
test["Embarked_code"] = test["Embarked"].map({"S": 0, "C": 1, "Q": 2}).astype("int")
test.isnull().sum()
test["Age"] = test["Age"].fillna(test["Age"].mean())
test["Fare"] = test["Fare"].fillna(test["Fare"].mean())
X1 = test[["Age", "Fare", "Sex_code", "Pclass"]]
X = train[["Age", "Fare", "Sex_code", "Pclass"]]
y = train["Survived"]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3
) # 70% training and 30% test
# Create a Random Forest classifier
clf = RandomForestClassifier(n_estimators=100)
clf.fit(X_train, y_train)
y_pred = clf.predict(X1)
print(len(y_pred))
print(len(test))
y_pred
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/980/69980089.ipynb
| null | null |
[{"Id": 69980089, "ScriptId": 19016252, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7916326, "CreationDate": "08/04/2021 05:31:14", "VersionNumber": 3.0, "Title": "Titanic - Machine Learning from Disaster", "EvaluationDate": "08/04/2021", "IsChange": true, "TotalLines": 255.0, "LinesInsertedFromPrevious": 5.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 250.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 2,672 | 0 | 2,672 | 2,672 |
||
69980373
|
import os
import glob
from joblib import Parallel, delayed
import joblib
import pandas as pd
import numpy as np
import scipy as sc
from sklearn.model_selection import KFold
import lightgbm as lgb
import warnings
warnings.filterwarnings("ignore")
pd.set_option("max_columns", 300)
# data directory
data_dir = "../input/optiver-realized-volatility-prediction/"
def calculate_wap(df):
a1 = df["bid_price1"] * df["ask_size1"] + df["ask_price1"] * df["bid_size1"]
a2 = df["bid_price2"] * df["ask_size2"] + df["ask_price2"] * df["bid_size2"]
b = df["bid_size1"] + df["ask_size1"] + df["bid_size2"] + df["ask_size2"]
return (a1 + a2) / b
def calculate_wap1(df):
a1 = df["bid_price1"] * df["ask_size1"] + df["ask_price1"] * df["bid_size1"]
b1 = df["bid_size1"] + df["ask_size1"]
a2 = df["bid_price2"] * df["ask_size2"] + df["ask_price2"] * df["bid_size2"]
b2 = df["bid_size2"] + df["ask_size2"]
x = (a1 / b1 + a2 / b2) / 2
return x
# Function to calculate the log of the return
# Remember that logb(x / y) = logb(x) - logb(y)
def log_return(series):
return np.log(series).diff()
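# Quick sanity check of the identity above on a toy price series (an
# illustration added here, not part of the original feature pipeline):
# the diff of logs equals the log of the ratio of consecutive prices.
_toy_prices = pd.Series([100.0, 101.0, 99.5, 100.25])
assert np.allclose(
    np.log(_toy_prices).diff().dropna(),
    np.log(_toy_prices / _toy_prices.shift(1)).dropna(),
)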
# Calculate the realized volatility
def realized_volatility(series):
return np.sqrt(np.sum(series**2))
# Function to count unique elements of a series
def count_unique(series):
return len(np.unique(series))
def ewma_vol_999(ReturnSeries, Lambda=0.999):
# ReturnSeries = ReturnSeries[ReturnSeries != 0]
ReturnSeries = log_return(ReturnSeries)
SampleSize = len(ReturnSeries)
Average = ReturnSeries.mean()
e = np.arange(SampleSize - 1, -1, -1)
r = np.repeat(Lambda, SampleSize)
vecLambda = np.power(r, e)
sxxewm = (np.power(ReturnSeries - Average, 2) * vecLambda).sum()
# Vart = sxxewm/vecLambda.sum()
EWMAVol = np.sqrt(sxxewm)
return EWMAVol
# Function to read our base train and test set
def read_train_test():
train = pd.read_csv(data_dir + "train.csv")
test = pd.read_csv(data_dir + "test.csv")
# Create a key to merge with book and trade data
train["row_id"] = train["stock_id"].astype(str) + "-" + train["time_id"].astype(str)
test["row_id"] = test["stock_id"].astype(str) + "-" + test["time_id"].astype(str)
print(f"Our training set has {train.shape[0]} rows")
return train, test
# Function to get group stats for different windows (seconds in bucket)
def get_stats_window(df, create_feature_dict, seconds_in_bucket, add_suffix=False):
# Group by the window
df_feature = (
df[df["seconds_in_bucket"] >= seconds_in_bucket]
.groupby(["time_id"])
.agg(create_feature_dict)
.reset_index()
)
# Rename columns joining suffix
df_feature.columns = ["_".join(col) for col in df_feature.columns]
# Add a suffix to differentiate windows
if add_suffix:
df_feature = df_feature.add_suffix("_" + str(seconds_in_bucket))
return df_feature
def get_stats_window2(
df, create_feature_dict, seconds_in_bucket_s, seconds_in_bucket_e, add_suffix=False
):
# Group by the window
df_feature = (
df[
(df["seconds_in_bucket"] >= seconds_in_bucket_s)
& (df["seconds_in_bucket"] < seconds_in_bucket_e)
]
.groupby(["time_id"])
.agg(create_feature_dict)
.reset_index()
)
# Rename columns joining suffix
df_feature.columns = ["_".join(col) for col in df_feature.columns]
# Add a suffix to differentiate windows
if add_suffix:
df_feature = df_feature.add_suffix(
"_" + str(seconds_in_bucket_s) + "_" + str(seconds_in_bucket_e)
)
return df_feature
# Function to preprocess book data (for each stock id)
def book_preprocessor(file_path):
df = pd.read_parquet(file_path)
# Calculate Wap
df["wap1"] = (
df["bid_price1"] * df["ask_size1"] + df["ask_price1"] * df["bid_size1"]
) / (df["bid_size1"] + df["ask_size1"])
df["wap2"] = (
df["bid_price2"] * df["ask_size2"] + df["ask_price2"] * df["bid_size2"]
) / (df["bid_size2"] + df["ask_size2"])
# df['wap3'] = calculate_wap(df)
# Calculate log returns
df["log_return1"] = df.groupby(["time_id"])["wap1"].apply(log_return)
df["log_return2"] = df.groupby(["time_id"])["wap2"].apply(log_return)
# df['log_return3'] = df.groupby(['time_id'])['wap3'].apply(log_return)
# df['bas_1'] = df['ask_price1'] / df['bid_price1'] - 1
# df['bas_2'] = df['ask_price2'] / df['bid_price2'] - 1
df["price_spread"] = (df["ask_price1"] - df["bid_price1"]) / (
df["ask_price1"] + df["bid_price1"]
)
df["bid_spread"] = df["bid_price1"] / df["bid_price2"] - 1
df["ask_spread"] = df["ask_price1"] / df["ask_price2"] - 1
df["total_volume_1"] = df["ask_size1"] + df["bid_size1"]
# df['total_volume_2'] = (df['ask_size1'] + df['ask_size2']) + (df['bid_size1'] + df['bid_size2'])
# df['volume_imbalance'] = abs((df['ask_size1'] + df['ask_size2']) - (df['bid_size1'] + df['bid_size2']))
# Dict for aggregations
create_feature_dict = {
# 'wap1': [np.mean, np.std],
# 'wap2': [np.mean, np.std],
"log_return1": [realized_volatility, np.mean, np.std],
"log_return2": [realized_volatility, np.mean, np.std],
# 'log_return3': [realized_volatility, np.mean, np.std],
#'wap_balance': [np.mean, np.std],
"seconds_in_bucket": ["size"],
"price_spread": [realized_volatility, np.mean, np.std],
# 'bas_1':[realized_volatility, np.mean, np.std],
# 'bas_2':[realized_volatility, np.mean, np.std],
"bid_spread": [realized_volatility, np.mean, np.std],
"ask_spread": [realized_volatility, np.mean, np.std],
"total_volume_1": [realized_volatility, np.mean, np.std],
# 'total_volume_2':[realized_volatility, np.sum, np.mean, np.std],
#'volume_imbalance':[np.mean, np.std]
}
# Get the stats for different windows
df_feature = get_stats_window(
df, create_feature_dict, seconds_in_bucket=0, add_suffix=False
)
df_feature_450 = get_stats_window(
df, create_feature_dict, seconds_in_bucket=450, add_suffix=True
)
df_feature_300 = get_stats_window(
df, create_feature_dict, seconds_in_bucket=300, add_suffix=True
)
df_feature_150 = get_stats_window(
df, create_feature_dict, seconds_in_bucket=150, add_suffix=True
)
df_feature_0_150 = get_stats_window2(
df,
create_feature_dict,
seconds_in_bucket_s=0,
seconds_in_bucket_e=150,
add_suffix=True,
)
df_feature_150_300 = get_stats_window2(
df,
create_feature_dict,
seconds_in_bucket_s=150,
seconds_in_bucket_e=300,
add_suffix=True,
)
df_feature_300_450 = get_stats_window2(
df,
create_feature_dict,
seconds_in_bucket_s=300,
seconds_in_bucket_e=450,
add_suffix=True,
)
df_feature_450_600 = get_stats_window2(
df,
create_feature_dict,
seconds_in_bucket_s=450,
seconds_in_bucket_e=600,
add_suffix=True,
)
# Merge all
df_feature = df_feature.merge(
df_feature_450, how="left", left_on="time_id_", right_on="time_id__450"
)
df_feature = df_feature.merge(
df_feature_300, how="left", left_on="time_id_", right_on="time_id__300"
)
df_feature = df_feature.merge(
df_feature_150, how="left", left_on="time_id_", right_on="time_id__150"
)
df_feature = df_feature.merge(
df_feature_0_150, how="left", left_on="time_id_", right_on="time_id__0_150"
)
df_feature = df_feature.merge(
df_feature_150_300, how="left", left_on="time_id_", right_on="time_id__150_300"
)
df_feature = df_feature.merge(
df_feature_300_450, how="left", left_on="time_id_", right_on="time_id__300_450"
)
df_feature = df_feature.merge(
df_feature_450_600, how="left", left_on="time_id_", right_on="time_id__450_600"
)
    # Drop unnecessary time_ids
unused_time_id = df_feature.filter(like="time_id__").columns.tolist()
df_feature.drop(unused_time_id, axis=1, inplace=True)
# Create row_id so we can merge
stock_id = file_path.split("=")[1]
df_feature["row_id"] = df_feature["time_id_"].apply(lambda x: f"{stock_id}-{x}")
df_feature.drop(["time_id_"], axis=1, inplace=True)
return df_feature
# Function to preprocess trade data (for each stock id)
def trade_preprocessor(file_path):
df = pd.read_parquet(file_path)
df["log_return"] = df.groupby("time_id")["price"].apply(log_return)
df["size_per_order"] = df["size"] / df["order_count"]
# Dict for aggregations
create_feature_dict = {
"log_return": ["std", realized_volatility],
"seconds_in_bucket": ["size"],
"size": ["mean", "std", "sum", realized_volatility],
"order_count": ["mean", "std", "sum", realized_volatility],
"size_per_order": ["mean", "std", "sum", realized_volatility],
}
# Get the stats for different windows
df_feature = get_stats_window(
df, create_feature_dict, seconds_in_bucket=0, add_suffix=False
)
df_feature_450 = get_stats_window(
df, create_feature_dict, seconds_in_bucket=450, add_suffix=True
)
df_feature_300 = get_stats_window(
df, create_feature_dict, seconds_in_bucket=300, add_suffix=True
)
df_feature_150 = get_stats_window(
df, create_feature_dict, seconds_in_bucket=150, add_suffix=True
)
df_feature_0_150 = get_stats_window2(
df,
create_feature_dict,
seconds_in_bucket_s=0,
seconds_in_bucket_e=150,
add_suffix=True,
)
df_feature_150_300 = get_stats_window2(
df,
create_feature_dict,
seconds_in_bucket_s=150,
seconds_in_bucket_e=300,
add_suffix=True,
)
df_feature_300_450 = get_stats_window2(
df,
create_feature_dict,
seconds_in_bucket_s=300,
seconds_in_bucket_e=450,
add_suffix=True,
)
df_feature_450_600 = get_stats_window2(
df,
create_feature_dict,
seconds_in_bucket_s=450,
seconds_in_bucket_e=600,
add_suffix=True,
)
# Merge all
df_feature = df_feature.merge(
df_feature_450, how="left", left_on="time_id_", right_on="time_id__450"
)
df_feature = df_feature.merge(
df_feature_300, how="left", left_on="time_id_", right_on="time_id__300"
)
df_feature = df_feature.merge(
df_feature_150, how="left", left_on="time_id_", right_on="time_id__150"
)
df_feature = df_feature.merge(
df_feature_0_150, how="left", left_on="time_id_", right_on="time_id__0_150"
)
df_feature = df_feature.merge(
df_feature_150_300, how="left", left_on="time_id_", right_on="time_id__150_300"
)
df_feature = df_feature.merge(
df_feature_300_450, how="left", left_on="time_id_", right_on="time_id__300_450"
)
df_feature = df_feature.merge(
df_feature_450_600, how="left", left_on="time_id_", right_on="time_id__450_600"
)
    # Drop unnecessary time_ids
unused_time_id = df_feature.filter(like="time_id__").columns.tolist()
df_feature.drop(unused_time_id, axis=1, inplace=True)
df_feature = df_feature.add_prefix("trade_")
stock_id = file_path.split("=")[1]
df_feature["row_id"] = df_feature["trade_time_id_"].apply(
lambda x: f"{stock_id}-{x}"
)
df_feature.drop(["trade_time_id_"], axis=1, inplace=True)
return df_feature
# Function to get group stats for the stock_id and time_id
def get_time_stock(df):
# Get realized volatility columns
# vol_cols = ['log_return1_realized_volatility', 'log_return2_realized_volatility', 'log_return1_realized_volatility_450', 'log_return2_realized_volatility_450',
# 'log_return1_realized_volatility_300', 'log_return2_realized_volatility_300', 'log_return1_realized_volatility_150', 'log_return2_realized_volatility_150',
# 'trade_log_return_realized_volatility', 'trade_log_return_realized_volatility_450', 'trade_log_return_realized_volatility_300', 'trade_log_return_realized_volatility_150']
vol_cols = [f for f in df.columns if ("realized_volatility" in f)]
# # Group by the stock id
# df_stock_id = df.groupby(['stock_id'])[vol_cols].agg(['mean', 'std', 'max', 'min', ]).reset_index()
# # Rename columns joining suffix
# df_stock_id.columns = ['_'.join(col) for col in df_stock_id.columns]
# df_stock_id = df_stock_id.add_suffix('_' + 'stock')
    # Group by the time id
df_time_id = (
df.groupby(["time_id"])[vol_cols]
.agg(
[
"mean",
"std",
"max",
"min",
]
)
.reset_index()
)
# Rename columns joining suffix
df_time_id.columns = ["_".join(col) for col in df_time_id.columns]
df_time_id = df_time_id.add_suffix("_" + "time")
# Merge with original dataframe
# df = df.merge(df_stock_id, how = 'left', left_on = ['stock_id'], right_on = ['stock_id__stock'])
df = df.merge(
df_time_id, how="left", left_on=["time_id"], right_on=["time_id__time"]
)
df.drop(["time_id__time"], axis=1, inplace=True)
return df
# Function to run the preprocessing in parallel (one job per stock id)
def preprocessor(list_stock_ids, is_train=True):
from tqdm import tqdm
    # Parallel for loop
def for_joblib(stock_id):
# Train
if is_train:
file_path_book = data_dir + "book_train.parquet/stock_id=" + str(stock_id)
file_path_trade = data_dir + "trade_train.parquet/stock_id=" + str(stock_id)
# Test
else:
file_path_book = data_dir + "book_test.parquet/stock_id=" + str(stock_id)
file_path_trade = data_dir + "trade_test.parquet/stock_id=" + str(stock_id)
# Preprocess book and trade data and merge them
df_tmp = pd.merge(
book_preprocessor(file_path_book),
trade_preprocessor(file_path_trade),
on="row_id",
how="left",
)
        # Return the merged dataframe
return df_tmp
    # Use the joblib Parallel API to run the loop in parallel
df = Parallel(n_jobs=-1, verbose=1)(
delayed(for_joblib)(stock_id) for stock_id in tqdm(list_stock_ids)
)
    # Concatenate all the dataframes returned by Parallel
df = pd.concat(df, ignore_index=True)
return df
# Function to calculate the root mean squared percentage error
def rmspe(y_true, y_pred):
return np.sqrt(np.mean(np.square((y_true - y_pred) / y_true)))
# Function to early stop with root mean squared percentage error
def feval_rmspe(y_pred, lgb_train):
y_true = lgb_train.get_label()
return "RMSPE", rmspe(y_true, y_pred), False
train, test = read_train_test()
# Get unique stock ids
test_stock_ids = test["stock_id"].unique()
# Preprocess them using Parallel and our single stock id functions
test_ = preprocessor(test_stock_ids, is_train=False)
test = test.merge(test_, on=["row_id"], how="left")
test = get_time_stock(test)
features = test.drop(columns=["time_id", "row_id"]).columns
MODEL_DIR = "../input/lgbmv12"
y_preds = np.zeros(len(test))
files = glob.glob(f"{MODEL_DIR}/*lgbm*.pkl")
assert len(files) > 0
for i, f in enumerate(files):
print(f)
model = joblib.load(f)
y_preds += model.predict(test[features])
y_preds /= i + 1
test["target"] = y_preds
sub = test[["row_id", "target"]]
sub
sub.to_csv("submission.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/980/69980373.ipynb
| null | null |
[{"Id": 69980373, "ScriptId": 18889078, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2934869, "CreationDate": "08/04/2021 05:32:59", "VersionNumber": 13.0, "Title": "lgbm-infer", "EvaluationDate": "08/04/2021", "IsChange": true, "TotalLines": 312.0, "LinesInsertedFromPrevious": 6.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 306.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import os
import glob
from joblib import Parallel, delayed
import joblib
import pandas as pd
import numpy as np
import scipy as sc
from sklearn.model_selection import KFold
import lightgbm as lgb
import warnings
warnings.filterwarnings("ignore")
pd.set_option("max_columns", 300)
# data directory
data_dir = "../input/optiver-realized-volatility-prediction/"
def calculate_wap(df):
a1 = df["bid_price1"] * df["ask_size1"] + df["ask_price1"] * df["bid_size1"]
a2 = df["bid_price2"] * df["ask_size2"] + df["ask_price2"] * df["bid_size2"]
b = df["bid_size1"] + df["ask_size1"] + df["bid_size2"] + df["ask_size2"]
return (a1 + a2) / b
def calculate_wap1(df):
a1 = df["bid_price1"] * df["ask_size1"] + df["ask_price1"] * df["bid_size1"]
b1 = df["bid_size1"] + df["ask_size1"]
a2 = df["bid_price2"] * df["ask_size2"] + df["ask_price2"] * df["bid_size2"]
b2 = df["bid_size2"] + df["ask_size2"]
x = (a1 / b1 + a2 / b2) / 2
return x
# Function to calculate the log of the return
# Remember that logb(x / y) = logb(x) - logb(y)
def log_return(series):
return np.log(series).diff()
# Calculate the realized volatility
def realized_volatility(series):
return np.sqrt(np.sum(series**2))
# Function to count unique elements of a series
def count_unique(series):
return len(np.unique(series))
def ewma_vol_999(ReturnSeries, Lambda=0.999):
# ReturnSeries = ReturnSeries[ReturnSeries != 0]
ReturnSeries = log_return(ReturnSeries)
SampleSize = len(ReturnSeries)
Average = ReturnSeries.mean()
e = np.arange(SampleSize - 1, -1, -1)
r = np.repeat(Lambda, SampleSize)
vecLambda = np.power(r, e)
sxxewm = (np.power(ReturnSeries - Average, 2) * vecLambda).sum()
# Vart = sxxewm/vecLambda.sum()
EWMAVol = np.sqrt(sxxewm)
return EWMAVol
# Function to read our base train and test set
def read_train_test():
train = pd.read_csv(data_dir + "train.csv")
test = pd.read_csv(data_dir + "test.csv")
# Create a key to merge with book and trade data
train["row_id"] = train["stock_id"].astype(str) + "-" + train["time_id"].astype(str)
test["row_id"] = test["stock_id"].astype(str) + "-" + test["time_id"].astype(str)
print(f"Our training set has {train.shape[0]} rows")
return train, test
# Function to get group stats for different windows (seconds in bucket)
def get_stats_window(df, create_feature_dict, seconds_in_bucket, add_suffix=False):
# Group by the window
df_feature = (
df[df["seconds_in_bucket"] >= seconds_in_bucket]
.groupby(["time_id"])
.agg(create_feature_dict)
.reset_index()
)
# Rename columns joining suffix
df_feature.columns = ["_".join(col) for col in df_feature.columns]
# Add a suffix to differentiate windows
if add_suffix:
df_feature = df_feature.add_suffix("_" + str(seconds_in_bucket))
return df_feature
def get_stats_window2(
df, create_feature_dict, seconds_in_bucket_s, seconds_in_bucket_e, add_suffix=False
):
# Group by the window
df_feature = (
df[
(df["seconds_in_bucket"] >= seconds_in_bucket_s)
& (df["seconds_in_bucket"] < seconds_in_bucket_e)
]
.groupby(["time_id"])
.agg(create_feature_dict)
.reset_index()
)
# Rename columns joining suffix
df_feature.columns = ["_".join(col) for col in df_feature.columns]
# Add a suffix to differentiate windows
if add_suffix:
df_feature = df_feature.add_suffix(
"_" + str(seconds_in_bucket_s) + "_" + str(seconds_in_bucket_e)
)
return df_feature
# Function to preprocess book data (for each stock id)
def book_preprocessor(file_path):
df = pd.read_parquet(file_path)
# Calculate Wap
df["wap1"] = (
df["bid_price1"] * df["ask_size1"] + df["ask_price1"] * df["bid_size1"]
) / (df["bid_size1"] + df["ask_size1"])
df["wap2"] = (
df["bid_price2"] * df["ask_size2"] + df["ask_price2"] * df["bid_size2"]
) / (df["bid_size2"] + df["ask_size2"])
# df['wap3'] = calculate_wap(df)
# Calculate log returns
df["log_return1"] = df.groupby(["time_id"])["wap1"].apply(log_return)
df["log_return2"] = df.groupby(["time_id"])["wap2"].apply(log_return)
# df['log_return3'] = df.groupby(['time_id'])['wap3'].apply(log_return)
# df['bas_1'] = df['ask_price1'] / df['bid_price1'] - 1
# df['bas_2'] = df['ask_price2'] / df['bid_price2'] - 1
df["price_spread"] = (df["ask_price1"] - df["bid_price1"]) / (
df["ask_price1"] + df["bid_price1"]
)
df["bid_spread"] = df["bid_price1"] / df["bid_price2"] - 1
df["ask_spread"] = df["ask_price1"] / df["ask_price2"] - 1
df["total_volume_1"] = df["ask_size1"] + df["bid_size1"]
# df['total_volume_2'] = (df['ask_size1'] + df['ask_size2']) + (df['bid_size1'] + df['bid_size2'])
# df['volume_imbalance'] = abs((df['ask_size1'] + df['ask_size2']) - (df['bid_size1'] + df['bid_size2']))
# Dict for aggregations
create_feature_dict = {
# 'wap1': [np.mean, np.std],
# 'wap2': [np.mean, np.std],
"log_return1": [realized_volatility, np.mean, np.std],
"log_return2": [realized_volatility, np.mean, np.std],
# 'log_return3': [realized_volatility, np.mean, np.std],
#'wap_balance': [np.mean, np.std],
"seconds_in_bucket": ["size"],
"price_spread": [realized_volatility, np.mean, np.std],
# 'bas_1':[realized_volatility, np.mean, np.std],
# 'bas_2':[realized_volatility, np.mean, np.std],
"bid_spread": [realized_volatility, np.mean, np.std],
"ask_spread": [realized_volatility, np.mean, np.std],
"total_volume_1": [realized_volatility, np.mean, np.std],
# 'total_volume_2':[realized_volatility, np.sum, np.mean, np.std],
#'volume_imbalance':[np.mean, np.std]
}
# Get the stats for different windows
df_feature = get_stats_window(
df, create_feature_dict, seconds_in_bucket=0, add_suffix=False
)
df_feature_450 = get_stats_window(
df, create_feature_dict, seconds_in_bucket=450, add_suffix=True
)
df_feature_300 = get_stats_window(
df, create_feature_dict, seconds_in_bucket=300, add_suffix=True
)
df_feature_150 = get_stats_window(
df, create_feature_dict, seconds_in_bucket=150, add_suffix=True
)
df_feature_0_150 = get_stats_window2(
df,
create_feature_dict,
seconds_in_bucket_s=0,
seconds_in_bucket_e=150,
add_suffix=True,
)
df_feature_150_300 = get_stats_window2(
df,
create_feature_dict,
seconds_in_bucket_s=150,
seconds_in_bucket_e=300,
add_suffix=True,
)
df_feature_300_450 = get_stats_window2(
df,
create_feature_dict,
seconds_in_bucket_s=300,
seconds_in_bucket_e=450,
add_suffix=True,
)
df_feature_450_600 = get_stats_window2(
df,
create_feature_dict,
seconds_in_bucket_s=450,
seconds_in_bucket_e=600,
add_suffix=True,
)
# Merge all
df_feature = df_feature.merge(
df_feature_450, how="left", left_on="time_id_", right_on="time_id__450"
)
df_feature = df_feature.merge(
df_feature_300, how="left", left_on="time_id_", right_on="time_id__300"
)
df_feature = df_feature.merge(
df_feature_150, how="left", left_on="time_id_", right_on="time_id__150"
)
df_feature = df_feature.merge(
df_feature_0_150, how="left", left_on="time_id_", right_on="time_id__0_150"
)
df_feature = df_feature.merge(
df_feature_150_300, how="left", left_on="time_id_", right_on="time_id__150_300"
)
df_feature = df_feature.merge(
df_feature_300_450, how="left", left_on="time_id_", right_on="time_id__300_450"
)
df_feature = df_feature.merge(
df_feature_450_600, how="left", left_on="time_id_", right_on="time_id__450_600"
)
# Drop unnecesary time_ids
unused_time_id = df_feature.filter(like="time_id__").columns.tolist()
df_feature.drop(unused_time_id, axis=1, inplace=True)
# Create row_id so we can merge
stock_id = file_path.split("=")[1]
df_feature["row_id"] = df_feature["time_id_"].apply(lambda x: f"{stock_id}-{x}")
df_feature.drop(["time_id_"], axis=1, inplace=True)
return df_feature
# Function to preprocess trade data (for each stock id)
def trade_preprocessor(file_path):
df = pd.read_parquet(file_path)
df["log_return"] = df.groupby("time_id")["price"].apply(log_return)
df["size_per_order"] = df["size"] / df["order_count"]
# Dict for aggregations
create_feature_dict = {
"log_return": ["std", realized_volatility],
"seconds_in_bucket": ["size"],
"size": ["mean", "std", "sum", realized_volatility],
"order_count": ["mean", "std", "sum", realized_volatility],
"size_per_order": ["mean", "std", "sum", realized_volatility],
}
# Get the stats for different windows
df_feature = get_stats_window(
df, create_feature_dict, seconds_in_bucket=0, add_suffix=False
)
df_feature_450 = get_stats_window(
df, create_feature_dict, seconds_in_bucket=450, add_suffix=True
)
df_feature_300 = get_stats_window(
df, create_feature_dict, seconds_in_bucket=300, add_suffix=True
)
df_feature_150 = get_stats_window(
df, create_feature_dict, seconds_in_bucket=150, add_suffix=True
)
df_feature_0_150 = get_stats_window2(
df,
create_feature_dict,
seconds_in_bucket_s=0,
seconds_in_bucket_e=150,
add_suffix=True,
)
df_feature_150_300 = get_stats_window2(
df,
create_feature_dict,
seconds_in_bucket_s=150,
seconds_in_bucket_e=300,
add_suffix=True,
)
df_feature_300_450 = get_stats_window2(
df,
create_feature_dict,
seconds_in_bucket_s=300,
seconds_in_bucket_e=450,
add_suffix=True,
)
df_feature_450_600 = get_stats_window2(
df,
create_feature_dict,
seconds_in_bucket_s=450,
seconds_in_bucket_e=600,
add_suffix=True,
)
# Merge all
df_feature = df_feature.merge(
df_feature_450, how="left", left_on="time_id_", right_on="time_id__450"
)
df_feature = df_feature.merge(
df_feature_300, how="left", left_on="time_id_", right_on="time_id__300"
)
df_feature = df_feature.merge(
df_feature_150, how="left", left_on="time_id_", right_on="time_id__150"
)
df_feature = df_feature.merge(
df_feature_0_150, how="left", left_on="time_id_", right_on="time_id__0_150"
)
df_feature = df_feature.merge(
df_feature_150_300, how="left", left_on="time_id_", right_on="time_id__150_300"
)
df_feature = df_feature.merge(
df_feature_300_450, how="left", left_on="time_id_", right_on="time_id__300_450"
)
df_feature = df_feature.merge(
df_feature_450_600, how="left", left_on="time_id_", right_on="time_id__450_600"
)
# Drop unnecesary time_ids
unused_time_id = df_feature.filter(like="time_id__").columns.tolist()
df_feature.drop(unused_time_id, axis=1, inplace=True)
df_feature = df_feature.add_prefix("trade_")
stock_id = file_path.split("=")[1]
df_feature["row_id"] = df_feature["trade_time_id_"].apply(
lambda x: f"{stock_id}-{x}"
)
df_feature.drop(["trade_time_id_"], axis=1, inplace=True)
return df_feature
# Function to get group stats for the stock_id and time_id
def get_time_stock(df):
# Get realized volatility columns
# vol_cols = ['log_return1_realized_volatility', 'log_return2_realized_volatility', 'log_return1_realized_volatility_450', 'log_return2_realized_volatility_450',
# 'log_return1_realized_volatility_300', 'log_return2_realized_volatility_300', 'log_return1_realized_volatility_150', 'log_return2_realized_volatility_150',
# 'trade_log_return_realized_volatility', 'trade_log_return_realized_volatility_450', 'trade_log_return_realized_volatility_300', 'trade_log_return_realized_volatility_150']
vol_cols = [f for f in df.columns if ("realized_volatility" in f)]
# # Group by the stock id
# df_stock_id = df.groupby(['stock_id'])[vol_cols].agg(['mean', 'std', 'max', 'min', ]).reset_index()
# # Rename columns joining suffix
# df_stock_id.columns = ['_'.join(col) for col in df_stock_id.columns]
# df_stock_id = df_stock_id.add_suffix('_' + 'stock')
# Group by the stock id
df_time_id = (
df.groupby(["time_id"])[vol_cols]
.agg(
[
"mean",
"std",
"max",
"min",
]
)
.reset_index()
)
# Rename columns joining suffix
df_time_id.columns = ["_".join(col) for col in df_time_id.columns]
df_time_id = df_time_id.add_suffix("_" + "time")
# Merge with original dataframe
# df = df.merge(df_stock_id, how = 'left', left_on = ['stock_id'], right_on = ['stock_id__stock'])
df = df.merge(
df_time_id, how="left", left_on=["time_id"], right_on=["time_id__time"]
)
df.drop(["time_id__time"], axis=1, inplace=True)
return df
# Funtion to make preprocessing function in parallel (for each stock id)
def preprocessor(list_stock_ids, is_train=True):
from tqdm import tqdm
# Parrallel for loop
def for_joblib(stock_id):
# Train
if is_train:
file_path_book = data_dir + "book_train.parquet/stock_id=" + str(stock_id)
file_path_trade = data_dir + "trade_train.parquet/stock_id=" + str(stock_id)
# Test
else:
file_path_book = data_dir + "book_test.parquet/stock_id=" + str(stock_id)
file_path_trade = data_dir + "trade_test.parquet/stock_id=" + str(stock_id)
# Preprocess book and trade data and merge them
df_tmp = pd.merge(
book_preprocessor(file_path_book),
trade_preprocessor(file_path_trade),
on="row_id",
how="left",
)
# Return the merge dataframe
return df_tmp
# Use parallel api to call paralle for loop
df = Parallel(n_jobs=-1, verbose=1)(
delayed(for_joblib)(stock_id) for stock_id in tqdm(list_stock_ids)
)
# Concatenate all the dataframes that return from Parallel
df = pd.concat(df, ignore_index=True)
return df
# Function to calculate the root mean squared percentage error
def rmspe(y_true, y_pred):
return np.sqrt(np.mean(np.square((y_true - y_pred) / y_true)))
# Custom evaluation function (RMSPE) used for early stopping in LightGBM
def feval_rmspe(y_pred, lgb_train):
y_true = lgb_train.get_label()
return "RMSPE", rmspe(y_true, y_pred), False
train, test = read_train_test()
# Get unique stock ids
test_stock_ids = test["stock_id"].unique()
# Preprocess them using Parallel and our single stock id functions
test_ = preprocessor(test_stock_ids, is_train=False)
test = test.merge(test_, on=["row_id"], how="left")
test = get_time_stock(test)
features = test.drop(columns=["time_id", "row_id"]).columns
MODEL_DIR = "../input/lgbmv12"
y_preds = np.zeros(len(test))
files = glob.glob(f"{MODEL_DIR}/*lgbm*.pkl")
assert len(files) > 0
for i, f in enumerate(files):
print(f)
model = joblib.load(f)
y_preds += model.predict(test[features])
y_preds /= i + 1
test["target"] = y_preds
sub = test[["row_id", "target"]]
sub
sub.to_csv("submission.csv", index=False)
| false | 0 | 5,356 | 0 | 5,356 | 5,356 |
||
69980402
|
# # Machine Learning and Deep Learning Summer Internship Assignment 13
# ## ID: SIRSS1227
# ## Name: Pinaki Mishra
# # Cifar Solution:
from keras.datasets import cifar10
import matplotlib.pyplot as plt
from keras import models, layers
from tensorflow.keras.utils import to_categorical
from keras import optimizers
(xtrain, ytrain), (xtest, ytest) = cifar10.load_data()
print(xtrain.shape)
print(xtest.shape)
print(ytrain.shape)
print(ytest.shape)
# convert the pixel values to float
xtrain = xtrain.astype("float32")
xtest = xtest.astype("float32")
# scale the images
xtrain /= 255  # this is equivalent to xtrain = xtrain/255
xtest /= 255
ytrain = to_categorical(ytrain)
ytest = to_categorical(ytest)
labels = [
"airplane",
"automobile",
"bird",
"cat",
"deer",
"dog",
"frog",
"horse",
"ship",
"truck",
]
# explore data
print(ytrain[2000])
plt.imshow(xtrain[2000])
plt.show()
from datetime import datetime
def timer(start_time=None):
if not start_time:
print(datetime.now())
start_time = datetime.now()
return start_time
elif start_time:
thour, temp_sec = divmod((datetime.now() - start_time).total_seconds(), 3600)
tmin, tsec = divmod(temp_sec, 60)
print(
"Time taken: %i hours %i minutes and %s seconds."
% (thour, tmin, round(tsec, 2))
)
print(xtrain[10])
ytrain[10]
# ## Creating the Model Layers
model = models.Sequential()
model.add(
layers.Conv2D(
32, (3, 3), padding="same", activation="relu", input_shape=(32, 32, 3)
)
)
model.add(layers.Conv2D(32, (3, 3), padding="same", activation="relu"))
model.add(layers.MaxPooling2D(pool_size=2))
model.add(layers.Dropout(0.2))
model.add(layers.Conv2D(64, (3, 3), padding="same", activation="relu"))
model.add(layers.Conv2D(128, (3, 3), padding="same", activation="relu"))
model.add(layers.MaxPooling2D(pool_size=2))
model.add(layers.Dropout(0.2))
model.add(layers.Conv2D(64, (3, 3), padding="same", activation="relu"))
model.add(layers.Conv2D(64, (3, 3), padding="same", activation="relu"))
model.add(layers.MaxPooling2D(pool_size=2))
model.add(layers.Dropout(0.2))
# Classification layers
model.add(layers.Flatten())
model.add(layers.Dense(512, activation="relu"))
model.add(layers.Dropout(0.2))
model.add(layers.Dense(64, activation="relu"))
model.add(layers.Dropout(0.2))
model.add(layers.Dense(10, activation="softmax")) # this is the actual output layer
# initiate Adam optimizer
opt = optimizers.Adam(learning_rate=1e-4, decay=1e-6)
# Let's train the model
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
# ## Viewing the Model Summary
model.summary()
# ## Fitting the Model
start_time = timer(None)
result = model.fit(
xtrain,
ytrain,
validation_split=0.1,
verbose=True,
epochs=125,
steps_per_epoch=64,
batch_size=512,
)
timer(start_time)
# ## Visualizing the Performance
plt.figure(figsize=(20, 10))
plt.subplot(1, 2, 1)
plt.title("CNN Training and Validation Accuracy")
plt.plot(result.history["accuracy"], label="Training Accuracy")
plt.plot(result.history["val_accuracy"], label="Validation Accuracy")
plt.legend(loc="lower right")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.subplot(1, 2, 2)
plt.plot(result.history["loss"], label="Training Loss")
plt.plot(result.history["val_loss"], label="Validation Loss")
plt.legend(loc="upper right")
plt.title("CNN Training and Validation Loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.show()
# ## Check performance on test
# check performance on test
scores = model.evaluate(xtest, ytest, verbose=1)
print("Test loss:", scores[0])
print("Test accuracy:", scores[1])
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/980/69980402.ipynb
| null | null |
[{"Id": 69980402, "ScriptId": 19140431, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7737610, "CreationDate": "08/04/2021 05:33:11", "VersionNumber": 1.0, "Title": "notebookc4785d16d7", "EvaluationDate": "08/04/2021", "IsChange": true, "TotalLines": 139.0, "LinesInsertedFromPrevious": 139.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# # Machine Learning and Deep Learning Summer Internship Assignment 13
# ## ID: SIRSS1227
# ## Name: Pinaki Mishra
# # Cifar Solution:
from keras.datasets import cifar10
import matplotlib.pyplot as plt
from keras import models, layers
from tensorflow.keras.utils import to_categorical
from keras import optimizers
(xtrain, ytrain), (xtest, ytest) = cifar10.load_data()
print(xtrain.shape)
print(xtest.shape)
print(ytrain.shape)
print(ytest.shape)
# convert the pixel values to float
xtrain = xtrain.astype("float32")
xtest = xtest.astype("float32")
# scale the images
xtrain /= 255  # this is equivalent to xtrain = xtrain/255
xtest /= 255
ytrain = to_categorical(ytrain)
ytest = to_categorical(ytest)
labels = [
"airplane",
"automobile",
"bird",
"cat",
"deer",
"dog",
"frog",
"horse",
"ship",
"truck",
]
# explore data
print(ytrain[2000])
plt.imshow(xtrain[2000])
plt.show()
from datetime import datetime
def timer(start_time=None):
if not start_time:
print(datetime.now())
start_time = datetime.now()
return start_time
elif start_time:
thour, temp_sec = divmod((datetime.now() - start_time).total_seconds(), 3600)
tmin, tsec = divmod(temp_sec, 60)
print(
"Time taken: %i hours %i minutes and %s seconds."
% (thour, tmin, round(tsec, 2))
)
print(xtrain[10])
ytrain[10]
# ## Creating the Model Layers
model = models.Sequential()
model.add(
layers.Conv2D(
32, (3, 3), padding="same", activation="relu", input_shape=(32, 32, 3)
)
)
model.add(layers.Conv2D(32, (3, 3), padding="same", activation="relu"))
model.add(layers.MaxPooling2D(pool_size=2))
model.add(layers.Dropout(0.2))
model.add(layers.Conv2D(64, (3, 3), padding="same", activation="relu"))
model.add(layers.Conv2D(128, (3, 3), padding="same", activation="relu"))
model.add(layers.MaxPooling2D(pool_size=2))
model.add(layers.Dropout(0.2))
model.add(layers.Conv2D(64, (3, 3), padding="same", activation="relu"))
model.add(layers.Conv2D(64, (3, 3), padding="same", activation="relu"))
model.add(layers.MaxPooling2D(pool_size=2))
model.add(layers.Dropout(0.2))
# Classification layers
model.add(layers.Flatten())
model.add(layers.Dense(512, activation="relu"))
model.add(layers.Dropout(0.2))
model.add(layers.Dense(64, activation="relu"))
model.add(layers.Dropout(0.2))
model.add(layers.Dense(10, activation="softmax")) # this is the actual output layer
# initiate Adam optimizer
opt = optimizers.Adam(learning_rate=1e-4, decay=1e-6)
# Let's train the model
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
# ## Viewing the Model Summary
model.summary()
# ## Fitting the Model
start_time = timer(None)
result = model.fit(
xtrain,
ytrain,
validation_split=0.1,
verbose=True,
epochs=125,
steps_per_epoch=64,
batch_size=512,
)
timer(start_time)
# ## Visualizing the Performance
plt.figure(figsize=(20, 10))
plt.subplot(1, 2, 1)
plt.title("CNN Training and Validation Accuracy")
plt.plot(result.history["accuracy"], label="Training Accuracy")
plt.plot(result.history["val_accuracy"], label="Validation Accuracy")
plt.legend(loc="lower right")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.subplot(1, 2, 2)
plt.plot(result.history["loss"], label="Training Loss")
plt.plot(result.history["val_loss"], label="Validation Loss")
plt.legend(loc="upper right")
plt.title("CNN Training and Validation Loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.show()
# ## Check performance on test
# check performance on test
scores = model.evaluate(xtest, ytest, verbose=1)
print("Test loss:", scores[0])
print("Test accuracy:", scores[1])
| false | 0 | 1,267 | 0 | 1,267 | 1,267 |
||
69814832
|
<jupyter_start><jupyter_text>ffmpeg-python
Kaggle dataset identifier: ffmpegpython
<jupyter_script>from IPython.display import HTML
from base64 import b64encode
def play(filename):
html = ""
video = open(filename, "rb").read()
src = "data:video/mp4;base64," + b64encode(video).decode()
html += (
'<video width=600 controls autoplay loop><source src="%s" type="video/mp4"></video>'
% src
)
return HTML(html)
import numpy as np
import matplotlib.pyplot as plt
g = 9.81 # m/s^2
Rho = 2.7 # kg/m^2 (aluminium of 1mm-thickness)
Tension = 1000 # N
RhoT = Rho / Tension
w = 0.3
dx = dy = 1.0
len_x = 100.0
len_y = 100.0
x = np.arange(0.0, len_x + dx, dx)
y = np.arange(0.0, len_y + dy, dy)
X, Y = np.meshgrid(x, y)
T = np.zeros((y.shape[0], x.shape[0]))
# dirichlet conditions
T[:, 0] = T[:, -1] = T[0, :] = T[-1, :] = 0
icounter = -1
nk = 600 # Corresponds to the number of iterations.
for k in range(0, nk):
for j in range(1, y.shape[0] - 1):
for i in range(1, x.shape[0] - 1):
R = 0.25 * (
T[j, i + 1]
+ T[j, i - 1]
+ T[j + 1, i]
+ T[j - 1, i]
- g * RhoT * dx**2.0
)
T[j, i] = (1 - w) * T[j, i] + w * R
icounter = icounter + 1
if np.mod(icounter, 10) == 0:
ax = plt.axes(projection="3d")
p = ax.scatter(X, Y, T, c=T, cmap="jet", vmin=-25.0, vmax=0.0)
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
ax.set_zlim([-25.0, 0.0])
plt.colorbar(p)
plt.title("omega is 0.3 and iteration=%.6f" % (k))
plt.savefig("%06.6d.png" % (icounter))
plt.cla()
plt.clf()
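# The loop above only writes numbered PNG frames; the SOR_animation.mp4 file that
# play() expects below is never created here. A minimal sketch for stitching the
# frames into an MP4, assuming the ffmpeg binary is available in the environment
# (the attached ffmpeg-python wheel is an alternative); the frame rate of 10 is an
# arbitrary choice:
import subprocess

subprocess.run(
    [
        "ffmpeg", "-y", "-framerate", "10",
        "-pattern_type", "glob", "-i", "*.png",
        "-pix_fmt", "yuv420p", "SOR_animation.mp4",
    ],
    check=True,
)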
play("/kaggle/working/SOR_animation.mp4")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/814/69814832.ipynb
|
ffmpegpython
|
phoenix9032
|
[{"Id": 69814832, "ScriptId": 19055753, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7160783, "CreationDate": "08/03/2021 11:58:33", "VersionNumber": 1.0, "Title": "Diffusion_1.1_20B60087", "EvaluationDate": "08/03/2021", "IsChange": true, "TotalLines": 57.0, "LinesInsertedFromPrevious": 57.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 93268655, "KernelVersionId": 69814832, "SourceDatasetVersionId": 841430}]
|
[{"Id": 841430, "DatasetId": 444085, "DatasourceVersionId": 864336, "CreatorUserId": 2234817, "LicenseName": "Unknown", "CreationDate": "12/13/2019 06:08:04", "VersionNumber": 1.0, "Title": "ffmpeg-python", "Slug": "ffmpegpython", "Subtitle": "This is a wheel for ffmpeg python wheel for offline installation", "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 444085, "CreatorUserId": 2234817, "OwnerUserId": 2234817.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 841430.0, "CurrentDatasourceVersionId": 864336.0, "ForumId": 456869, "Type": 2, "CreationDate": "12/13/2019 06:08:04", "LastActivityDate": "12/13/2019", "TotalViews": 1872, "TotalDownloads": 64, "TotalVotes": 3, "TotalKernels": 25}]
|
[{"Id": 2234817, "UserName": "phoenix9032", "DisplayName": "Doomsday", "RegisterDate": "09/11/2018", "PerformanceTier": 3}]
|
from IPython.display import HTML
from base64 import b64encode
def play(filename):
html = ""
video = open(filename, "rb").read()
src = "data:video/mp4;base64," + b64encode(video).decode()
html += (
'<video width=600 controls autoplay loop><source src="%s" type="video/mp4"></video>'
% src
)
return HTML(html)
import numpy as np
import matplotlib.pyplot as plt
g = 9.81 # m/s^2
Rho = 2.7 # kg/m^2 (aluminium of 1mm-thickness)
Tension = 1000 # N
RhoT = Rho / Tension
w = 0.3
dx = dy = 1.0
len_x = 100.0
len_y = 100.0
x = np.arange(0.0, len_x + dx, dx)
y = np.arange(0.0, len_y + dy, dy)
X, Y = np.meshgrid(x, y)
T = np.zeros((y.shape[0], x.shape[0]))
# dirichlet conditions
T[:, 0] = T[:, -1] = T[0, :] = T[-1, :] = 0
icounter = -1
nk = 600 # Corresponds to the number of iterations.
for k in range(0, nk):
for j in range(1, y.shape[0] - 1):
for i in range(1, x.shape[0] - 1):
R = 0.25 * (
T[j, i + 1]
+ T[j, i - 1]
+ T[j + 1, i]
+ T[j - 1, i]
- g * RhoT * dx**2.0
)
T[j, i] = (1 - w) * T[j, i] + w * R
icounter = icounter + 1
if np.mod(icounter, 10) == 0:
ax = plt.axes(projection="3d")
p = ax.scatter(X, Y, T, c=T, cmap="jet", vmin=-25.0, vmax=0.0)
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
ax.set_zlim([-25.0, 0.0])
plt.colorbar(p)
plt.title("omega is 0.3 and iteration=%.6f" % (k))
plt.savefig("%06.6d.png" % (icounter))
plt.cla()
plt.clf()
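# The loop above only writes numbered PNG frames; the SOR_animation.mp4 file that
# play() expects below is never created here. A minimal sketch for stitching the
# frames into an MP4, assuming the ffmpeg binary is available in the environment
# (the attached ffmpeg-python wheel is an alternative); the frame rate of 10 is an
# arbitrary choice:
import subprocess

subprocess.run(
    [
        "ffmpeg", "-y", "-framerate", "10",
        "-pattern_type", "glob", "-i", "*.png",
        "-pix_fmt", "yuv420p", "SOR_animation.mp4",
    ],
    check=True,
)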
play("/kaggle/working/SOR_animation.mp4")
| false | 0 | 683 | 0 | 702 | 683 |
||
69814288
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
INIT_PATH = "/kaggle/input/2021-data-creator-camp-example"
SAVE_PATH = "/kaggle/working/"
train_data = os.path.join(INIT_PATH, "sample_train.csv")
test_data = os.path.join(INIT_PATH, "sample_test.csv")
train_data = pd.read_csv(train_data)
train_data.head()
test_data = pd.read_csv(test_data)
test_data.head()
X_train = train_data.drop(["id", "species"], axis=1)
y_train = train_data["species"]
X_test = test_data.drop(["id"], axis=1)
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
from datetime import datetime
today = datetime.now().strftime("%Y%m%d_%H%M%S")
name = "sample_submission"
csv_name = os.path.join(SAVE_PATH, f"{name}_{today}.csv")
# Save the results file
submission = pd.DataFrame(data=y_pred, columns=["species"])
submission["id"] = test_data["id"]
submission.to_csv(csv_name, index=False)
display(submission)
# print(f'|INFO| DATE: {today}')
print(f"|INFO| 제출 파일 저장 완료: {csv_name}")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/814/69814288.ipynb
| null | null |
[{"Id": 69814288, "ScriptId": 19083501, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7082127, "CreationDate": "08/03/2021 11:55:36", "VersionNumber": 2.0, "Title": "version1", "EvaluationDate": "08/03/2021", "IsChange": true, "TotalLines": 53.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 52.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
INIT_PATH = "/kaggle/input/2021-data-creator-camp-example"
SAVE_PATH = "/kaggle/working/"
train_data = os.path.join(INIT_PATH, "sample_train.csv")
test_data = os.path.join(INIT_PATH, "sample_test.csv")
train_data = pd.read_csv(train_data)
train_data.head()
test_data = pd.read_csv(test_data)
test_data.head()
X_train = train_data.drop(["id", "species"], axis=1)
y_train = train_data["species"]
X_test = test_data.drop(["id"], axis=1)
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
from datetime import datetime
today = datetime.now().strftime("%Y%m%d_%H%M%S")
name = "sample_submission"
csv_name = os.path.join(SAVE_PATH, f"{name}_{today}.csv")
# Save the results file
submission = pd.DataFrame(data=y_pred, columns=["species"])
submission["id"] = test_data["id"]
submission.to_csv(csv_name, index=False)
display(submission)
# print(f'|INFO| DATE: {today}')
print(f"|INFO| 제출 파일 저장 완료: {csv_name}")
| false | 0 | 553 | 0 | 553 | 553 |
||
69814841
|
<jupyter_start><jupyter_text>Medical Insurance Premium Prediction
### Context
A Medical Insurance Company Has Released Data For Almost 1000 Customers. Create A Model That Predicts The Yearly Medical Cover Cost. The Data Is Voluntarily Given By Customers.
### Content
The Dataset Contains Health Related Parameters Of The Customers. Use Them To Build A Model And Also Perform EDA On The Same.
The Premium Price Is In INR(₹) Currency And Showcases Prices For A Whole Year.
### Inspiration
Help Solve A Crucial Finance Problem That Would Potentially Impact Many People And Would Help Them Make Better Decisions.
Don't Forget To Submit Your EDAs And Models In The Task Section. These Will Be Keenly Reviewed
Hope You Enjoy Working On The Data.
Image Credits-Unsplash
Kaggle dataset identifier: medical-insurance-premium-prediction
<jupyter_code>import pandas as pd
df = pd.read_csv('medical-insurance-premium-prediction/Medicalpremium.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 986 entries, 0 to 985
Data columns (total 11 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Age 986 non-null int64
1 Diabetes 986 non-null int64
2 BloodPressureProblems 986 non-null int64
3 AnyTransplants 986 non-null int64
4 AnyChronicDiseases 986 non-null int64
5 Height 986 non-null int64
6 Weight 986 non-null int64
7 KnownAllergies 986 non-null int64
8 HistoryOfCancerInFamily 986 non-null int64
9 NumberOfMajorSurgeries 986 non-null int64
10 PremiumPrice 986 non-null int64
dtypes: int64(11)
memory usage: 84.9 KB
<jupyter_text>Examples:
{
"Age": 45,
"Diabetes": 0,
"BloodPressureProblems": 0,
"AnyTransplants": 0,
"AnyChronicDiseases": 0,
"Height": 155,
"Weight": 57,
"KnownAllergies": 0,
"HistoryOfCancerInFamily": 0,
"NumberOfMajorSurgeries": 0,
"PremiumPrice": 25000
}
{
"Age": 60,
"Diabetes": 1,
"BloodPressureProblems": 0,
"AnyTransplants": 0,
"AnyChronicDiseases": 0,
"Height": 180,
"Weight": 73,
"KnownAllergies": 0,
"HistoryOfCancerInFamily": 0,
"NumberOfMajorSurgeries": 0,
"PremiumPrice": 29000
}
{
"Age": 36,
"Diabetes": 1,
"BloodPressureProblems": 1,
"AnyTransplants": 0,
"AnyChronicDiseases": 0,
"Height": 158,
"Weight": 59,
"KnownAllergies": 0,
"HistoryOfCancerInFamily": 0,
"NumberOfMajorSurgeries": 1,
"PremiumPrice": 23000
}
{
"Age": 52,
"Diabetes": 1,
"BloodPressureProblems": 1,
"AnyTransplants": 0,
"AnyChronicDiseases": 1,
"Height": 183,
"Weight": 93,
"KnownAllergies": 0,
"HistoryOfCancerInFamily": 0,
"NumberOfMajorSurgeries": 2,
"PremiumPrice": 28000
}
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from plotly.offline import init_notebook_mode, iplot, plot
import plotly.graph_objs as go
import plotly.express as px
import seaborn as sns
from matplotlib import pyplot as plt
from plotly.subplots import make_subplots
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# **Objective:**
# 1. Perform EDA
# 1. Data Cleanup And Preparation
# 1. Build Model
#
df = pd.read_csv(
"/kaggle/input/medical-insurance-premium-prediction/Medicalpremium.csv"
)
df.shape
# **Basic Stats about the data:**
df.describe().T
df.isna().sum()
# # Feature Engineering:
def conditions(s):
if (
(s["Diabetes"] != 0)
or (s["BloodPressureProblems"] != 0)
or (s["AnyTransplants"] != 0)
or (s["AnyChronicDiseases"] != 0)
or (s["KnownAllergies"] != 0)
or (s["HistoryOfCancerInFamily"] != 0)
or (s["NumberOfMajorSurgeries"] > 1)
        or not (19 <= s["BMI"] < 25)  # BMI outside the normal range
):
return 1
else:
return 0
# Adding a few other features that can be derived from the existing data:
# 1. **Body Mass Index (BMI)**
# 1. **Any medical condition flag:** the customer's overall health status, including BMI. A value of 1 is assigned to customers having any medical condition or a BMI outside the normal range, and 0 otherwise.
# Calculating BMI
df["BMI"] = (df["Weight"] / df["Height"] ** 2) * 10000
# Creating a new variable to see number of customers who are totally fit vs others
df["anymedicalcondition"] = df.apply(conditions, axis=1)
# # EDA
# **Checking if BMI has any impact on Premium Price:**
fig = go.Figure()
fig.add_trace(go.Scatter(x=df["PremiumPrice"], y=df["BMI"], mode="markers"))
fig.update_layout(title="Premium Price (INR) Vs BMI")
fig.update_xaxes(title="Premium Price (INR)")
fig.update_yaxes(title="BMI")
fig.show()
# The chart shows no clear pattern, so BMI does not appear to have either a positive or a negative impact on Premium Price.
# **Looking at the Age distribution of customers by Diabetes condition**
# fig = go.Figure()
# fig.add_trace(go.Histogram(x=df.loc[df['Diabetes']!=1,'Age'],name='Age Dist (without Diabetes)'))
# fig.add_trace(go.Histogram(x=df.loc[df['Diabetes']==1,'Age'],name='Age Dist (with Diabetes)'))
# fig.update_layout(barmode='overlay',title = 'Age Distribution For Diabetic Vs Non Diabetic Customers')
# fig.update_xaxes(title='Age')
# fig.update_yaxes(title='Count')
# # Reduce opacity to see both histograms
# fig.update_traces(opacity=0.75)
# fig.show()
this_figure = make_subplots(rows=1, cols=2)
fig1 = px.box(df, x="Diabetes", y="Age")
figure2_traces = []
for trace in range(len(fig1["data"])):
figure2_traces.append(fig1["data"][trace])
for traces in figure2_traces:
this_figure.append_trace(traces, row=1, col=2)
this_figure.add_trace(
go.Histogram(
x=df.loc[df["Diabetes"] != 1, "Age"], name="Age Dist (without Diabetes)"
),
row=1,
col=1,
)
this_figure.add_trace(
go.Histogram(x=df.loc[df["Diabetes"] == 1, "Age"], name="Age Dist (with Diabetes)"),
row=1,
col=1,
)
this_figure.update_layout(
title="Age Distribution of Diabetes and Non Diabetic customers"
)
this_figure.update_xaxes(title="Age (in years)", row=1, col=1)
this_figure.update_xaxes(title="Diabetic?", row=1, col=2)
this_figure.update_yaxes(title="Age", row=1, col=2)
this_figure.update_yaxes(title="Count", row=1, col=1)
this_figure.show()
# The histogram above shows that the diabetic customers in this dataset are concentrated in the higher age groups.
def CorrMtx(df, dropDuplicates=True):
df = df.corr()
    # Exclude duplicate correlations by masking the upper-right values
    if dropDuplicates:
        mask = np.zeros_like(df, dtype=bool)  # np.bool is deprecated; use the builtin bool
mask[np.triu_indices_from(mask)] = True
# Set background color / chart style
sns.set_style(style="white")
# Set up matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Add diverging colormap from red to blue
cmap = sns.diverging_palette(250, 10, as_cmap=True)
# Draw correlation plot with or without duplicates
if dropDuplicates:
sns.heatmap(
df,
mask=mask,
cmap=cmap,
annot=True,
square=True,
linewidth=0.5,
cbar_kws={"shrink": 0.5},
ax=ax,
)
else:
sns.heatmap(
df,
cmap=cmap,
annot=True,
square=True,
linewidth=0.5,
cbar_kws={"shrink": 0.5},
ax=ax,
)
CorrMtx(df)  # CorrMtx already calls .corr() internally
# **A few key insights we can observe from the chart above:**
# 1. Premium Price is strongly influenced by the customer's Age; the correlation coefficient of **+0.85** indicates a very strong positive relationship.
# 1. Premium Price is also moderately influenced by the number of major surgeries the customer has had in the past; the correlation coefficient is **+0.43**.
# 1. Our engineered feature anymedicalcondition also shows a positive correlation of **+0.4** (which is expected, as it is derived from a combination of the other features).
# *On a side note: Premium Price shows a very slight negative correlation with a few variables, which can be ignored but seems interesting to explore.*
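# The coefficients quoted above can be read off directly as a quick numeric
# cross-check of the heatmap (illustrative only):
print(df.corr()["PremiumPrice"].sort_values(ascending=False))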
fig = px.scatter(df, x="Age", y="PremiumPrice", color="Age", trendline="lowess")
fig.update_layout(title="Age Vs Premium Price")
fig.show()
fig = px.scatter(
df,
x="NumberOfMajorSurgeries",
y="PremiumPrice",
color="NumberOfMajorSurgeries",
trendline="ols",
)
fig1 = px.box(df, x="NumberOfMajorSurgeries", y="PremiumPrice")
figure1_traces = []
figure2_traces = []
for trace in range(len(fig["data"])):
figure1_traces.append(fig["data"][trace])
for trace in range(len(fig1["data"])):
figure2_traces.append(fig1["data"][trace])
this_figure = make_subplots(rows=1, cols=2)
for traces in figure1_traces:
this_figure.append_trace(traces, row=1, col=1)
for traces in figure2_traces:
this_figure.append_trace(traces, row=1, col=2)
this_figure.update_layout(title="Number of Major Surgeries Vs Premium Price")
this_figure.update_xaxes(title="Number of Major Surgeries")
this_figure.update_yaxes(title="Premium Price (INR)")
this_figure.show()
fig = px.scatter(
df, x="anymedicalcondition", y="PremiumPrice", color="Age", trendline="ols"
)
fig.update_layout(title="Medical Conditions Vs Premium Price")
fig = px.scatter(
df,
x="anymedicalcondition",
y="PremiumPrice",
color="NumberOfMajorSurgeries",
trendline="ols",
)
fig1 = px.box(df, x="anymedicalcondition", y="PremiumPrice")
figure1_traces = []
figure2_traces = []
for trace in range(len(fig["data"])):
figure1_traces.append(fig["data"][trace])
for trace in range(len(fig1["data"])):
figure2_traces.append(fig1["data"][trace])
this_figure = make_subplots(rows=1, cols=2)
for traces in figure1_traces:
this_figure.append_trace(traces, row=1, col=1)
for traces in figure2_traces:
this_figure.append_trace(traces, row=1, col=2)
this_figure.update_layout(title="Medical Condition Vs Premium Price")
this_figure.update_xaxes(title="Medical Condition")
this_figure.update_yaxes(title="Premium Price (INR)")
this_figure.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/814/69814841.ipynb
|
medical-insurance-premium-prediction
|
tejashvi14
|
[{"Id": 69814841, "ScriptId": 19018350, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3824922, "CreationDate": "08/03/2021 11:58:36", "VersionNumber": 1.0, "Title": "Medical Premium EDA", "EvaluationDate": "08/03/2021", "IsChange": true, "TotalLines": 199.0, "LinesInsertedFromPrevious": 199.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 93268660, "KernelVersionId": 69814841, "SourceDatasetVersionId": 2490565}]
|
[{"Id": 2490565, "DatasetId": 1507683, "DatasourceVersionId": 2533140, "CreatorUserId": 5472192, "LicenseName": "CC0: Public Domain", "CreationDate": "08/02/2021 07:49:44", "VersionNumber": 1.0, "Title": "Medical Insurance Premium Prediction", "Slug": "medical-insurance-premium-prediction", "Subtitle": "Predict Yearly Medical Cover Cost(\u20b9)", "Description": "### Context\n\nA Medical Insurance Company Has Released Data For Almost 1000 Customers. Create A Model That Predicts The Yearly Medical Cover Cost. The Data Is Voluntarily Given By Customers.\n\n\n### Content\n\nThe Dataset Contains Health Related Parameters Of The Customers. Use Them To Build A Model And Also Perform EDA On The Same. \nThe Premium Price Is In INR(\u20b9) Currency And Showcases Prices For A Whole Year.\n\n### Inspiration\n\nHelp Solve A Crucial Finance Problem That Would Potentially Impact Many People And Would Help Them Make Better Decisions.\nDon't Forget To Submit Your EDAs And Models In The Task Section. These Will Be Keenly Reviewed\nHope You Enjoy Working On The Data.\n\nImage Credits-Unsplash", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1507683, "CreatorUserId": 5472192, "OwnerUserId": 5472192.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2497017.0, "CurrentDatasourceVersionId": 2539640.0, "ForumId": 1527430, "Type": 2, "CreationDate": "08/02/2021 07:49:44", "LastActivityDate": "08/02/2021", "TotalViews": 47770, "TotalDownloads": 5196, "TotalVotes": 88, "TotalKernels": 17}]
|
[{"Id": 5472192, "UserName": "tejashvi14", "DisplayName": "Tejashvi", "RegisterDate": "07/15/2020", "PerformanceTier": 3}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from plotly.offline import init_notebook_mode, iplot, plot
import plotly.graph_objs as go
import plotly.express as px
import seaborn as sns
from matplotlib import pyplot as plt
from plotly.subplots import make_subplots
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# **Objective:**
# 1. Perform EDA
# 1. Data Cleanup And Preparation
# 1. Build Model
#
df = pd.read_csv(
"/kaggle/input/medical-insurance-premium-prediction/Medicalpremium.csv"
)
df.shape
# **Basic Stats about the data:**
df.describe().T
df.isna().sum()
# # Feature Engineering:
def conditions(s):
if (
(s["Diabetes"] != 0)
or (s["BloodPressureProblems"] != 0)
or (s["AnyTransplants"] != 0)
or (s["AnyChronicDiseases"] != 0)
or (s["KnownAllergies"] != 0)
or (s["HistoryOfCancerInFamily"] != 0)
or (s["NumberOfMajorSurgeries"] > 1)
        or not (19 <= s["BMI"] < 25)  # BMI outside the normal range
):
return 1
else:
return 0
# Adding a few other features that can be derived from the existing data:
# 1. **Body Mass Index (BMI)**
# 1. **Any medical condition flag:** the customer's overall health status, including BMI. A value of 1 is assigned to customers having any medical condition or a BMI outside the normal range, and 0 otherwise.
# Calculating BMI
df["BMI"] = (df["Weight"] / df["Height"] ** 2) * 10000
# Creating a new variable to see number of customers who are totally fit vs others
df["anymedicalcondition"] = df.apply(conditions, axis=1)
# # EDA
# **Checking if BMI has any impact on Premium Price:**
fig = go.Figure()
fig.add_trace(go.Scatter(x=df["PremiumPrice"], y=df["BMI"], mode="markers"))
fig.update_layout(title="Premium Price (INR) Vs BMI")
fig.update_xaxes(title="Premium Price (INR)")
fig.update_yaxes(title="BMI")
fig.show()
# The chart shows no clear pattern, so BMI does not appear to have either a positive or a negative impact on Premium Price.
# **Looking at the Age distribution of customers by Diabetes condition**
# fig = go.Figure()
# fig.add_trace(go.Histogram(x=df.loc[df['Diabetes']!=1,'Age'],name='Age Dist (without Diabetes)'))
# fig.add_trace(go.Histogram(x=df.loc[df['Diabetes']==1,'Age'],name='Age Dist (with Diabetes)'))
# fig.update_layout(barmode='overlay',title = 'Age Distribution For Diabetic Vs Non Diabetic Customers')
# fig.update_xaxes(title='Age')
# fig.update_yaxes(title='Count')
# # Reduce opacity to see both histograms
# fig.update_traces(opacity=0.75)
# fig.show()
this_figure = make_subplots(rows=1, cols=2)
fig1 = px.box(df, x="Diabetes", y="Age")
figure2_traces = []
for trace in range(len(fig1["data"])):
figure2_traces.append(fig1["data"][trace])
for traces in figure2_traces:
this_figure.append_trace(traces, row=1, col=2)
this_figure.add_trace(
go.Histogram(
x=df.loc[df["Diabetes"] != 1, "Age"], name="Age Dist (without Diabetes)"
),
row=1,
col=1,
)
this_figure.add_trace(
go.Histogram(x=df.loc[df["Diabetes"] == 1, "Age"], name="Age Dist (with Diabetes)"),
row=1,
col=1,
)
this_figure.update_layout(
title="Age Distribution of Diabetes and Non Diabetic customers"
)
this_figure.update_xaxes(title="Age (in years)", row=1, col=1)
this_figure.update_xaxes(title="Diabetic?", row=1, col=2)
this_figure.update_yaxes(title="Age", row=1, col=2)
this_figure.update_yaxes(title="Count", row=1, col=1)
this_figure.show()
# The histogram above shows that the diabetic customers in this dataset are concentrated in the higher age groups.
def CorrMtx(df, dropDuplicates=True):
df = df.corr()
    # Exclude duplicate correlations by masking the upper-right values
    if dropDuplicates:
        mask = np.zeros_like(df, dtype=bool)  # np.bool is deprecated; use the builtin bool
mask[np.triu_indices_from(mask)] = True
# Set background color / chart style
sns.set_style(style="white")
# Set up matplotlib figure
f, ax = plt.subplots(figsize=(11, 9))
# Add diverging colormap from red to blue
cmap = sns.diverging_palette(250, 10, as_cmap=True)
# Draw correlation plot with or without duplicates
if dropDuplicates:
sns.heatmap(
df,
mask=mask,
cmap=cmap,
annot=True,
square=True,
linewidth=0.5,
cbar_kws={"shrink": 0.5},
ax=ax,
)
else:
sns.heatmap(
df,
cmap=cmap,
annot=True,
square=True,
linewidth=0.5,
cbar_kws={"shrink": 0.5},
ax=ax,
)
CorrMtx(df)  # CorrMtx already calls .corr() internally
# **A few key insights we can observe from the chart above:**
# 1. Premium Price is strongly influenced by the customer's Age; the correlation coefficient of **+0.85** indicates a very strong positive relationship.
# 1. Premium Price is also moderately influenced by the number of major surgeries the customer has had in the past; the correlation coefficient is **+0.43**.
# 1. Our engineered feature anymedicalcondition also shows a positive correlation of **+0.4** (which is expected, as it is derived from a combination of the other features).
# *On a side note: Premium Price shows a very slight negative correlation with a few variables, which can be ignored but seems interesting to explore.*
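# The coefficients quoted above can be read off directly as a quick numeric
# cross-check of the heatmap (illustrative only):
print(df.corr()["PremiumPrice"].sort_values(ascending=False))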
fig = px.scatter(df, x="Age", y="PremiumPrice", color="Age", trendline="lowess")
fig.update_layout(title="Age Vs Premium Price")
fig.show()
fig = px.scatter(
df,
x="NumberOfMajorSurgeries",
y="PremiumPrice",
color="NumberOfMajorSurgeries",
trendline="ols",
)
fig1 = px.box(df, x="NumberOfMajorSurgeries", y="PremiumPrice")
figure1_traces = []
figure2_traces = []
for trace in range(len(fig["data"])):
figure1_traces.append(fig["data"][trace])
for trace in range(len(fig1["data"])):
figure2_traces.append(fig1["data"][trace])
this_figure = make_subplots(rows=1, cols=2)
for traces in figure1_traces:
this_figure.append_trace(traces, row=1, col=1)
for traces in figure2_traces:
this_figure.append_trace(traces, row=1, col=2)
this_figure.update_layout(title="Number of Major Surgeries Vs Premium Price")
this_figure.update_xaxes(title="Number of Major Surgeries")
this_figure.update_yaxes(title="Premium Price (INR)")
this_figure.show()
fig = px.scatter(
df, x="anymedicalcondition", y="PremiumPrice", color="Age", trendline="ols"
)
fig.update_layout(title="Medical Conditions Vs Premium Price")
fig = px.scatter(
df,
x="anymedicalcondition",
y="PremiumPrice",
color="NumberOfMajorSurgeries",
trendline="ols",
)
fig1 = px.box(df, x="anymedicalcondition", y="PremiumPrice")
figure1_traces = []
figure2_traces = []
for trace in range(len(fig["data"])):
figure1_traces.append(fig["data"][trace])
for trace in range(len(fig1["data"])):
figure2_traces.append(fig1["data"][trace])
this_figure = make_subplots(rows=1, cols=2)
for traces in figure1_traces:
this_figure.append_trace(traces, row=1, col=1)
for traces in figure2_traces:
this_figure.append_trace(traces, row=1, col=2)
this_figure.update_layout(title="Medical Condition Vs Premium Price")
this_figure.update_xaxes(title="Medical Condition")
this_figure.update_yaxes(title="Premium Price (INR)")
this_figure.show()
|
[{"medical-insurance-premium-prediction/Medicalpremium.csv": {"column_names": "[\"Age\", \"Diabetes\", \"BloodPressureProblems\", \"AnyTransplants\", \"AnyChronicDiseases\", \"Height\", \"Weight\", \"KnownAllergies\", \"HistoryOfCancerInFamily\", \"NumberOfMajorSurgeries\", \"PremiumPrice\"]", "column_data_types": "{\"Age\": \"int64\", \"Diabetes\": \"int64\", \"BloodPressureProblems\": \"int64\", \"AnyTransplants\": \"int64\", \"AnyChronicDiseases\": \"int64\", \"Height\": \"int64\", \"Weight\": \"int64\", \"KnownAllergies\": \"int64\", \"HistoryOfCancerInFamily\": \"int64\", \"NumberOfMajorSurgeries\": \"int64\", \"PremiumPrice\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 986 entries, 0 to 985\nData columns (total 11 columns):\n # Column Non-Null Count Dtype\n--- ------ -------------- -----\n 0 Age 986 non-null int64\n 1 Diabetes 986 non-null int64\n 2 BloodPressureProblems 986 non-null int64\n 3 AnyTransplants 986 non-null int64\n 4 AnyChronicDiseases 986 non-null int64\n 5 Height 986 non-null int64\n 6 Weight 986 non-null int64\n 7 KnownAllergies 986 non-null int64\n 8 HistoryOfCancerInFamily 986 non-null int64\n 9 NumberOfMajorSurgeries 986 non-null int64\n 10 PremiumPrice 986 non-null int64\ndtypes: int64(11)\nmemory usage: 84.9 KB\n", "summary": "{\"Age\": {\"count\": 986.0, \"mean\": 41.74543610547667, \"std\": 13.963371389855682, \"min\": 18.0, \"25%\": 30.0, \"50%\": 42.0, \"75%\": 53.0, \"max\": 66.0}, \"Diabetes\": {\"count\": 986.0, \"mean\": 0.4198782961460446, \"std\": 0.49378922875252945, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 1.0}, \"BloodPressureProblems\": {\"count\": 986.0, \"mean\": 0.4685598377281947, \"std\": 0.49926377774285313, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 1.0}, \"AnyTransplants\": {\"count\": 986.0, \"mean\": 0.055780933062880324, \"std\": 0.22961465994678726, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"AnyChronicDiseases\": {\"count\": 986.0, \"mean\": 0.18052738336713997, \"std\": 0.3848213056997442, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"Height\": {\"count\": 986.0, \"mean\": 168.18255578093306, \"std\": 10.098154827654469, \"min\": 145.0, \"25%\": 161.0, \"50%\": 168.0, \"75%\": 176.0, \"max\": 188.0}, \"Weight\": {\"count\": 986.0, \"mean\": 76.95030425963489, \"std\": 14.265095839082017, \"min\": 51.0, \"25%\": 67.0, \"50%\": 75.0, \"75%\": 87.0, \"max\": 132.0}, \"KnownAllergies\": {\"count\": 986.0, \"mean\": 0.2150101419878296, \"std\": 0.41103787158451843, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"HistoryOfCancerInFamily\": {\"count\": 986.0, \"mean\": 0.11764705882352941, \"std\": 0.3223532463115337, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"NumberOfMajorSurgeries\": {\"count\": 986.0, \"mean\": 0.6673427991886409, \"std\": 0.749204951277794, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 3.0}, \"PremiumPrice\": {\"count\": 986.0, \"mean\": 24336.713995943206, \"std\": 6248.184382239677, \"min\": 15000.0, \"25%\": 21000.0, \"50%\": 23000.0, \"75%\": 28000.0, \"max\": 40000.0}}", "examples": 
"{\"Age\":{\"0\":45,\"1\":60,\"2\":36,\"3\":52},\"Diabetes\":{\"0\":0,\"1\":1,\"2\":1,\"3\":1},\"BloodPressureProblems\":{\"0\":0,\"1\":0,\"2\":1,\"3\":1},\"AnyTransplants\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"AnyChronicDiseases\":{\"0\":0,\"1\":0,\"2\":0,\"3\":1},\"Height\":{\"0\":155,\"1\":180,\"2\":158,\"3\":183},\"Weight\":{\"0\":57,\"1\":73,\"2\":59,\"3\":93},\"KnownAllergies\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"HistoryOfCancerInFamily\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"NumberOfMajorSurgeries\":{\"0\":0,\"1\":0,\"2\":1,\"3\":2},\"PremiumPrice\":{\"0\":25000,\"1\":29000,\"2\":23000,\"3\":28000}}"}}]
| true | 1 |
<start_data_description><data_path>medical-insurance-premium-prediction/Medicalpremium.csv:
<column_names>
['Age', 'Diabetes', 'BloodPressureProblems', 'AnyTransplants', 'AnyChronicDiseases', 'Height', 'Weight', 'KnownAllergies', 'HistoryOfCancerInFamily', 'NumberOfMajorSurgeries', 'PremiumPrice']
<column_types>
{'Age': 'int64', 'Diabetes': 'int64', 'BloodPressureProblems': 'int64', 'AnyTransplants': 'int64', 'AnyChronicDiseases': 'int64', 'Height': 'int64', 'Weight': 'int64', 'KnownAllergies': 'int64', 'HistoryOfCancerInFamily': 'int64', 'NumberOfMajorSurgeries': 'int64', 'PremiumPrice': 'int64'}
<dataframe_Summary>
{'Age': {'count': 986.0, 'mean': 41.74543610547667, 'std': 13.963371389855682, 'min': 18.0, '25%': 30.0, '50%': 42.0, '75%': 53.0, 'max': 66.0}, 'Diabetes': {'count': 986.0, 'mean': 0.4198782961460446, 'std': 0.49378922875252945, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 1.0}, 'BloodPressureProblems': {'count': 986.0, 'mean': 0.4685598377281947, 'std': 0.49926377774285313, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 1.0}, 'AnyTransplants': {'count': 986.0, 'mean': 0.055780933062880324, 'std': 0.22961465994678726, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'AnyChronicDiseases': {'count': 986.0, 'mean': 0.18052738336713997, 'std': 0.3848213056997442, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'Height': {'count': 986.0, 'mean': 168.18255578093306, 'std': 10.098154827654469, 'min': 145.0, '25%': 161.0, '50%': 168.0, '75%': 176.0, 'max': 188.0}, 'Weight': {'count': 986.0, 'mean': 76.95030425963489, 'std': 14.265095839082017, 'min': 51.0, '25%': 67.0, '50%': 75.0, '75%': 87.0, 'max': 132.0}, 'KnownAllergies': {'count': 986.0, 'mean': 0.2150101419878296, 'std': 0.41103787158451843, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'HistoryOfCancerInFamily': {'count': 986.0, 'mean': 0.11764705882352941, 'std': 0.3223532463115337, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'NumberOfMajorSurgeries': {'count': 986.0, 'mean': 0.6673427991886409, 'std': 0.749204951277794, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 1.0, 'max': 3.0}, 'PremiumPrice': {'count': 986.0, 'mean': 24336.713995943206, 'std': 6248.184382239677, 'min': 15000.0, '25%': 21000.0, '50%': 23000.0, '75%': 28000.0, 'max': 40000.0}}
<dataframe_info>
RangeIndex: 986 entries, 0 to 985
Data columns (total 11 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Age 986 non-null int64
1 Diabetes 986 non-null int64
2 BloodPressureProblems 986 non-null int64
3 AnyTransplants 986 non-null int64
4 AnyChronicDiseases 986 non-null int64
5 Height 986 non-null int64
6 Weight 986 non-null int64
7 KnownAllergies 986 non-null int64
8 HistoryOfCancerInFamily 986 non-null int64
9 NumberOfMajorSurgeries 986 non-null int64
10 PremiumPrice 986 non-null int64
dtypes: int64(11)
memory usage: 84.9 KB
<some_examples>
{'Age': {'0': 45, '1': 60, '2': 36, '3': 52}, 'Diabetes': {'0': 0, '1': 1, '2': 1, '3': 1}, 'BloodPressureProblems': {'0': 0, '1': 0, '2': 1, '3': 1}, 'AnyTransplants': {'0': 0, '1': 0, '2': 0, '3': 0}, 'AnyChronicDiseases': {'0': 0, '1': 0, '2': 0, '3': 1}, 'Height': {'0': 155, '1': 180, '2': 158, '3': 183}, 'Weight': {'0': 57, '1': 73, '2': 59, '3': 93}, 'KnownAllergies': {'0': 0, '1': 0, '2': 0, '3': 0}, 'HistoryOfCancerInFamily': {'0': 0, '1': 0, '2': 0, '3': 0}, 'NumberOfMajorSurgeries': {'0': 0, '1': 0, '2': 1, '3': 2}, 'PremiumPrice': {'0': 25000, '1': 29000, '2': 23000, '3': 28000}}
<end_description>
| 2,414 | 0 | 3,404 | 2,414 |
69814326
|
from google.colab import drive
drive.mount("/content/gdrive", force_remount=True)
import pandas as pd
import numpy as np
import xgboost as xgb
np.random.seed(2018)
import numpy as np
def apk(actual, predicted, k=7, default=0.0):
    # Since the metric is MAP@7, use at most 7 predictions
if len(predicted) > k:
predicted = predicted[:k]
score = 0.0
num_hits = 0.0
for i, p in enumerate(predicted):
        # A point is awarded when:
        # the prediction is in the ground truth ('p in actual')
        # and the prediction is not a duplicate ('p not in predicted[:i]')
if p in actual and p not in predicted[:i]:
num_hits += 1.0
score += num_hits / (i + 1.0)
    # If the ground-truth list is empty, always return 0.0
if not actual:
return default
    # Divide by the number of ground-truth items (len(actual)) to get the average precision
return score / min(len(actual), k)
def mapk(actual, predicted, k=7, default=0.0):
    # Compute each customer's average precision from the list-of-lists actual and predicted values, then average them with np.mean()
return np.mean([apk(a, p, k, default) for a, p in zip(actual, predicted)])
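# A small worked example with made-up values (illustrative only): the 1st prediction
# is a hit (precision 1/1) and the 3rd is a hit (precision 2/3), so AP = (1 + 2/3) / 2 = 5/6.
assert abs(apk(actual=[2, 5], predicted=[2, 7, 5], k=7) - 5 / 6) < 1e-9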
# Load the data.
trn = pd.read_csv("../input/train_ver2.csv")
tst = pd.read_csv("../input/test_ver2.csv")
trn.shape
# import pickle
# import gzip
# # save
# # trn.pickle could not be saved because Google Drive ran out of space; this was resolved by signing up for Google One (extra Drive storage).
# # with open('../model/trn.pickle', 'wb') as f:
# # pickle.dump(trn, f, pickle.HIGHEST_PROTOCOL)
# # with open('../model/tst.pickle', 'wb') as f:
# # pickle.dump(tst, f, pickle.HIGHEST_PROTOCOL)
# load
# with open('../model/trn.pickle', 'rb') as f:
# trn = pickle.load(f)
# with open('../model/tst.pickle', 'rb') as f:
# tst = pickle.load(f)
## Data preprocessing ##
# Store the product variables separately.
prods = trn.columns[24:].tolist()
# Fill missing values in the product variables with 0 up front.
trn[prods] = trn[prods].fillna(0.0).astype(np.int8)
# Remove customers who hold none of the 24 products.
no_product = trn[prods].sum(axis=1) == 0
trn = trn[~no_product]
# Combine the training and test data. Product variables missing from the test data are filled with 0.
for col in trn.columns[24:]:
tst[col] = 0
df = pd.concat([trn, tst], axis=0)
del trn, tst
# List that holds the variables used for training.
features = []
# Label-encode the categorical variables with .factorize().
categorical_cols = [
"ind_empleado",
"pais_residencia",
"sexo",
"tiprel_1mes",
"indresi",
"indext",
"conyuemp",
"canal_entrada",
"indfall",
"tipodom",
"nomprov",
"segmento",
]
for col in categorical_cols:
df[col], _ = df[col].factorize(na_sentinel=-99)
features += categorical_cols
# Replace special values and missing values in the numeric variables with -99 and convert them to integers.
df["age"].replace(" NA", -99, inplace=True)
df["age"] = df["age"].astype(np.int8)
df["antiguedad"].replace(" NA", -99, inplace=True)
df["antiguedad"] = df["antiguedad"].astype(np.int8)
df["renta"].replace(" NA", -99, inplace=True)
df["renta"].fillna(-99, inplace=True)
df["renta"] = df["renta"].astype(float).astype(np.int8)
df["indrel_1mes"].replace("P", 5, inplace=True)
df["indrel_1mes"].fillna(-99, inplace=True)
df["indrel_1mes"] = df["indrel_1mes"].astype(float).astype(np.int8)
# Add the numeric variables used for training to features.
features += [
"age",
"antiguedad",
"renta",
"ind_nuevo",
"indrel",
"indrel_1mes",
"ind_actividad_cliente",
]
# (Feature engineering) Extract the year and month from the two date variables.
df["fecha_alta_month"] = (
df["fecha_alta"]
.map(lambda x: 0.0 if x.__class__ is float else float(x.split("-")[1]))
.astype(np.int8)
)
df["fecha_alta_year"] = (
df["fecha_alta"]
.map(lambda x: 0.0 if x.__class__ is float else float(x.split("-")[0]))
.astype(np.int16)
)
features += ["fecha_alta_month", "fecha_alta_year"]
df["ult_fec_cli_1t_month"] = (
df["ult_fec_cli_1t"]
.map(lambda x: 0.0 if x.__class__ is float else float(x.split("-")[1]))
.astype(np.int8)
)
df["ult_fec_cli_1t_year"] = (
df["ult_fec_cli_1t"]
.map(lambda x: 0.0 if x.__class__ is float else float(x.split("-")[0]))
.astype(np.int16)
)
features += ["ult_fec_cli_1t_month", "ult_fec_cli_1t_year"]
# Replace missing values in all remaining variables with -99.
df.fillna(-99, inplace=True)
# (Feature engineering) Create the lag-1 data.
# The code flow is similar to listing 2-12.
# Function that converts a date to an integer: 2015-01-28 becomes 1 and 2016-06-28 becomes 18
def date_to_int(str_date):
Y, M, D = [int(a) for a in str_date.strip().split("-")]
int_date = (int(Y) - 2015) * 12 + int(M)
return int_date
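# Quick check of the mapping described above:
assert date_to_int("2015-01-28") == 1
assert date_to_int("2016-06-28") == 18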
# Convert the dates to integers and store them in int_date
df["int_date"] = df["fecha_dato"].map(date_to_int).astype(np.int8)
# Copy the data and add 1 to int_date to create the lag. Append _prev to the column names.
df_lag = df.copy()
df_lag.columns = [
col + "_prev" if col not in ["ncodpers", "int_date"] else col for col in df.columns
]
df_lag["int_date"] += 1
# Merge the original data with the lag data on ncodpers and int_date. Because int_date in the lag data is shifted by one, the previous month's product information is attached.
df_trn = df.merge(df_lag, on=["ncodpers", "int_date"], how="left")
# Remove unneeded variables from memory for efficiency
del df, df_lag
# Fill with 0 in case the previous month's product information does not exist.
for prod in prods:
prev = prod + "_prev"
df_trn[prev].fillna(0, inplace=True)
df_trn.fillna(-99, inplace=True)
# Add the lag-1 variables.
features += [feature + "_prev" for feature in features]
features += [prod + "_prev" for prod in prods]
import pickle
with open("../model/df_trn.pickle", "wb") as f:
pickle.dump(df_trn, f, pickle.HIGHEST_PROTOCOL)
###
### After the baseline model, add further feature engineering here.
###
## Model training
# Split the data into training and test sets for modelling.
# Only the 2016-01-28 ~ 2016-04-28 data is used for training; the 2016-05-28 data is used for validation.
use_dates = ["2016-01-28", "2016-02-28", "2016-03-28", "2016-04-28", "2016-05-28"]
trn = df_trn[df_trn["fecha_dato"].isin(use_dates)]
tst = df_trn[df_trn["fecha_dato"] == "2016-06-28"]
del df_trn
# Extract only the new-purchase records from the training data.
X = []
Y = []
for i, prod in enumerate(prods):
prev = prod + "_prev"
prX = trn[(trn[prod] == 1) & (trn[prev] == 0)]
prY = np.zeros(prX.shape[0], dtype=np.int8) + i
X.append(prX)
Y.append(prY)
XY = pd.concat(X)
Y = np.hstack(Y)
XY["y"] = Y
# Split into training and validation data.
vld_date = "2016-05-28"
XY_trn = XY[XY["fecha_dato"] != vld_date]
XY_vld = XY[XY["fecha_dato"] == vld_date]
# Set the XGBoost model parameters.
param = {
"booster": "gbtree",
"max_depth": 8,
"nthread": 4,
"num_class": len(prods),
"objective": "multi:softprob",
"silent": 1,
"eval_metric": "mlogloss",
"eta": 0.1,
"min_child_weight": 10,
"colsample_bytree": 0.8,
"colsample_bylevel": 0.9,
"seed": 2018,
}
# Convert the training and validation data into XGBoost format.
# X_trn = XY_trn.as_matrix(columns=features)
# Y_trn = XY_trn.as_matrix(columns=['y'])
# Convert the DataFrames to NumPy arrays.
X_trn = XY_trn[features].values
Y_trn = XY_trn["y"].values
dtrn = xgb.DMatrix(X_trn, label=Y_trn, feature_names=features)
X_vld = XY_vld[features].values
Y_vld = XY_vld["y"].values
dvld = xgb.DMatrix(X_vld, label=Y_vld, feature_names=features)
# Train the XGBoost model on the training data!
watch_list = [(dtrn, "train"), (dvld, "eval")]
model = xgb.train(
param, dtrn, num_boost_round=1000, evals=watch_list, early_stopping_rounds=20
)
# Save the trained model.
import pickle
pickle.dump(model, open("../model/xgb.baseline.pkl", "wb"))
best_ntree_limit = model.best_ntree_limit
# # Preparation for the MAP@7 evaluation metric.
# # Extract the customer identification numbers.
# vld = trn[trn['fecha_dato'] == vld_date]
# ncodpers_vld = vld['ncodpers'].values
# # Compute the new purchases in the validation data.
# for prod in prods:
# prev = prod + '_prev'
# padd = prod + '_add'
# vld[padd] = vld[prod] - vld[prev]
# # add_vld = vld[prod + '_add' for prod in prods].values
# e = [prod + '_add' for prod in prods]
# add_vld = vld[e].values
# add_vld_list = [list() for i in range(len(ncodpers_vld))]
# # Store each customer's new-purchase ground truth in add_vld_list and the total count in count_vld.
# count_vld = 0
# for ncodper in range(len(ncodpers_vld)):
# for prod in range(len(prods)):
# if add_vld[ncodper, prod] > 0:
# add_vld_list[ncodper].append(prod)
# count_vld += 1
# # Compute in advance the best MAP@7 score attainable on the validation data. (0.042663)
# print(mapk(add_vld_list, add_vld_list, 7, 0.0))
# # Compute predictions for the validation data.
# X_vld = vld[features].values
# Y_vld = vld['y'].values
# dvld = xgb.DMatrix(X_vld, label=Y_vld, feature_names=features)
# preds_vld = model.predict(dvld, ntree_limit=best_ntree_limit)
# # Products already held last month cannot be new purchases, so subtract 1 from their probabilities in advance
# f = [prod + '_prev' for prod in prods]
# preds_vld = preds_vld - vld[f].values
# # Extract the top 7 predictions for the validation data.
# result_vld = []
# for ncodper, pred in zip(ncodpers_vld, preds_vld):
# y_prods = [(y,p,ip) for y,p,ip in zip(pred, prods, range(len(prods)))]
# y_prods = sorted(y_prods, key=lambda a: a[0], reverse=True)[:7]
# result_vld.append([ip for y,p,ip in y_prods])
# # Compute the MAP@7 score on the validation data. (0.036466)
# print(mapk(add_vld_list, result_vld, 7, 0.0))
# Retrain the XGBoost model on the full training data!
X_all = XY[features].values
Y_all = XY["y"].values
dall = xgb.DMatrix(X_all, label=Y_all, feature_names=features)
watch_list = [(dall, "train")]
# Increase the number of trees in proportion to the increased amount of data.
best_ntree_limit = int(best_ntree_limit * (len(XY_trn) + len(XY_vld)) / len(XY_trn))
# Retrain the XGBoost model!
model = xgb.train(param, dall, num_boost_round=best_ntree_limit, evals=watch_list)
# Print the feature importances. Are the expected variables near the top?
print("Feature importance:")
for kv in sorted(
[(k, v) for k, v in model.get_fscore().items()], key=lambda kv: kv[1], reverse=True
):
print(kv)
# Compute predictions on the test data for the Kaggle submission.
X_tst = tst[features].values
dtst = xgb.DMatrix(X_tst, feature_names=features)
preds_tst = model.predict(dtst, ntree_limit=best_ntree_limit)
ncodpers_tst = tst["ncodpers"].values
columns = [prod + "_prev" for prod in prods]
preds_tst = preds_tst - tst[columns].values
# Create the submission file.
submit_file = open("../model/xgb.baseline.2021-07-31", "w")
submit_file.write("ncodpers,added_products\n")
for ncodper, pred in zip(ncodpers_tst, preds_tst):
y_prods = [(y, p, ip) for y, p, ip in zip(pred, prods, range(len(prods)))]
y_prods = sorted(y_prods, key=lambda a: a[0], reverse=True)[:7]
y_prods = [p for y, p, ip in y_prods]
submit_file.write("{},{}\n".format(int(ncodper), " ".join(y_prods)))
ls
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/814/69814326.ipynb
| null | null |
[{"Id": 69814326, "ScriptId": 18739882, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2074454, "CreationDate": "08/03/2021 11:55:50", "VersionNumber": 3.0, "Title": "Santander_product_recommendation_ XGBoost", "EvaluationDate": "08/03/2021", "IsChange": true, "TotalLines": 321.0, "LinesInsertedFromPrevious": 303.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 18.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
from google.colab import drive
drive.mount("/content/gdrive", force_remount=True)
import pandas as pd
import numpy as np
import xgboost as xgb
np.random.seed(2018)
import numpy as np
def apk(actual, predicted, k=7, default=0.0):
    # Since the metric is MAP@7, use at most 7 predictions
if len(predicted) > k:
predicted = predicted[:k]
score = 0.0
num_hits = 0.0
for i, p in enumerate(predicted):
        # A point is awarded when:
        # the prediction is in the ground truth ('p in actual')
        # and the prediction is not a duplicate ('p not in predicted[:i]')
if p in actual and p not in predicted[:i]:
num_hits += 1.0
score += num_hits / (i + 1.0)
    # If the ground-truth list is empty, always return 0.0
if not actual:
return default
    # Divide by the number of ground-truth items (len(actual)) to get the average precision
return score / min(len(actual), k)
def mapk(actual, predicted, k=7, default=0.0):
    # Compute each customer's average precision from the list-of-lists actual and predicted values, then average them with np.mean()
return np.mean([apk(a, p, k, default) for a, p in zip(actual, predicted)])
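# A small worked example with made-up values (illustrative only): the 1st prediction
# is a hit (precision 1/1) and the 3rd is a hit (precision 2/3), so AP = (1 + 2/3) / 2 = 5/6.
assert abs(apk(actual=[2, 5], predicted=[2, 7, 5], k=7) - 5 / 6) < 1e-9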
# Load the data.
trn = pd.read_csv("../input/train_ver2.csv")
tst = pd.read_csv("../input/test_ver2.csv")
trn.shape
# import pickle
# import gzip
# # save
# # trn.pickle could not be saved because Google Drive ran out of space; this was resolved by signing up for Google One (extra Drive storage).
# # with open('../model/trn.pickle', 'wb') as f:
# # pickle.dump(trn, f, pickle.HIGHEST_PROTOCOL)
# # with open('../model/tst.pickle', 'wb') as f:
# # pickle.dump(tst, f, pickle.HIGHEST_PROTOCOL)
# load
# with open('../model/trn.pickle', 'rb') as f:
# trn = pickle.load(f)
# with open('../model/tst.pickle', 'rb') as f:
# tst = pickle.load(f)
## Data preprocessing ##
# Store the product variables separately.
prods = trn.columns[24:].tolist()
# Fill missing values in the product variables with 0 up front.
trn[prods] = trn[prods].fillna(0.0).astype(np.int8)
# Remove customers who hold none of the 24 products.
no_product = trn[prods].sum(axis=1) == 0
trn = trn[~no_product]
# Combine the training and test data. Product variables missing from the test data are filled with 0.
for col in trn.columns[24:]:
tst[col] = 0
df = pd.concat([trn, tst], axis=0)
del trn, tst
# List that holds the variables used for training.
features = []
# Label-encode the categorical variables with .factorize().
categorical_cols = [
"ind_empleado",
"pais_residencia",
"sexo",
"tiprel_1mes",
"indresi",
"indext",
"conyuemp",
"canal_entrada",
"indfall",
"tipodom",
"nomprov",
"segmento",
]
for col in categorical_cols:
df[col], _ = df[col].factorize(na_sentinel=-99)
features += categorical_cols
# Replace special values and missing values in the numeric variables with -99 and convert them to integers.
df["age"].replace(" NA", -99, inplace=True)
df["age"] = df["age"].astype(np.int8)
df["antiguedad"].replace(" NA", -99, inplace=True)
df["antiguedad"] = df["antiguedad"].astype(np.int8)
df["renta"].replace(" NA", -99, inplace=True)
df["renta"].fillna(-99, inplace=True)
df["renta"] = df["renta"].astype(float).astype(np.int8)
df["indrel_1mes"].replace("P", 5, inplace=True)
df["indrel_1mes"].fillna(-99, inplace=True)
df["indrel_1mes"] = df["indrel_1mes"].astype(float).astype(np.int8)
# Add the numeric variables used for training to features.
features += [
"age",
"antiguedad",
"renta",
"ind_nuevo",
"indrel",
"indrel_1mes",
"ind_actividad_cliente",
]
# (Feature engineering) Extract year and month information from the two date variables.
df["fecha_alta_month"] = (
df["fecha_alta"]
.map(lambda x: 0.0 if x.__class__ is float else float(x.split("-")[1]))
.astype(np.int8)
)
df["fecha_alta_year"] = (
df["fecha_alta"]
.map(lambda x: 0.0 if x.__class__ is float else float(x.split("-")[0]))
.astype(np.int16)
)
features += ["fecha_alta_month", "fecha_alta_year"]
df["ult_fec_cli_1t_month"] = (
df["ult_fec_cli_1t"]
.map(lambda x: 0.0 if x.__class__ is float else float(x.split("-")[1]))
.astype(np.int8)
)
df["ult_fec_cli_1t_year"] = (
df["ult_fec_cli_1t"]
.map(lambda x: 0.0 if x.__class__ is float else float(x.split("-")[0]))
.astype(np.int16)
)
features += ["ult_fec_cli_1t_month", "ult_fec_cli_1t_year"]
# Replace missing values in all remaining variables with -99.
df.fillna(-99, inplace=True)
# (Feature engineering) Create lag-1 data.
# The code flow is similar to listing 2-12 (from the reference book).
# Function that converts a date string to an integer: 2015-01-28 becomes 1 and 2016-06-28 becomes 18.
def date_to_int(str_date):
Y, M, D = [int(a) for a in str_date.strip().split("-")]
int_date = (int(Y) - 2015) * 12 + int(M)
return int_date
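# Quick check of the examples mentioned in the comment above:
assert date_to_int("2015-01-28") == 1
assert date_to_int("2016-06-28") == 18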
# Convert the dates to integers and store them in int_date.
df["int_date"] = df["fecha_dato"].map(date_to_int).astype(np.int8)
# Copy the data and add 1 to int_date to create the lag; append _prev to the column names.
df_lag = df.copy()
df_lag.columns = [
col + "_prev" if col not in ["ncodpers", "int_date"] else col for col in df.columns
]
df_lag["int_date"] += 1
# Merge the original data and the lag data on ncodpers and int_date. Because the lag data's int_date is shifted forward by 1, last month's product information gets attached to each row.
df_trn = df.merge(df_lag, on=["ncodpers", "int_date"], how="left")
# Free unneeded variables to keep memory usage down.
del df, df_lag
# Fill with 0 in case last month's product information does not exist.
for prod in prods:
prev = prod + "_prev"
df_trn[prev].fillna(0, inplace=True)
df_trn.fillna(-99, inplace=True)
# Add the lag-1 variables.
features += [feature + "_prev" for feature in features]
features += [prod + "_prev" for prod in prods]
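# To make the lag-1 trick concrete, here is a tiny self-contained illustration with toy
# values (hypothetical customer id and a single fake product column):
_toy = pd.DataFrame({"ncodpers": [1, 1, 1], "int_date": [1, 2, 3], "prod_a": [0, 1, 1]})
_toy_lag = _toy.copy()
_toy_lag.columns = [
    c + "_prev" if c not in ["ncodpers", "int_date"] else c for c in _toy.columns
]
_toy_lag["int_date"] += 1
# In the merged frame the int_date=2 row carries prod_a_prev=0 (last month's holding),
# so prod_a=1 & prod_a_prev=0 flags a new purchase in month 2.
print(_toy.merge(_toy_lag, on=["ncodpers", "int_date"], how="left"))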
import pickle
with open("../model/df_trn.pickle", "wb") as f:
pickle.dump(df_trn, f, pickle.HIGHEST_PROTOCOL)
###
### After the baseline model, add further feature engineering here.
###
## Model training
# Split the data into training and test sets for model fitting.
# Training uses only the 2016-01-28 ~ 2016-04-28 data; 2016-05-28 is used for validation.
use_dates = ["2016-01-28", "2016-02-28", "2016-03-28", "2016-04-28", "2016-05-28"]
trn = df_trn[df_trn["fecha_dato"].isin(use_dates)]
tst = df_trn[df_trn["fecha_dato"] == "2016-06-28"]
del df_trn
# Extract only the new-purchase records from the training data.
X = []
Y = []
for i, prod in enumerate(prods):
prev = prod + "_prev"
prX = trn[(trn[prod] == 1) & (trn[prev] == 0)]
prY = np.zeros(prX.shape[0], dtype=np.int8) + i
X.append(prX)
Y.append(prY)
XY = pd.concat(X)
Y = np.hstack(Y)
XY["y"] = Y
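# Each new purchase becomes one training row whose label y is the index of the purchased
# product in prods, so a customer adding several products in a month contributes several rows.
# Sanity check (illustrative): every label should be a valid product index.
assert set(np.unique(Y)) <= set(range(len(prods)))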
# Split into training and validation sets.
vld_date = "2016-05-28"
XY_trn = XY[XY["fecha_dato"] != vld_date]
XY_vld = XY[XY["fecha_dato"] == vld_date]
# Set the XGBoost model parameters.
param = {
"booster": "gbtree",
"max_depth": 8,
"nthread": 4,
"num_class": len(prods),
"objective": "multi:softprob",
"silent": 1,
"eval_metric": "mlogloss",
"eta": 0.1,
"min_child_weight": 10,
"colsample_bytree": 0.8,
"colsample_bylevel": 0.9,
"seed": 2018,
}
# Convert the training and validation data into XGBoost's DMatrix format.
# X_trn = XY_trn.as_matrix(columns=features)
# Y_trn = XY_trn.as_matrix(columns=['y'])
# Convert the DataFrames to NumPy arrays.
X_trn = XY_trn[features].values
Y_trn = XY_trn["y"].values
dtrn = xgb.DMatrix(X_trn, label=Y_trn, feature_names=features)
X_vld = XY_vld[features].values
Y_vld = XY_vld["y"].values
dvld = xgb.DMatrix(X_vld, label=Y_vld, feature_names=features)
# Train the XGBoost model on the training data!
watch_list = [(dtrn, "train"), (dvld, "eval")]
model = xgb.train(
param, dtrn, num_boost_round=1000, evals=watch_list, early_stopping_rounds=20
)
# Save the trained model.
import pickle
pickle.dump(model, open("../model/xgb.baseline.pkl", "wb"))
best_ntree_limit = model.best_ntree_limit
# # Preparation for the MAP@7 evaluation metric.
# # Extract the customer identifiers.
# vld = trn[trn['fecha_dato'] == vld_date]
# ncodpers_vld = vld['ncodpers'].values
# # Compute the new purchases in the validation data.
# for prod in prods:
# prev = prod + '_prev'
# padd = prod + '_add'
# vld[padd] = vld[prod] - vld[prev]
# # add_vld = vld[prod + '_add' for prod in prods].values
# e = [prod + '_add' for prod in prods]
# add_vld = vld[e].values
# add_vld_list = [list() for i in range(len(ncodpers_vld))]
# # Store each customer's true new purchases in add_vld_list and the total count in count_vld.
# count_vld = 0
# for ncodper in range(len(ncodpers_vld)):
# for prod in range(len(prods)):
# if add_vld[ncodper, prod] > 0:
# add_vld_list[ncodper].append(prod)
# count_vld += 1
# # Compute in advance the maximum MAP@7 achievable on the validation data. (0.042663)
# print(mapk(add_vld_list, add_vld_list, 7, 0.0))
# # Compute predictions for the validation data.
# X_vld = vld[features].values
# Y_vld = vld['y'].values
# dvld = xgb.DMatrix(X_vld, label=Y_vld, feature_names=features)
# preds_vld = model.predict(dvld, ntree_limit=best_ntree_limit)
# # Products already held last month cannot be new purchases, so subtract 1 from their probabilities in advance
# f = [prod + '_prev' for prod in prods]
# preds_vld = preds_vld - vld[f].values
# # Take the top 7 predictions for the validation data.
# result_vld = []
# for ncodper, pred in zip(ncodpers_vld, preds_vld):
# y_prods = [(y,p,ip) for y,p,ip in zip(pred, prods, range(len(prods)))]
# y_prods = sorted(y_prods, key=lambda a: a[0], reverse=True)[:7]
# result_vld.append([ip for y,p,ip in y_prods])
# # Compute the MAP@7 score on the validation data. (0.036466)
# print(mapk(add_vld_list, result_vld, 7, 0.0))
# Retrain the XGBoost model on the full training data!
X_all = XY[features].values
Y_all = XY["y"].values
dall = xgb.DMatrix(X_all, label=Y_all, feature_names=features)
watch_list = [(dall, "train")]
# Increase the number of trees in proportion to the increased amount of data.
best_ntree_limit = int(best_ntree_limit * (len(XY_trn) + len(XY_vld)) / len(XY_trn))
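# For example (illustrative numbers only): if early stopping picked 100 trees and the
# training split held 80% of XY, this gives int(100 * 1.0 / 0.8) = 125 trees for the refit.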
# Retrain the XGBoost model!
model = xgb.train(param, dall, num_boost_round=best_ntree_limit, evals=watch_list)
# Print the feature importances. Are the variables you expected near the top?
print("Feature importance:")
for kv in sorted(
[(k, v) for k, v in model.get_fscore().items()], key=lambda kv: kv[1], reverse=True
):
print(kv)
# Compute predictions on the test data for the Kaggle submission.
X_tst = tst[features].values
dtst = xgb.DMatrix(X_tst, feature_names=features)
preds_tst = model.predict(dtst, ntree_limit=best_ntree_limit)
ncodpers_tst = tst["ncodpers"].values
columns = [prod + "_prev" for prod in prods]
preds_tst = preds_tst - tst[columns].values
# Create the submission file.
submit_file = open("../model/xgb.baseline.2021-07-31", "w")
submit_file.write("ncodpers,added_products\n")
for ncodper, pred in zip(ncodpers_tst, preds_tst):
y_prods = [(y, p, ip) for y, p, ip in zip(pred, prods, range(len(prods)))]
y_prods = sorted(y_prods, key=lambda a: a[0], reverse=True)[:7]
y_prods = [p for y, p, ip in y_prods]
submit_file.write("{},{}\n".format(int(ncodper), " ".join(y_prods)))
submit_file.close()  # make sure the submission file is flushed to disk
# ls  (IPython line magic to list the working directory; not valid in plain Python)
| false | 0 | 4,510 | 0 | 4,510 | 4,510 |
||
69863885
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# ## Load Data
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
train_data.head()
test_data = pd.read_csv("../input/titanic/test.csv")
test_data.head()
# ## Explore a pattern
women = train_data.loc[train_data.Sex == "female"]["Survived"]
# "Rate" simply means the number of things per some other number.
rate_women = sum(women) / len(women) #
print("% of women who survived:", rate_women * 100) # A percentage is a rate per 100.
men = train_data.loc[train_data.Sex == "male"]["Survived"]
rate_men = sum(men) / len(men)
print("% of men who survived:", rate_men * 100)
# ## Random forest model with basic features
from sklearn.ensemble import RandomForestClassifier
y = train_data["Survived"]
features = ["Pclass", "Sex", "SibSp", "Parch"]
X = pd.get_dummies(train_data[features])
X_test = pd.get_dummies(test_data[features])
model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
model.fit(X, y)
predictions = model.predict(X_test)
output = pd.DataFrame({"PassengerId": test_data.PassengerId, "Survived": predictions})
output.to_csv("my_submission.csv", index=False)
print("Your submission was successfully saved!")
# # Visualizing a Decision Tree
# Decision trees are stored in a list in the model's estimators_ attribute. We can check the length of the list, which should equal the n_estimators value.
len(model.estimators_)
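# Quick illustrative check using the objects defined above: a random forest's class
# probabilities are the average of its individual trees' predict_proba outputs.
tree_probs = np.mean([est.predict_proba(X_test.values) for est in model.estimators_], axis=0)
print(tree_probs[:3])  # should match model.predict_proba(X_test)[:3] up to float precision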
from sklearn import tree
from matplotlib import pyplot as plt
from dtreeviz.trees import *
plt.figure(figsize=(20, 20))
_ = tree.plot_tree(model.estimators_[0], feature_names=X.columns, filled=True)
model.estimators_[0].tree_.max_depth
# ## visualize the first Decision Tree
viz = dtreeviz(
model.estimators_[0],
X,
y,
feature_names=X.columns,
target_name="Target",
orientation="TD",
)
viz
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/863/69863885.ipynb
| null | null |
[{"Id": 69863885, "ScriptId": 19093388, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3606274, "CreationDate": "08/03/2021 16:47:28", "VersionNumber": 2.0, "Title": "Getting Started with Titanic-Random forest Viz", "EvaluationDate": "08/03/2021", "IsChange": true, "TotalLines": 83.0, "LinesInsertedFromPrevious": 4.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 79.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
# ## Load Data
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
train_data.head()
test_data = pd.read_csv("../input/titanic/test.csv")
test_data.head()
# ## Explore a pattern
women = train_data.loc[train_data.Sex == "female"]["Survived"]
# "Rate" simply means the number of things per some other number.
rate_women = sum(women) / len(women) #
print("% of women who survived:", rate_women * 100) # A percentage is a rate per 100.
men = train_data.loc[train_data.Sex == "male"]["Survived"]
rate_men = sum(men) / len(men)
print("% of men who survived:", rate_men * 100)
# ## Random forest model with basic features
from sklearn.ensemble import RandomForestClassifier
y = train_data["Survived"]
features = ["Pclass", "Sex", "SibSp", "Parch"]
X = pd.get_dummies(train_data[features])
X_test = pd.get_dummies(test_data[features])
model = RandomForestClassifier(n_estimators=100, max_depth=5, random_state=1)
model.fit(X, y)
predictions = model.predict(X_test)
output = pd.DataFrame({"PassengerId": test_data.PassengerId, "Survived": predictions})
output.to_csv("my_submission.csv", index=False)
print("Your submission was successfully saved!")
# # Visualizing a Decision Tree
# Decision trees are stored in a list in the model's estimators_ attribute. We can check the length of the list, which should equal the n_estimators value.
len(model.estimators_)
from sklearn import tree
from matplotlib import pyplot as plt
from dtreeviz.trees import *
plt.figure(figsize=(20, 20))
_ = tree.plot_tree(model.estimators_[0], feature_names=X.columns, filled=True)
model.estimators_[0].tree_.max_depth
# ## visualize the first Decision Tree
viz = dtreeviz(
model.estimators_[0],
X,
y,
feature_names=X.columns,
target_name="Target",
orientation="TD",
)
viz
| false | 0 | 719 | 0 | 719 | 719 |
||
69863214
|
<jupyter_start><jupyter_text>South Park Scripts Dataset
This dataset contains two files.
**SouthPark_Episodes.csv**: Episode names, air date, season, episode number, and a description of each episode.
**SouthPark_Lines.csv**: Episode name, character, and the character's line.
We have over 300 episodes and over 95,000 lines.
This dataset is suitable for **NLP tasks, data visualizations, recommender systems** and more.
Source: https://southpark.fandom.com/wiki/List_of_Episodes
Scraping: https://www.kaggle.com/mustafacicek/pandas-read-html-south-park-lines-collection
Kaggle dataset identifier: south-park-scripts-dataset
<jupyter_code>import pandas as pd
df = pd.read_csv('south-park-scripts-dataset/SouthPark_Lines.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 95320 entries, 0 to 95319
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Title 95320 non-null object
1 Character 95320 non-null object
2 Line 95308 non-null object
dtypes: object(3)
memory usage: 2.2+ MB
<jupyter_text>Examples:
{
"Title": "Cartman Gets an Anal Probe",
"Character": "Scene Description",
"Line": "At the bus stop."
}
{
"Title": "Cartman Gets an Anal Probe",
"Character": "The Boys",
"Line": "School days, school days, teacher's golden ru..."
}
{
"Title": "Cartman Gets an Anal Probe",
"Character": "Kyle Broflovski",
"Line": "Ah, damn it! My little brother's trying to follow me to school again."
}
{
"Title": "Cartman Gets an Anal Probe",
"Character": "Ike Broflovski",
"Line": "Eat banana."
}
<jupyter_code>import pandas as pd
df = pd.read_csv('south-park-scripts-dataset/SouthPark_Episodes.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 309 entries, 0 to 308
Data columns (total 7 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Title 309 non-null object
1 Air Date 309 non-null object
2 Code 309 non-null int64
3 # 309 non-null int64
4 Description 309 non-null object
5 Season 309 non-null int64
6 Episode 309 non-null int64
dtypes: int64(4), object(3)
memory usage: 17.0+ KB
<jupyter_text>Examples:
{
"Title": "Cartman Gets an Anal Probe",
"Air Date": "August 13, 1997",
"Code": 101,
"#": 1,
"Description": "While the boys are waiting for the school bus, Cartman explains the odd nightmare he had the previous night involving alien visitors.",
"Season": 1,
"Episode": 1
}
{
"Title": "Weight Gain 4000",
"Air Date": "August 20, 1997",
"Code": 102,
"#": 2,
"Description": "When Cartman's environmental essay wins a national contest, America's sweetheart, Kathie Lee Gifford, comes to South Park to present the award.",
"Season": 1,
"Episode": 2
}
{
"Title": "Volcano",
"Air Date": "August 27, 1997",
"Code": 103,
"#": 3,
"Description": "A weekend trip to experience the finer points of camping, fishing and blowing animals to smithereens is threatened by an erupting volcano.",
"Season": 1,
"Episode": 3
}
{
"Title": "Big Gay Al's Big Gay Boat Ride",
"Air Date": "September 3, 1997",
"Code": 104,
"#": 4,
"Description": "When Stan discovers his new dog Sparky is gay, he becomes so confused he loses his will to play in the big Homecoming Football game against North Park.",
"Season": 1,
"Episode": 4
}
<jupyter_script>import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import re, string
from requests import get
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.tokenize import RegexpTokenizer
import spacy
nlp = spacy.load("en_core_web_sm")
from PIL import Image
from io import BytesIO
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import warnings
warnings.simplefilter(action="ignore", category=FutureWarning)
pd.options.mode.chained_assignment = None
episodes = pd.read_csv("../input/south-park-scripts-dataset/SouthPark_Episodes.csv")
display(episodes.head())
print(episodes.shape)
lines = pd.read_csv("../input/south-park-scripts-dataset/SouthPark_Lines.csv")
display(lines.head())
print(lines.shape)
title_episodelist = episodes.Title.tolist()
title_lineslist = lines.Title.tolist()
[x for x in title_episodelist if x not in title_lineslist]
rename_titles = {
"Imaginationland": "Imaginationland Episode I",
"REHASH": "#REHASH",
"HappyHolograms": "#HappyHolograms",
"Shots": "Shots!!!",
}
lines["Title"].replace(rename_titles, inplace=True)
df = episodes.merge(lines, on="Title")
df
df = df[df["Line"].notnull()]
df["Act"] = df.Line.apply(lambda x: re.findall(r"\[.*?]", x) if "[" in x else "None")
df["Act"] = df["Act"].astype(str)
df["Act"] = df["Act"].str.replace("[", "").str.replace("]", "", regex=True)
df["Line2"] = df["Line"].str.replace(r"\[.*?]", "", regex=True)
df
contractions = {
"ain't": "am not / are not / is not / has not / have not",
"aren't": "are not / am not",
"can't": "can not",
"can't've": "can not have",
"'cause": "because",
"could've": "could have",
"couldn't": "could not",
"couldn't've": "could not have",
"didn't": "did not",
"doesn't": "does not",
"don't": "do not",
"hadn't": "had not",
"hadn't've": "had not have",
"hasn't": "has not",
"haven't": "have not",
"he'd": "he had / he would",
"he'd've": "he would have",
"he'll": "he shall / he will",
"he'll've": "he shall have / he will have",
"he's": "he has / he is",
"how'd": "how did",
"how'd'y": "how do you",
"how'll": "how will",
"how's": "how has / how is / how does",
"I'd": "I had / I would",
"I'd've": "I would have",
"I'll": "I shall / I will",
"I'll've": "I shall have / I will have",
"I'm": "I am",
"I've": "I have",
"isn't": "is not",
"it'd": "it had / it would",
"it'd've": "it would have",
"it'll": "it shall / it will",
"it'll've": "it shall have / it will have",
"it's": "it has / it is",
"let's": "let us",
"ma'am": "madam",
"mayn't": "may not",
"might've": "might have",
"mightn't": "might not",
"mightn't've": "might not have",
"must've": "must have",
"mustn't": "must not",
"mustn't've": "must not have",
"needn't": "need not",
"needn't've": "need not have",
"o'clock": "of the clock",
"oughtn't": "ought not",
"oughtn't've": "ought not have",
"shan't": "shall not",
"sha'n't": "shall not",
"shan't've": "shall not have",
"she'd": "she had / she would",
"she'd've": "she would have",
"she'll": "she shall / she will",
"she'll've": "she shall have / she will have",
"she's": "she has / she is",
"should've": "should have",
"shouldn't": "should not",
"shouldn't've": "should not have",
"so've": "so have",
"so's": "so as / so is",
"that'd": "that would / that had",
"that'd've": "that would have",
"that's": "that has / that is",
"there'd": "there had / there would",
"there'd've": "there would have",
"there's": "there has / there is",
"they'd": "they had / they would",
"they'd've": "they would have",
"they'll": "they shall / they will",
"they'll've": "they shall have / they will have",
"they're": "they are",
"they've": "they have",
"to've": "to have",
"wasn't": "was not",
"we'd": "we had / we would",
"we'd've": "we would have",
"we'll": "we will",
"we'll've": "we will have",
"we're": "we are",
"we've": "we have",
"weren't": "were not",
"what'll": "what shall / what will",
"what'll've": "what shall have / what will have",
"what're": "what are",
"what's": "what has / what is",
"what've": "what have",
"when's": "when has / when is",
"when've": "when have",
"where'd": "where did",
"where's": "where has / where is",
"where've": "where have",
"who'll": "who shall / who will",
"who'll've": "who shall have / who will have",
"who's": "who has / who is",
"who've": "who have",
"why's": "why has / why is",
"why've": "why have",
"will've": "will have",
"won't": "will not",
"won't've": "will not have",
"would've": "would have",
"wouldn't": "would not",
"wouldn't've": "would not have",
"y'all": "you all",
"y'all'd": "you all would",
"y'all'd've": "you all would have",
"y'all're": "you all are",
"y'all've": "you all have",
"you'd": "you had / you would",
"you'd've": "you would have",
"you'll": "you shall / you will",
"you'll've": "you shall have / you will have",
"you're": "you are",
"you've": "you have",
"wanna": "want to",
"gonna": "going to",
"gotta": "have got to",
}
all_stopwords = nlp.Defaults.stop_words
def tokenizer(text):
text = text.replace("in'", "ing")
text = text.replace("m'kay", "mkay")
tokens = text.split()
tokens = [
re.sub(token, contractions[token], token)
if token in contractions.keys()
else token
for token in tokens
]
tokens = [token.strip(string.punctuation) for token in tokens]
tokens = [token.lower() for token in tokens]
tokens = [token for token in tokens if token.isalpha()]
tokens = [token for token in tokens if len(token) > 1]
tokens = [token for token in tokens if token not in all_stopwords]
# n = lambda pos: pos[:2].startswith("N")
# tokens = [word for (word, pos) in nltk.pos_tag(tokens) if n(pos)]
lemmatizer = nltk.wordnet.WordNetLemmatizer()
lemmas = [
lemmatizer.lemmatize(token, "v") if token != "butters" else "butters"
for token in tokens
]
lemmas = [
lemmatizer.lemmatize(token) if token != "butters" else "butters"
for token in lemmas
]
lemmas = [
lemma
for lemma in lemmas
if lemma
not in [
"think",
"know",
"come",
"want",
"look",
"right",
"try",
"need",
"say",
"yes",
"good",
"okay",
"people",
"time",
"tell",
"talk",
"stop",
"thing",
"mean",
"maybe",
"let",
]
]
lemmas = [
lemma.replace(lemma, "guys") if lemma == "guy" else lemma for lemma in lemmas
]
return " ".join(lemmas)
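# Illustrative call of the tokenizer (the exact output depends on spaCy's stop-word list
# and the installed WordNet data):
print(tokenizer("Oh my God, they killed Kenny!"))  # e.g. roughly "oh god kill kenny"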
df["Lines_Final"] = df["Line2"].apply(tokenizer)
df.iloc[:, -4:]
img_dict = {
"Cartman": "https://i.imgur.com/DtPNaXk.png",
"Kyle": "https://i.imgur.com/5vBEIri.png",
"Kenny": "https://i.imgur.com/IbHV3iA.png",
"Stan": "https://i.imgur.com/Pl94sMm.png",
"Randy": "https://i.imgur.com/EgMCfsr.png",
"Mr. Mackey": "https://i.imgur.com/bdJE2SB.png",
"Butters": "https://i.imgur.com/EQAUV7i.png",
}
def plot_wordcloud(character, max_words=500):
character_df = df[df.Character == character]
txt = " ".join(character_df["Lines_Final"].values)
im = Image.open(BytesIO(get(img_dict[character]).content))
mask = np.array(im)
colors = ImageColorGenerator(mask)
fig, ax = plt.subplots(figsize=(15, 15))
stopwords = set(STOPWORDS)
wc = WordCloud(
background_color="black",
max_words=max_words,
mask=mask, # shape
stopwords=stopwords,
width=1000,
height=1000,
color_func=colors,
collocations=True, # bigrams
contour_color="white",
contour_width=1,
normalize_plurals=False, # For Butters
min_word_length=3,
random_state=42,
).generate(txt)
plt.imshow(wc, interpolation="bilinear")
plt.axis("off")
plt.savefig(character + ".png")
plt.show()
plot_wordcloud("Cartman", 100)
plot_wordcloud("Stan", 100)
plot_wordcloud("Kenny", 100)
plot_wordcloud("Kyle", 100)
plot_wordcloud("Butters", 100)
plot_wordcloud("Randy", 100)
plot_wordcloud("Mr. Mackey", 100)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/863/69863214.ipynb
|
south-park-scripts-dataset
|
mustafacicek
|
[{"Id": 69863214, "ScriptId": 19100795, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4388004, "CreationDate": "08/03/2021 16:43:19", "VersionNumber": 1.0, "Title": "Word Clouds: South Park Characters", "EvaluationDate": "08/03/2021", "IsChange": true, "TotalLines": 267.0, "LinesInsertedFromPrevious": 267.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 5}]
|
[{"Id": 93309899, "KernelVersionId": 69863214, "SourceDatasetVersionId": 2495366}]
|
[{"Id": 2495366, "DatasetId": 1510802, "DatasourceVersionId": 2537980, "CreatorUserId": 4388004, "LicenseName": "Other (specified in description)", "CreationDate": "08/03/2021 14:25:10", "VersionNumber": 1.0, "Title": "South Park Scripts Dataset", "Slug": "south-park-scripts-dataset", "Subtitle": "South Park scripts for all episodes", "Description": "This dataset contains two file. \n\n**SouthPark_Episodes.csv**: Episode names, air date, season, episode number, and description of episode.\n**SouthPark_Lines.csv**: Includes episode name, character and its line.\n\n\nWe have over 300 episodes and over 95000 lines.\n\n\nThis dataset suitable for **NLP tasks, data visualizations, recommender systems** and more.\n\nSource: https://southpark.fandom.com/wiki/List_of_Episodes\nScraping: https://www.kaggle.com/mustafacicek/pandas-read-html-south-park-lines-collection", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1510802, "CreatorUserId": 4388004, "OwnerUserId": 4388004.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2495366.0, "CurrentDatasourceVersionId": 2537980.0, "ForumId": 1530562, "Type": 2, "CreationDate": "08/03/2021 14:25:10", "LastActivityDate": "08/03/2021", "TotalViews": 4382, "TotalDownloads": 207, "TotalVotes": 7, "TotalKernels": 2}]
|
[{"Id": 4388004, "UserName": "mustafacicek", "DisplayName": "Mustafa Cicek", "RegisterDate": "01/27/2020", "PerformanceTier": 2}]
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import re, string
from requests import get
import nltk
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from nltk.tokenize import RegexpTokenizer
import spacy
nlp = spacy.load("en_core_web_sm")
from PIL import Image
from io import BytesIO
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import warnings
warnings.simplefilter(action="ignore", category=FutureWarning)
pd.options.mode.chained_assignment = None
episodes = pd.read_csv("../input/south-park-scripts-dataset/SouthPark_Episodes.csv")
display(episodes.head())
print(episodes.shape)
lines = pd.read_csv("../input/south-park-scripts-dataset/SouthPark_Lines.csv")
display(lines.head())
print(lines.shape)
title_episodelist = episodes.Title.tolist()
title_lineslist = lines.Title.tolist()
[x for x in title_episodelist if x not in title_lineslist]
rename_titles = {
"Imaginationland": "Imaginationland Episode I",
"REHASH": "#REHASH",
"HappyHolograms": "#HappyHolograms",
"Shots": "Shots!!!",
}
lines["Title"].replace(rename_titles, inplace=True)
df = episodes.merge(lines, on="Title")
df
df = df[df["Line"].notnull()]
df["Act"] = df.Line.apply(lambda x: re.findall(r"\[.*?]", x) if "[" in x else "None")
df["Act"] = df["Act"].astype(str)
df["Act"] = df["Act"].str.replace("[", "").str.replace("]", "", regex=True)
df["Line2"] = df["Line"].str.replace(r"\[.*?]", "", regex=True)
df
contractions = {
"ain't": "am not / are not / is not / has not / have not",
"aren't": "are not / am not",
"can't": "can not",
"can't've": "can not have",
"'cause": "because",
"could've": "could have",
"couldn't": "could not",
"couldn't've": "could not have",
"didn't": "did not",
"doesn't": "does not",
"don't": "do not",
"hadn't": "had not",
"hadn't've": "had not have",
"hasn't": "has not",
"haven't": "have not",
"he'd": "he had / he would",
"he'd've": "he would have",
"he'll": "he shall / he will",
"he'll've": "he shall have / he will have",
"he's": "he has / he is",
"how'd": "how did",
"how'd'y": "how do you",
"how'll": "how will",
"how's": "how has / how is / how does",
"I'd": "I had / I would",
"I'd've": "I would have",
"I'll": "I shall / I will",
"I'll've": "I shall have / I will have",
"I'm": "I am",
"I've": "I have",
"isn't": "is not",
"it'd": "it had / it would",
"it'd've": "it would have",
"it'll": "it shall / it will",
"it'll've": "it shall have / it will have",
"it's": "it has / it is",
"let's": "let us",
"ma'am": "madam",
"mayn't": "may not",
"might've": "might have",
"mightn't": "might not",
"mightn't've": "might not have",
"must've": "must have",
"mustn't": "must not",
"mustn't've": "must not have",
"needn't": "need not",
"needn't've": "need not have",
"o'clock": "of the clock",
"oughtn't": "ought not",
"oughtn't've": "ought not have",
"shan't": "shall not",
"sha'n't": "shall not",
"shan't've": "shall not have",
"she'd": "she had / she would",
"she'd've": "she would have",
"she'll": "she shall / she will",
"she'll've": "she shall have / she will have",
"she's": "she has / she is",
"should've": "should have",
"shouldn't": "should not",
"shouldn't've": "should not have",
"so've": "so have",
"so's": "so as / so is",
"that'd": "that would / that had",
"that'd've": "that would have",
"that's": "that has / that is",
"there'd": "there had / there would",
"there'd've": "there would have",
"there's": "there has / there is",
"they'd": "they had / they would",
"they'd've": "they would have",
"they'll": "they shall / they will",
"they'll've": "they shall have / they will have",
"they're": "they are",
"they've": "they have",
"to've": "to have",
"wasn't": "was not",
"we'd": "we had / we would",
"we'd've": "we would have",
"we'll": "we will",
"we'll've": "we will have",
"we're": "we are",
"we've": "we have",
"weren't": "were not",
"what'll": "what shall / what will",
"what'll've": "what shall have / what will have",
"what're": "what are",
"what's": "what has / what is",
"what've": "what have",
"when's": "when has / when is",
"when've": "when have",
"where'd": "where did",
"where's": "where has / where is",
"where've": "where have",
"who'll": "who shall / who will",
"who'll've": "who shall have / who will have",
"who's": "who has / who is",
"who've": "who have",
"why's": "why has / why is",
"why've": "why have",
"will've": "will have",
"won't": "will not",
"won't've": "will not have",
"would've": "would have",
"wouldn't": "would not",
"wouldn't've": "would not have",
"y'all": "you all",
"y'all'd": "you all would",
"y'all'd've": "you all would have",
"y'all're": "you all are",
"y'all've": "you all have",
"you'd": "you had / you would",
"you'd've": "you would have",
"you'll": "you shall / you will",
"you'll've": "you shall have / you will have",
"you're": "you are",
"you've": "you have",
"wanna": "want to",
"gonna": "going to",
"gotta": "have got to",
}
all_stopwords = nlp.Defaults.stop_words
def tokenizer(text):
text = text.replace("in'", "ing")
text = text.replace("m'kay", "mkay")
tokens = text.split()
tokens = [
re.sub(token, contractions[token], token)
if token in contractions.keys()
else token
for token in tokens
]
tokens = [token.strip(string.punctuation) for token in tokens]
tokens = [token.lower() for token in tokens]
tokens = [token for token in tokens if token.isalpha()]
tokens = [token for token in tokens if len(token) > 1]
tokens = [token for token in tokens if token not in all_stopwords]
# n = lambda pos: pos[:2].startswith("N")
# tokens = [word for (word, pos) in nltk.pos_tag(tokens) if n(pos)]
lemmatizer = nltk.wordnet.WordNetLemmatizer()
lemmas = [
lemmatizer.lemmatize(token, "v") if token != "butters" else "butters"
for token in tokens
]
lemmas = [
lemmatizer.lemmatize(token) if token != "butters" else "butters"
for token in lemmas
]
lemmas = [
lemma
for lemma in lemmas
if lemma
not in [
"think",
"know",
"come",
"want",
"look",
"right",
"try",
"need",
"say",
"yes",
"good",
"okay",
"people",
"time",
"tell",
"talk",
"stop",
"thing",
"mean",
"maybe",
"let",
]
]
lemmas = [
lemma.replace(lemma, "guys") if lemma == "guy" else lemma for lemma in lemmas
]
return " ".join(lemmas)
df["Lines_Final"] = df["Line2"].apply(tokenizer)
df.iloc[:, -4:]
img_dict = {
"Cartman": "https://i.imgur.com/DtPNaXk.png",
"Kyle": "https://i.imgur.com/5vBEIri.png",
"Kenny": "https://i.imgur.com/IbHV3iA.png",
"Stan": "https://i.imgur.com/Pl94sMm.png",
"Randy": "https://i.imgur.com/EgMCfsr.png",
"Mr. Mackey": "https://i.imgur.com/bdJE2SB.png",
"Butters": "https://i.imgur.com/EQAUV7i.png",
}
def plot_wordcloud(character, max_words=500):
character_df = df[df.Character == character]
txt = " ".join(character_df["Lines_Final"].values)
im = Image.open(BytesIO(get(img_dict[character]).content))
mask = np.array(im)
colors = ImageColorGenerator(mask)
fig, ax = plt.subplots(figsize=(15, 15))
stopwords = set(STOPWORDS)
wc = WordCloud(
background_color="black",
max_words=max_words,
mask=mask, # shape
stopwords=stopwords,
width=1000,
height=1000,
color_func=colors,
collocations=True, # bigrams
contour_color="white",
contour_width=1,
normalize_plurals=False, # For Butters
min_word_length=3,
random_state=42,
).generate(txt)
plt.imshow(wc, interpolation="bilinear")
plt.axis("off")
plt.savefig(character + ".png")
plt.show()
plot_wordcloud("Cartman", 100)
plot_wordcloud("Stan", 100)
plot_wordcloud("Kenny", 100)
plot_wordcloud("Kyle", 100)
plot_wordcloud("Butters", 100)
plot_wordcloud("Randy", 100)
plot_wordcloud("Mr. Mackey", 100)
|
[{"south-park-scripts-dataset/SouthPark_Lines.csv": {"column_names": "[\"Title\", \"Character\", \"Line\"]", "column_data_types": "{\"Title\": \"object\", \"Character\": \"object\", \"Line\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 95320 entries, 0 to 95319\nData columns (total 3 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Title 95320 non-null object\n 1 Character 95320 non-null object\n 2 Line 95308 non-null object\ndtypes: object(3)\nmemory usage: 2.2+ MB\n", "summary": "{\"Title\": {\"count\": 95320, \"unique\": 309, \"top\": \"Clubhouses\", \"freq\": 609}, \"Character\": {\"count\": 95320, \"unique\": 4698, \"top\": \"Cartman\", \"freq\": 11307}, \"Line\": {\"count\": 95308, \"unique\": 89169, \"top\": \"What?\", \"freq\": 374}}", "examples": "{\"Title\":{\"0\":\"Cartman Gets an Anal Probe\",\"1\":\"Cartman Gets an Anal Probe\",\"2\":\"Cartman Gets an Anal Probe\",\"3\":\"Cartman Gets an Anal Probe\"},\"Character\":{\"0\":\"Scene Description\",\"1\":\"The Boys\",\"2\":\"Kyle Broflovski\",\"3\":\"Ike Broflovski\"},\"Line\":{\"0\":\"At the bus stop.\",\"1\":\"School days, school days, teacher's golden ru...\",\"2\":\"Ah, damn it! My little brother's trying to follow me to school again.\",\"3\":\"Eat banana.\"}}"}}, {"south-park-scripts-dataset/SouthPark_Episodes.csv": {"column_names": "[\"Title\", \"Air Date\", \"Code\", \"#\", \"Description\", \"Season\", \"Episode\"]", "column_data_types": "{\"Title\": \"object\", \"Air Date\": \"object\", \"Code\": \"int64\", \"#\": \"int64\", \"Description\": \"object\", \"Season\": \"int64\", \"Episode\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 309 entries, 0 to 308\nData columns (total 7 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Title 309 non-null object\n 1 Air Date 309 non-null object\n 2 Code 309 non-null int64 \n 3 # 309 non-null int64 \n 4 Description 309 non-null object\n 5 Season 309 non-null int64 \n 6 Episode 309 non-null int64 \ndtypes: int64(4), object(3)\nmemory usage: 17.0+ KB\n", "summary": "{\"Code\": {\"count\": 309.0, \"mean\": 1109.31715210356, \"std\": 651.1267679332004, \"min\": 101.0, \"25%\": 513.0, \"50%\": 1102.0, \"75%\": 1609.0, \"max\": 2402.0}, \"#\": {\"count\": 309.0, \"mean\": 155.0, \"std\": 89.34483756770729, \"min\": 1.0, \"25%\": 78.0, \"50%\": 155.0, \"75%\": 232.0, \"max\": 309.0}, \"Season\": {\"count\": 309.0, \"mean\": 11.019417475728156, \"std\": 6.5221608977735945, \"min\": 1.0, \"25%\": 5.0, \"50%\": 11.0, \"75%\": 16.0, \"max\": 24.0}, \"Episode\": {\"count\": 309.0, \"mean\": 7.375404530744337, \"std\": 4.250459514667039, \"min\": 1.0, \"25%\": 4.0, \"50%\": 7.0, \"75%\": 10.0, \"max\": 18.0}}", "examples": "{\"Title\":{\"0\":\"Cartman Gets an Anal Probe\",\"1\":\"Weight Gain 4000\",\"2\":\"Volcano\",\"3\":\"Big Gay Al's Big Gay Boat Ride\"},\"Air Date\":{\"0\":\"August 13, 1997\",\"1\":\"August 20, 1997\",\"2\":\"August 27, 1997\",\"3\":\"September 3, 1997\"},\"Code\":{\"0\":101,\"1\":102,\"2\":103,\"3\":104},\"#\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"Description\":{\"0\":\"While the boys are waiting for the school bus, Cartman explains the odd nightmare he had the previous night involving alien visitors.\",\"1\":\"When Cartman's environmental essay wins a national contest, America's sweetheart, Kathie Lee Gifford, comes to South Park to present the award.\",\"2\":\"A weekend trip to experience the finer points of camping, fishing and blowing animals to 
smithereens is threatened by an erupting volcano.\",\"3\":\"When Stan discovers his new dog Sparky is gay, he becomes so confused he loses his will to play in the big Homecoming Football game against North Park.\"},\"Season\":{\"0\":1,\"1\":1,\"2\":1,\"3\":1},\"Episode\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4}}"}}]
| true | 2 |
<start_data_description><data_path>south-park-scripts-dataset/SouthPark_Lines.csv:
<column_names>
['Title', 'Character', 'Line']
<column_types>
{'Title': 'object', 'Character': 'object', 'Line': 'object'}
<dataframe_Summary>
{'Title': {'count': 95320, 'unique': 309, 'top': 'Clubhouses', 'freq': 609}, 'Character': {'count': 95320, 'unique': 4698, 'top': 'Cartman', 'freq': 11307}, 'Line': {'count': 95308, 'unique': 89169, 'top': 'What?', 'freq': 374}}
<dataframe_info>
RangeIndex: 95320 entries, 0 to 95319
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Title 95320 non-null object
1 Character 95320 non-null object
2 Line 95308 non-null object
dtypes: object(3)
memory usage: 2.2+ MB
<some_examples>
{'Title': {'0': 'Cartman Gets an Anal Probe', '1': 'Cartman Gets an Anal Probe', '2': 'Cartman Gets an Anal Probe', '3': 'Cartman Gets an Anal Probe'}, 'Character': {'0': 'Scene Description', '1': 'The Boys', '2': 'Kyle Broflovski', '3': 'Ike Broflovski'}, 'Line': {'0': 'At the bus stop.', '1': "School days, school days, teacher's golden ru...", '2': "Ah, damn it! My little brother's trying to follow me to school again.", '3': 'Eat banana.'}}
<end_description>
<start_data_description><data_path>south-park-scripts-dataset/SouthPark_Episodes.csv:
<column_names>
['Title', 'Air Date', 'Code', '#', 'Description', 'Season', 'Episode']
<column_types>
{'Title': 'object', 'Air Date': 'object', 'Code': 'int64', '#': 'int64', 'Description': 'object', 'Season': 'int64', 'Episode': 'int64'}
<dataframe_Summary>
{'Code': {'count': 309.0, 'mean': 1109.31715210356, 'std': 651.1267679332004, 'min': 101.0, '25%': 513.0, '50%': 1102.0, '75%': 1609.0, 'max': 2402.0}, '#': {'count': 309.0, 'mean': 155.0, 'std': 89.34483756770729, 'min': 1.0, '25%': 78.0, '50%': 155.0, '75%': 232.0, 'max': 309.0}, 'Season': {'count': 309.0, 'mean': 11.019417475728156, 'std': 6.5221608977735945, 'min': 1.0, '25%': 5.0, '50%': 11.0, '75%': 16.0, 'max': 24.0}, 'Episode': {'count': 309.0, 'mean': 7.375404530744337, 'std': 4.250459514667039, 'min': 1.0, '25%': 4.0, '50%': 7.0, '75%': 10.0, 'max': 18.0}}
<dataframe_info>
RangeIndex: 309 entries, 0 to 308
Data columns (total 7 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Title 309 non-null object
1 Air Date 309 non-null object
2 Code 309 non-null int64
3 # 309 non-null int64
4 Description 309 non-null object
5 Season 309 non-null int64
6 Episode 309 non-null int64
dtypes: int64(4), object(3)
memory usage: 17.0+ KB
<some_examples>
{'Title': {'0': 'Cartman Gets an Anal Probe', '1': 'Weight Gain 4000', '2': 'Volcano', '3': "Big Gay Al's Big Gay Boat Ride"}, 'Air Date': {'0': 'August 13, 1997', '1': 'August 20, 1997', '2': 'August 27, 1997', '3': 'September 3, 1997'}, 'Code': {'0': 101, '1': 102, '2': 103, '3': 104}, '#': {'0': 1, '1': 2, '2': 3, '3': 4}, 'Description': {'0': 'While the boys are waiting for the school bus, Cartman explains the odd nightmare he had the previous night involving alien visitors.', '1': "When Cartman's environmental essay wins a national contest, America's sweetheart, Kathie Lee Gifford, comes to South Park to present the award.", '2': 'A weekend trip to experience the finer points of camping, fishing and blowing animals to smithereens is threatened by an erupting volcano.', '3': 'When Stan discovers his new dog Sparky is gay, he becomes so confused he loses his will to play in the big Homecoming Football game against North Park.'}, 'Season': {'0': 1, '1': 1, '2': 1, '3': 1}, 'Episode': {'0': 1, '1': 2, '2': 3, '3': 4}}
<end_description>
| 2,838 | 5 | 4,001 | 2,838 |
69863785
|
<jupyter_start><jupyter_text>1.88 Million US Wildfires
### Context:
This data publication contains a spatial database of wildfires that occurred in the United States from 1992 to 2015. It is the third update of a publication originally generated to support the national Fire Program Analysis (FPA) system. The wildfire records were acquired from the reporting systems of federal, state, and local fire organizations. The following core data elements were required for records to be included in this data publication: discovery date, final fire size, and a point location at least as precise as Public Land Survey System (PLSS) section (1-square mile grid). The data were transformed to conform, when possible, to the data standards of the National Wildfire Coordinating Group (NWCG). Basic error-checking was performed and redundant records were identified and removed, to the degree possible. The resulting product, referred to as the Fire Program Analysis fire-occurrence database (FPA FOD), includes 1.88 million geo-referenced wildfire records, representing a total of 140 million acres burned during the 24-year period.
### Content:
This dataset is an SQLite database that contains the following information:
* Fires: Table including wildfire data for the period of 1992-2015 compiled from US federal, state, and local reporting systems.
* FOD_ID = Global unique identifier.
* FPA_ID = Unique identifier that contains information necessary to track back to the original record in the source dataset.
* SOURCE_SYSTEM_TYPE = Type of source database or system that the record was drawn from (federal, nonfederal, or interagency).
* SOURCE_SYSTEM = Name of or other identifier for source database or system that the record was drawn from. See Table 1 in Short (2014), or \Supplements\FPA_FOD_source_list.pdf, for a list of sources and their identifier.
* NWCG_REPORTING_AGENCY = Active National Wildfire Coordinating Group (NWCG) Unit Identifier for the agency preparing the fire report (BIA = Bureau of Indian Affairs, BLM = Bureau of Land Management, BOR = Bureau of Reclamation, DOD = Department of Defense, DOE = Department of Energy, FS = Forest Service, FWS = Fish and Wildlife Service, IA = Interagency Organization, NPS = National Park Service, ST/C&L = State, County, or Local Organization, and TRIBE = Tribal Organization).
* NWCG_REPORTING_UNIT_ID = Active NWCG Unit Identifier for the unit preparing the fire report.
* NWCG_REPORTING_UNIT_NAME = Active NWCG Unit Name for the unit preparing the fire report.
* SOURCE_REPORTING_UNIT = Code for the agency unit preparing the fire report, based on code/name in the source dataset.
* SOURCE_REPORTING_UNIT_NAME = Name of reporting agency unit preparing the fire report, based on code/name in the source dataset.
* LOCAL_FIRE_REPORT_ID = Number or code that uniquely identifies an incident report for a particular reporting unit and a particular calendar year.
* LOCAL_INCIDENT_ID = Number or code that uniquely identifies an incident for a particular local fire management organization within a particular calendar year.
* FIRE_CODE = Code used within the interagency wildland fire community to track and compile cost information for emergency fire suppression (https://www.firecode.gov/).
* FIRE_NAME = Name of the incident, from the fire report (primary) or ICS-209 report (secondary).
* ICS_209_INCIDENT_NUMBER = Incident (event) identifier, from the ICS-209 report.
* ICS_209_NAME = Name of the incident, from the ICS-209 report.
* MTBS_ID = Incident identifier, from the MTBS perimeter dataset.
* MTBS_FIRE_NAME = Name of the incident, from the MTBS perimeter dataset.
* COMPLEX_NAME = Name of the complex under which the fire was ultimately managed, when discernible.
* FIRE_YEAR = Calendar year in which the fire was discovered or confirmed to exist.
* DISCOVERY_DATE = Date on which the fire was discovered or confirmed to exist.
* DISCOVERY_DOY = Day of year on which the fire was discovered or confirmed to exist.
* DISCOVERY_TIME = Time of day that the fire was discovered or confirmed to exist.
* STAT_CAUSE_CODE = Code for the (statistical) cause of the fire.
* STAT_CAUSE_DESCR = Description of the (statistical) cause of the fire.
* CONT_DATE = Date on which the fire was declared contained or otherwise controlled (mm/dd/yyyy where mm=month, dd=day, and yyyy=year).
* CONT_DOY = Day of year on which the fire was declared contained or otherwise controlled.
* CONT_TIME = Time of day that the fire was declared contained or otherwise controlled (hhmm where hh=hour, mm=minutes).
* FIRE_SIZE = Estimate of acres within the final perimeter of the fire.
* FIRE_SIZE_CLASS = Code for fire size based on the number of acres within the final fire perimeter expenditures (A=greater than 0 but less than or equal to 0.25 acres, B=0.26-9.9 acres, C=10.0-99.9 acres, D=100-299 acres, E=300 to 999 acres, F=1000 to 4999 acres, and G=5000+ acres).
* LATITUDE = Latitude (NAD83) for point location of the fire (decimal degrees).
* LONGITUDE = Longitude (NAD83) for point location of the fire (decimal degrees).
* OWNER_CODE = Code for primary owner or entity responsible for managing the land at the point of origin of the fire at the time of the incident.
* OWNER_DESCR = Name of primary owner or entity responsible for managing the land at the point of origin of the fire at the time of the incident.
* STATE = Two-letter alphabetic code for the state in which the fire burned (or originated), based on the nominal designation in the fire report.
* COUNTY = County, or equivalent, in which the fire burned (or originated), based on nominal designation in the fire report.
* FIPS_CODE = Three-digit code from the Federal Information Process Standards (FIPS) publication 6-4 for representation of counties and equivalent entities.
* FIPS_NAME = County name from the FIPS publication 6-4 for representation of counties and equivalent entities.
* NWCG_UnitIDActive_20170109: Look-up table containing all NWCG identifiers for agency units that were active (i.e., valid) as of 9 January 2017, when the list was downloaded from https://www.nifc.blm.gov/unit_id/Publish.html and used as the source of values available to populate the following fields in the Fires table: NWCG_REPORTING_AGENCY, NWCG_REPORTING_UNIT_ID, and NWCG_REPORTING_UNIT_NAME.
* UnitId = NWCG Unit ID.
* GeographicArea = Two-letter code for the geographic area in which the unit is located (NA=National, IN=International, AK=Alaska, CA=California, EA=Eastern Area, GB=Great Basin, NR=Northern Rockies, NW=Northwest, RM=Rocky Mountain, SA=Southern Area, and SW=Southwest).
* Gacc = Seven or eight-letter code for the Geographic Area Coordination Center in which the unit is located or primarily affiliated with (CAMBCIFC=Canadian Interagency Forest Fire Centre, USAKCC=Alaska Interagency Coordination Center, USCAONCC=Northern California Area Coordination Center, USCAOSCC=Southern California Coordination Center, USCORMCC=Rocky Mountain Area Coordination Center, USGASAC=Southern Area Coordination Center, USIDNIC=National Interagency Coordination Center, USMTNRC=Northern Rockies Coordination Center, USNMSWC=Southwest Area Coordination Center, USORNWC=Northwest Area Coordination Center, USUTGBC=Western Great Basin Coordination Center, USWIEACC=Eastern Area Coordination Center).
* WildlandRole = Role of the unit within the wildland fire community.
* UnitType = Type of unit (e.g., federal, state, local).
* Department = Department (or state/territory) to which the unit belongs (AK=Alaska, AL=Alabama, AR=Arkansas, AZ=Arizona, CA=California, CO=Colorado, CT=Connecticut, DE=Delaware, DHS=Department of Homeland Security, DOC= Department of Commerce, DOD=Department of Defense, DOE=Department of Energy, DOI= Department of Interior, DOL=Department of Labor, FL=Florida, GA=Georgia, IA=Iowa, IA/GC=Non-Departmental Agencies, ID=Idaho, IL=Illinois, IN=Indiana, KS=Kansas, KY=Kentucky, LA=Louisiana, MA=Massachusetts, MD=Maryland, ME=Maine, MI=Michigan, MN=Minnesota, MO=Missouri, MS=Mississippi, MT=Montana, NC=North Carolina, NE=Nebraska, NG=Non-Government, NH=New Hampshire, NJ=New Jersey, NM=New Mexico, NV=Nevada, NY=New York, OH=Ohio, OK=Oklahoma, OR=Oregon, PA=Pennsylvania, PR=Puerto Rico, RI=Rhode Island, SC=South Carolina, SD=South Dakota, ST/L=State or Local Government, TN=Tennessee, Tribe=Tribe, TX=Texas, USDA=Department of Agriculture, UT=Utah, VA=Virginia, VI=U. S. Virgin Islands, VT=Vermont, WA=Washington, WI=Wisconsin, WV=West Virginia, WY=Wyoming).
* Agency = Agency or bureau to which the unit belongs (AG=Air Guard, ANC=Alaska Native Corporation, BIA=Bureau of Indian Affairs, BLM=Bureau of Land Management, BOEM=Bureau of Ocean Energy Management, BOR=Bureau of Reclamation, BSEE=Bureau of Safety and Environmental Enforcement, C&L=County & Local, CDF=California Department of Forestry & Fire Protection, DC=Department of Corrections, DFE=Division of Forest Environment, DFF=Division of Forestry Fire & State Lands, DFL=Division of Forests and Land, DFR=Division of Forest Resources, DL=Department of Lands, DNR=Department of Natural Resources, DNRC=Department of Natural Resources and Conservation, DNRF=Department of Natural Resources Forest Service, DOA=Department of Agriculture, DOC=Department of Conservation, DOE=Department of Energy, DOF=Department of Forestry, DVF=Division of Forestry, DWF=Division of Wildland Fire, EPA=Environmental Protection Agency, FC=Forestry Commission, FEMA=Federal Emergency Management Agency, FFC=Bureau of Forest Fire Control, FFP=Forest Fire Protection, FFS=Forest Fire Service, FR=Forest Rangers, FS=Forest Service, FWS=Fish & Wildlife Service, HQ=Headquarters, JC=Job Corps, NBC=National Business Center, NG=National Guard, NNSA=National Nuclear Security Administration, NPS=National Park Service, NWS=National Weather Service, OES=Office of Emergency Services, PRI=Private, SF=State Forestry, SFS=State Forest Service, SP=State Parks, TNC=The Nature Conservancy, USA=United States Army, USACE=United States Army Corps of Engineers, USAF=United States Air Force, USGS=United States Geological Survey, USN=United States Navy).
* Parent = Agency subgroup to which the unit belongs (A concatenation of State and Unit from this report - https://www.nifc.blm.gov/unit_id/publish/UnitIdReport.rtf).
* Country = Country in which the unit is located (e.g. US = United States).
* State = Two-letter code for the state in which the unit is located (or primarily affiliated).
* Code = Unit code (follows state code to create UnitId).
* Name = Unit name.
Kaggle dataset identifier: 188-million-us-wildfires
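A minimal sketch of reading the Fires table described above with sqlite3 and pandas (the .sqlite file name below is an assumption; use whatever file name the dataset version actually ships with):
import sqlite3
import pandas as pd
conn = sqlite3.connect("../input/188-million-us-wildfires/FPA_FOD_20170508.sqlite")  # assumed file name
fires = pd.read_sql_query(
    "SELECT FIRE_YEAR, STAT_CAUSE_DESCR, FIRE_SIZE, STATE FROM Fires LIMIT 5", conn
)
print(fires)
conn.close()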
<jupyter_script># **This notebook is an exercise in the [Pandas](https://www.kaggle.com/learn/pandas) course. You can reference the tutorial at [this link](https://www.kaggle.com/residentmario/indexing-selecting-assigning).**
# ---
# # Introduction
# In this set of exercises we will work with the [Wine Reviews dataset](https://www.kaggle.com/zynicide/wine-reviews).
# Run the following cell to load your data and some utility functions (including code to check your answers).
import pandas as pd
reviews = pd.read_csv("../input/wine-reviews/winemag-data-130k-v2.csv", index_col=0)
pd.set_option("display.max_rows", 5)
from learntools.core import binder
binder.bind(globals())
from learntools.pandas.indexing_selecting_and_assigning import *
print("Setup complete.")
# Look at an overview of your data by running the following line.
reviews.head()
# # Exercises
# ## 1.
# Select the `description` column from `reviews` and assign the result to the variable `desc`.
# Your code here
desc = reviews["description"]
# Check your answer
q1.check()
# Follow-up question: what type of object is `desc`? If you're not sure, you can check by calling Python's `type` function: `type(desc)`.
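# For the follow-up question: selecting a single column returns a pandas Series.
type(desc)  # -> pandas.core.series.Series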
# q1.hint()
# q1.solution()
# ## 2.
# Select the first value from the description column of `reviews`, assigning it to variable `first_description`.
first_description = desc[0]
# Check your answer
q2.check()
first_description
# q2.hint()
# q2.solution()
# ## 3.
# Select the first row of data (the first record) from `reviews`, assigning it to the variable `first_row`.
first_row = reviews.iloc[0, :]
# Check your answer
q3.check()
first_row
# q3.hint()
# q3.solution()
# ## 4.
# Select the first 10 values from the `description` column in `reviews`, assigning the result to variable `first_descriptions`.
# Hint: format your output as a pandas Series.
first_descriptions = desc.iloc[:10]
# Check your answer
q4.check()
first_descriptions
# q4.hint()
# q4.solution()
# ## 5.
# Select the records with index labels `1`, `2`, `3`, `5`, and `8`, assigning the result to the variable `sample_reviews`.
# In other words, generate the following DataFrame:
# 
sample_reviews = reviews.iloc[[1, 2, 3, 5, 8], :]
# Check your answer
q5.check()
sample_reviews
# q5.hint()
# q5.solution()
# ## 6.
# Create a variable `df` containing the `country`, `province`, `region_1`, and `region_2` columns of the records with the index labels `0`, `1`, `10`, and `100`. In other words, generate the following DataFrame:
# 
df = reviews.iloc[[0, 1, 10, 100], :][["country", "province", "region_1", "region_2"]]
# Check your answer
q6.check()
df
# q6.hint()
# q6.solution()
# ## 7.
# Create a variable `df` containing the `country` and `variety` columns of the first 100 records.
# Hint: you may use `loc` or `iloc`. When working on the answer to this question and several of the ones that follow, keep in mind the following "gotcha" described in the tutorial:
# > `iloc` uses the Python stdlib indexing scheme, where the first element of the range is included and the last one excluded.
# `loc`, meanwhile, indexes inclusively.
# > This is particularly confusing when the DataFrame index is a simple numerical list, e.g. `0,...,1000`. In this case `df.iloc[0:1000]` will return 1000 entries, while `df.loc[0:1000]` returns 1001 of them! To get 1000 elements using `loc`, you will need to go one lower and ask for `df.loc[0:999]`.
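# A quick way to see the difference described above (illustrative):
print(len(reviews.iloc[0:1000]), len(reviews.loc[0:1000]))  # -> 1000 1001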
df = reviews.loc[:99, ["country", "variety"]]
# Check your answer
q7.check()
df
# q7.hint()
# q7.solution()
# ## 8.
# Create a DataFrame `italian_wines` containing reviews of wines made in `Italy`. Hint: `reviews.country` equals what?
italian_wines = reviews[reviews.country == "Italy"]
# Check your answer
q8.check()
# q8.hint()
# q8.solution()
# ## 9.
# Create a DataFrame `top_oceania_wines` containing all reviews with at least 95 points (out of 100) for wines from Australia or New Zealand.
top_oceania_wines = reviews[
((reviews.country == "Australia") | (reviews.country == "New Zealand"))
& (reviews.points >= 95)
]
# Check your answer
q9.check()
top_oceania_wines
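# The same filter can also be written with .isin(), which reads well when the list of
# countries grows (illustrative alternative; the extra variable is only for comparison):
alt_top_oceania = reviews[
    reviews.country.isin(["Australia", "New Zealand"]) & (reviews.points >= 95)
]
assert alt_top_oceania.equals(top_oceania_wines)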
# q9.hint()
# q9.solution()
# ## Jupyter Notebook Tips
# This program we are using in the browser is called Jupyter Notebook and it is an interactive Python interpreter. We can type a Python command, press _Shift + Enter_, and that command is executed, with its result printed on screen.
# There are some very useful shortcuts for working with Jupyter Notebook:
# 0. 'Esc' - Leaves edit mode of the current cell (navigation mode)
# 1. 'Enter' - Edits the selected cell (navigation mode)
# 2. 'Shift + Enter' - Runs the selected cell (edit mode)
# 3. 'A' - Adds a new cell above the current cell (navigation mode)
# 4. 'B' - Adds a new cell below the current cell (navigation mode)
# 5. 'X' - Cuts the selected cell (navigation mode)
# 6. 'Z' - Undoes a cell deletion (navigation mode)
# 7. 'Ctrl + Z' - Undoes the latest edits (edit mode)
# 8. 'Ctrl + Shift + Z' - Redoes the last undone edit (edit mode)
# 9. 'H' - Opens a help page with all the shortcuts (navigation mode)
# 10. 'Tab' - Suggests code completions (edit mode)
# 11. 'Shift + Tab' - Shows the signature of a method or function (edit mode)
# 12. '?function' - Shows the documentation of a given function (edit mode)
#
# # Python Review
# ## Strings
# Below is a brief review of `strings`:
texto = "machine learning python UnB"
texto.lower()
texto = texto.upper()
texto
texto[:4]
texto[10:14]
texto[::-1]
# `.split()` generates a list of strings by splitting the original string at every space.
texto.split()
texto_dividido = texto.split()
texto_dividido
texto_dividido[-1]
texto_dividido[-1]
# The `.join()` method joins the strings in a list into a single string.
" machine learning "
" Azul ".join(texto_dividido)
# The `.replace()` method replaces one part of the text with another text.
texto = "THE MACHINE IS DEEP"
texto.replace("machine", "deep")
texto
texto1 = texto.replace("machine", "deep")
texto
# The `len()` function tells us the length of the text, that is, how many characters it has.
len(texto)
# **Exercise 1:**
# DNA is made up of 4 bases: Guanine ('G'), Cytosine ('C'), Adenine ('A'), and Thymine ('T').
# RNA is also made up of 4 bases, but instead of Thymine ('T') it has Uracil ('U').
# Let's take a text of the form `GCATATAC` and return its RNA conversion, which would be `GCAUAUAC`.
texto = "GCATATAC"
texto.replace("T", "U")
# Link to a site with lots of programming exercises: https://www.codewars.com
pessoas = {"nome": "Luciano", "sexo": "masculino", "idade": 46}
print(pessoas)
pessoas["time"] = "Corinthians"
print(pessoas)
del pessoas["idade"]
print(pessoas)
# ## Lists and For
lista = [1, 3, 5, 7]
lista[-4]
lista_quadrado = []
for x in lista:
lista_quadrado.append(x**2)
lista
lista_quadrado
lista_quadrado[:2]
lista_quadrado[-4:]
lista_inversa = lista_quadrado[::-1]
len(lista_quadrado)
# ## Listcomp
# Another way to run a `for` loop and build a list at the same time is a list comprehension (listcomp), which is faster and more concise. It is a very good practice in Python.
lista_quadrado2 = [numero**2 for numero in lista]
lista_quadrado2
# ## Functions
def eleva_quadrado(lista):
return [numero**2 for numero in lista]
eleva_quadrado(lista)
temperatura = [1, -2, 6, -3]
eleva_quadrado(temperatura)
# ## Comparisons and Booleans
# `False` is represented as `0`, and `True` is represented as `1`:
0 == False
1 == True
lista
lista[False]
lista[True]
# ## If and Else
# Note: the modulo operation is the operation that extracts the remainder of a division. There are several ways to obtain the remainder, but Python, like most programming languages, provides an operator for this purpose.
# The percent sign % is the modulo operator. Even if it does not look very intuitive, it is close to a standard across programming languages.
# On the left side we place the number to be divided, and on the right side the divisor.
lista = []
lista_par = []
for x in range(8):
if x % 2:
lista.append(x)
else:
lista_par.append(x)
lista
lista_par
# **Exercise 2**:
# Create a function that receives a list of numbers and returns the sum of the non-negative numbers:
# Example: [1,-4,7,12] => 1 + 7 + 12 = 20
# Note: if there is nothing to sum, return 0.
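# One possible solution sketch (the helper name `sum_non_negative` is just illustrative):
def sum_non_negative(numbers):
    # sum() over an empty generator returns 0, which covers the "nothing to sum" case
    return sum(n for n in numbers if n >= 0)

sum_non_negative([1, -4, 7, 12])  # 20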
a = [1, -2, 3, 4, 5]
a[::-1]
# ## Dictionaries
# A dictionary maps a key to a value.
alfabeto = {"A": 1, "B": 2, "C": 3}
alfabeto
alfabeto["C"]
lista = [1, 2, 3]
a, b, c = lista
a
b
for k in alfabeto:
print(k)
for v in alfabeto.values():
print(v)
for k, v in alfabeto.items():
print(k, v)
len(alfabeto)
# **Exercise**
# Create a function that turns a list of lists (each inner list always with 2 elements) into a dictionary:
# animals = [["cat", "dog"], ["duck", "cow"]] --> {"cat" : "dog", "duck" : "cow"}
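# One possible solution sketch (the name `pairs_to_dict` is illustrative); a dict comprehension unpacks each 2-element inner list:
def pairs_to_dict(pairs):
    # dict(pairs) would work just as well here
    return {key: value for key, value in pairs}

animals = [["cat", "dog"], ["duck", "cow"]]
pairs_to_dict(animals)  # {'cat': 'dog', 'duck': 'cow'}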
# ## The Dataset
# In this notebook we will use data made available on the Kaggle platform about bike-sharing systems, which exist today in many cities.
# In most of these systems, people can rent a bicycle at one point in the city and return it at another, using specific docking stations. The data generated by these systems are interesting for analysis and Machine Learning work, since information such as trip duration, start and end points, and time spent on the trip is usually recorded.
# In this specific case, we have data on the bike-sharing system of Washington, D.C., combined with weather data. The data correspond to rentals made over 2 years, split by hour. The training data cover the first 19 days of each month, while the test data go from day 20 to the end of the month.
# Our goal is to use Machine Learning to predict the total number of bicycles rented each hour, based on the previous rental data.
# More details about the dataset -> [Bike Sharing Demand](https://www.kaggle.com/c/bike-sharing-demand)
# ## First Step: Exploratory Data Analysis
# ## Importing Libraries
# To work with tabular data, we will use the `pandas` library
import pandas as pd
df = pd.read_csv("../input/train.csv")
df.shape
df.head()
df.tail(7)
df.sample(5)
df.sample(7).T
df.info()
df.describe()
# ## Selecting Rows:
# There are a few different ways to select rows; `.loc` and `.iloc` are the most recommended because their behavior is the most predictable.
df[20:30]
df.iloc[20:30]
df.loc[20:30]
# ## Selecting Columns:
# There are a few ways to select columns. We recommend `df['humidity']` because it works with column names that contain spaces and it is concise.
df.head()
df["temp"]
df.humidity.mean()
df.loc[10:20, "humidity"]
df.iloc[3:5, 7]
# ## Series and DataFrame
# Our variable `df` is a `DataFrame`:
type(df)
# When we select a single column we get an object of type `Series`.
type(df["temp"])
# ## Exploring a Series
df["datetime"].describe()
df["temp"].value_counts()
df["temp"].mode()
# **Exercise 3:**
# 1. Call `.describe()` on the `atemp` column
# 2. Call `.value_counts()` on the `atemp` column
# 3. Call `.mode()` on the `atemp` column
# ## Selecting Multiple Columns:
bd = df[["workingday", "humidity"]]
bd.head(3)
# **Exercise 4:**
# 1. Call `.describe()` on a selection made up of the `temp` and `atemp` columns.
# ## How to select rows and columns with `.loc` and `.iloc`?
df.head(3).T
df.loc[20:30, "workingday":"humidity"]
# **Exercise 5:**
# Select the equivalent of `df.loc[20:30, 'workingday':'humidity']` using the `.iloc` command:
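# One possible answer, assuming the default column order of this train.csv (where `workingday` is column 3 and `humidity` is column 7): `.loc` is label-inclusive, so rows 20:30 become positions 20:31 and columns 'workingday':'humidity' become positions 3:8:
df.iloc[20:31, 3:8]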
# ## How to select cells with `.at` and `.iat`?
df.at[29, "humidity"]
# **Exercise 6:**
# Select the equivalent of the command `df.at[20, 'humidity']` using the `.iat` command:
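# One possible answer, again assuming `humidity` sits at column position 7 in this dataset:
df.iat[20, 7]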
# ## Working with Dates
df.dtypes
df["datetime"] = pd.to_datetime(df["datetime"])
df.dtypes
# Alternatively, we could have done:
df = pd.read_csv("../input/train.csv", parse_dates=[0])
# ## The `.dt` attributes:
# Once we convert the `datetime` column to the `datetime64[ns]` type, we get a set of features that we can access through the `.dt` attribute:
df["datetime"].dt.month
# ## Creating new columns:
df["month"] = df["datetime"].dt.month
df["year"] = df["datetime"].dt.year
# **Exercise 7:**
# Create new columns using the attributes of `df['datetime'].dt`, just as we did in the command above:
#
# year
# day
# dayofweek
# hour
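# A possible solution, following the same pattern used for the `month` column above:
df["year"] = df["datetime"].dt.year
df["day"] = df["datetime"].dt.day
df["dayofweek"] = df["datetime"].dt.dayofweek
df["hour"] = df["datetime"].dt.hour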
# ## Filters
df["month"] == 7
# When we pass a sequence of `True` and `False` values into a `DataFrame` or `Series`, it returns only the rows that meet that criterion.
bd = df[df["month"] == 7]
bd.head()
bd = df[(df["month"] == 1) & (df["temp"] < 14)]
bd.info()
bd = df[(df["month"] == 1) | (df["temp"] < 14)]
bd.tail(5).T
df[(df["month"] == 1) & (df["temp"] < 14)].shape, df[
(df["month"] == 1) | (df["temp"] < 14)
].shape
# **Exercise 8:**
# Select the rows where `temp` is greater than its median and `count` is greater than its top-25% cutoff (the 75th percentile).
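# One way to express this filter, using `.median()` and `.quantile(0.75)`:
df[(df["temp"] > df["temp"].median()) & (df["count"] > df["count"].quantile(0.75))]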
# # Evaluating the Number of Distinct Values per Column
df.nunique()
# **Exercise 9:**
# 1. Call `.value_counts()` on the `season` column and compare it with the values in the cell above.
# ## Histograms and Boxplot
# To evaluate the distribution of the values, we can use the plots below:
df["temp"].hist()
df["temp"].describe()
df["temp"].plot.box()
# **Exercise 10:**
#
# 1. Plot a histogram of the `humidity` column
# 2. Plot a boxplot of the `humidity` column
# ## Using Seaborn
# `seaborn` has very good documentation, it is worth a look:
# https://seaborn.pydata.org/tutorial/distributions.html
import seaborn as sns
sns.distplot(df["temp"], bins=10)
sns.boxplot(y="temp", data=df)
# We can use categorical data on the `x` axis to make plots like the one below:
sns.boxplot(y="temp", x="season", data=df)
# Another nice plot is the violin plot, which shows a `boxplot` together with the distribution of the values:
sns.boxplot(y="temp", x="season", data=df, hue="weather")
sns.violinplot(y="temp", data=df)
sns.violinplot(y="temp", x="season", data=df)
sns.violinplot(y="temp", x="season", data=df)
sns.violinplot(y="temp", x="season", data=df, hue="weather")
# **Exercise 11:**
# 1. Make a boxplot using the `humidity` and `weather` variables.
# 2. Make a violin plot using the `humidity` and `weather` variables.
# ## Group By
# pandas has a `.groupby` similar to the one in `SQL`:
df.head(5)
df.groupby("workingday")["count"].mean()
df.groupby("workingday")["count"].mean().plot.bar()
sns.distplot(df["temp"], bins=10)
# **Exercise 12:**
# 1. Group the rentals by `month` and take the median of the `count` column.
# (Hint: there is a `.median()` method that works just like `.mean()`.)
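# A possible solution:
df.groupby("month")["count"].median()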
# ## Plotting Similar Charts with Seaborn
# Seaborn's `barplot` does something very similar to what we did above with `.groupby()`:
import pandas as pd
import seaborn as sns
df = pd.read_csv("../input/train.csv")
df["datetime"] = pd.to_datetime(df["datetime"])
df["month"] = df["datetime"].dt.month
sns.distplot(df["temp"], bins=10)
sns.barplot(y="count", x="workingday", data=df)
sns.barplot(y="count", x="season", data=df)
# We can add the `hue='workingday'` parameter to evaluate a third variable in the same plot:
sns.barplot(y="count", x="season", hue="workingday", data=df)
# **Exercise 13:**
# 1. Plot a bar chart using the `casual` and `weather` variables
# ## Exploratory Analysis of Groups
# A nice way to evaluate groups is to call `.describe()` after a `.groupby()`:
df.groupby("month")["count"].describe()
df.groupby("month")["count"].describe()["mean"].sort_index(ascending=False).plot()
df.groupby("month")["count"].describe()["std"].sort_index(ascending=False).plot()
df.groupby("month")["count"].describe()["mean"].sort_index(ascending=False).plot()
df.groupby("month")["count"].describe()["50%"].sort_index(ascending=False).plot()
# **Exercise 14:**
# 1. Plot a line chart with the maximum values per month
# 2. Plot a line chart with the minimum values per month
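# A possible solution using `.max()` and `.min()` directly on the grouped column:
df.groupby("month")["count"].max().plot()
df.groupby("month")["count"].min().plot()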
# ## Continuous Variables
# An interesting plot for continuous variables is the `pairplot`:
sns.pairplot(x_vars="temp", y_vars="count", data=df, size=7)
# We can color-code the points according to the `season` column:
sns.pairplot(x_vars="temp", y_vars="count", data=df, hue="season", size=7)
# We can draw linear regressions with the `kind='reg'` parameter:
sns.pairplot(
x_vars="humidity", y_vars="count", data=df, hue="season", size=7, kind="reg"
)
# **Exercise 15:**
# 1. Make a `pairplot` between `temp` and `casual`, using `hue='holiday'`.
# 2. Add the `kind='reg'` parameter
# ## Correlation
# We can compute the correlation by calling the `.corr()` method on a `DataFrame`:
df[["humidity", "count"]].corr()
# We can even do this after a `.groupby()`:
df.groupby("season")[["humidity", "count"]].corr()
df.corr()
sns.heatmap(df.corr())
# **Exercise 16:**
#
# 1. Compute the correlation between `temp` and `casual`.
# 2. Compute the correlation between `temp` and `casual` grouping by `holiday`
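# A possible solution, mirroring the grouped `.corr()` call shown above:
df[["temp", "casual"]].corr()
df.groupby("holiday")[["temp", "casual"]].corr()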
# ## Sort
# We can sort by the index:
df.sort_index()
df.sort_index(inplace=True)
# Or by a column:
df.sort_values(by="count", ascending=False)
# Or by a set of columns:
df.sort_values(["count", "registered"])
# **Exercise 17:**
# 1. Return a DataFrame sorted by `humidity` and `weather`
# ## Shift
# `.shift()` shifts a column up or down relative to the row index.
df["count"].shift(1)
df["last_count_1"] = df["count"].shift(1)
df.T
for i in range(1, 6):
df["last_count_" + str(i)] = df["count"].shift(i)
df
# **Exercise 17:**
# 1. Write a `for` loop to create the columns `last_registered_1` through `last_registered_5` from the `registered` column
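# A possible solution, mirroring the loop used for `count` above:
for i in range(1, 6):
    df["last_registered_" + str(i)] = df["registered"].shift(i)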
# ## Dealing with NaNs
# Now we have missing values in our `DataFrame`:
df.info()
# We can drop them:
df.dropna()
# Or we can fill them in:
df.fillna(-1)
# **Exercise 18:**
# 1. Use `.fillna(-1)` with the `inplace=True` parameter
# ## Categories
# The `Category` data type is very useful for reducing memory usage while making the data easier to visualize and understand.
# It takes texts / objects, stores them in memory as numbers, and keeps a map from those numbers to the corresponding texts / objects.
# When we show a chart of the grouped months, we do not get the month names on the y axis.
df.groupby("month")["count"].mean().plot.barh()
# We can solve this by converting it into a categorical column.
df.info()
df["month"] = df["month"].astype("category")
# There are two important attributes: `.categories`, which is the map of the codes:
df["month"].cat.categories
# And `.codes`, which is the code used for the internal representation:
df["month"].cat.codes
# We can access `.cat.categories` directly and overwrite the months to change the map:
df["month"].cat.categories = [
"Janeiro",
"Fevereiro",
"Março",
"Abril",
"Maio",
"Junho",
"Julho",
"Agosto",
"Setembro",
"Outubro",
"Novembro",
"Dezembro",
]
df.groupby("month")["count"].mean().plot.barh()
# If we want the map to be ordered, we have to use `.cat.as_ordered(True)` to tell `pandas` that this category is orderable in its current order:
df["month"].cat.as_ordered(True)
df.groupby("month")["count"].mean().sort_index(ascending=False).plot.barh()
# **Exercise 19:**
#
# 1) Do with the `season` column the same thing that was done with the `month` column, that is, convert it into a column with a categorical data type.
# Remember that the codes correspond to:
# 1. Spring
# 2. Summer
# 3. Autumn
# 4. Winter
#
# 2) Plot a bar chart with the average temperature per `season`.
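# A possible solution, mirroring the approach used for `month` above (category labels are assigned in code order, 1=Spring ... 4=Winter; newer pandas versions would use `.cat.rename_categories` instead of assigning to `.cat.categories`):
df["season"] = df["season"].astype("category")
df["season"].cat.categories = ["Spring", "Summer", "Autumn", "Winter"]
df.groupby("season")["temp"].mean().plot.bar()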
# ## Resample
# Let's change the index of our `DataFrame` so we can use `.resample()`:
df.set_index("datetime", inplace=True)
# Let's take a look at our `DataFrame`:
df.head()
# Some of `.resample()`'s behaviors work very much like a `.groupby()` for dates:
df.resample("M")["count"].mean()
df.resample("M")["count"].mean().plot.barh(figsize=(20, 10))
df.resample("2M")["count"].mean().plot.barh()
df.resample("Q")["count"].mean().plot.barh()
df.resample("Y")["count"].mean().plot.barh()
# **Exercise 20:**
# 1. Create a semiannual resample.
# 2. Plot a bar chart of the result of the previous step.
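# A possible solution; the '6M' frequency alias groups the data into six-month bins:
df.resample("6M")["count"].mean()
df.resample("6M")["count"].mean().plot.barh()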
# ## Transform
# `.transform()` is used to attach the results of an aggregation back onto the original table:
df["count_mean_mo"] = df.groupby(["month"])["count"].transform("mean")
df["media_mensal"] = df.groupby(["month"])["count"].transform("mean")
Stanford Dogs Dataset (Train/test)
### Context
Modified version of Jessica Li's dataset, where I made some image processing operations. I cropped the images to have the dog in the center of the picture. All the images should have the same resolution.
### Content
You'll find here a training folder with 120 folders corresponding to the 120 breeds and images of the corresponding dog breed inside and a testing folder structured in a the same manner.
Kaggle dataset identifier: stanford-dogs-dataset-traintest
import numpy as np
import pandas as pd
import tensorflow as tf
import os
from os import listdir
import cv2
from sklearn.model_selection import train_test_split
from tqdm import tqdm
import shutil
shutil.copytree(
"/kaggle/input/stanford-dogs-dataset-traintest/cropped/train/", "/dev/shm/train"
)
import numpy as np
import pandas as pd
import tensorflow as tf
import os
from os import listdir
import cv2
from sklearn.model_selection import train_test_split
from tqdm import tqdm
train_path = "/dev/shm/train/"
label_dict = {k: i for i, k in enumerate(os.listdir(train_path))}
train_file_paths = []
train_labels = []
for label in os.listdir(train_path):
for file in os.listdir(train_path + label):
train_file_path = train_path + label + "/{}".format(file)
train_file_paths.append(train_file_path)
train_labels.append(label_dict[label])
from tqdm import tqdm
test_path = "/kaggle/input/stanford-dogs-dataset-traintest/cropped/test/"
# label_dict = {k: i for i, k in enumerate(os.listdir(test_path))}
test_file_paths = []
test_labels = []
for label in os.listdir(test_path):
for file in os.listdir(test_path + label):
test_file_path = test_path + label + "/{}".format(file)
test_file_paths.append(test_file_path)
test_labels.append(label_dict[label])
rng = tf.random.Generator.from_seed(2434, alg="philox")
BATCH_SIZE = 5
IMAGE_HEIGHT = 224
IMAGE_WIDTH = 224
def read_image(path, label):
    # Load one image and produce two independently augmented views of it
    # (the "query" and "key" views that the MoCo-style model contrasts).
    image = tf.io.read_file(path)
    image = tf.image.decode_png(image, channels=3)
    # View 0: random upscale (1x-3x), random crop back to the target size,
    # then stateless flip and color jitter.
    scale = tf.random.uniform([1], minval=1, maxval=3)
    new_size = tf.cast(
        tf.constant([IMAGE_HEIGHT, IMAGE_WIDTH], dtype=tf.float32) * scale, tf.int32
    )
    image0 = tf.image.resize(image, new_size)
    seed = rng.make_seeds(2)[0]
    image0 = tf.image.random_crop(image0, [IMAGE_HEIGHT, IMAGE_WIDTH, 3])
    image0 = tf.image.stateless_random_flip_left_right(image0, seed)
    image0 = tf.image.stateless_random_saturation(image0, 0.5, 1.0, seed)
    image0 = tf.image.stateless_random_hue(image0, 0.2, seed)
    image0 = tf.image.stateless_random_brightness(image0, 0.4, seed)
    # View 1: same recipe with a wider scale range (1x-5x) and milder hue jitter.
    scale = tf.random.uniform([1], minval=1, maxval=5)
    new_size = tf.cast(
        tf.constant([IMAGE_HEIGHT, IMAGE_WIDTH], dtype=tf.float32) * scale, tf.int32
    )
    image1 = tf.image.resize(image, new_size)
    seed = rng.make_seeds(2)[0]
    image1 = tf.image.random_crop(image1, [IMAGE_HEIGHT, IMAGE_WIDTH, 3])
    image1 = tf.image.stateless_random_flip_left_right(image1, seed)
    image1 = tf.image.stateless_random_saturation(image1, 0.5, 1.0, seed)
    image1 = tf.image.stateless_random_hue(image1, 0.1, seed)
    image1 = tf.image.stateless_random_brightness(image1, 0.4, seed)
    return image0, image1, label
dev_paths, val_paths, dev_labels, val_labels = train_test_split(
train_file_paths, train_labels, test_size=0.2, random_state=42
)
BATCH_SIZE = 5
SHUFFLE_BUFFER_SIZE = len(dev_labels)
dev_dataset = tf.data.Dataset.from_tensor_slices(
(train_file_paths, train_labels)
).shuffle(SHUFFLE_BUFFER_SIZE)
for image0, image1, label in (
dev_dataset.map(read_image, num_parallel_calls=tf.data.AUTOTUNE)
.batch(BATCH_SIZE)
.prefetch(tf.data.AUTOTUNE)
):
break
import matplotlib.pyplot as plt
fig, axs = plt.subplots(2, BATCH_SIZE, figsize=(20, 4))
axs = axs.flatten()
for i in range(BATCH_SIZE):
axs[i].imshow(image0.numpy()[i].astype(np.uint8))
axs[i + BATCH_SIZE].imshow(image1.numpy()[i].astype(np.uint8))
class Encoder(tf.keras.Model):
def __init__(self, cnn, projection_dim):
super(Encoder, self).__init__()
self.cnn = cnn
self.dense = tf.keras.layers.Dense(128, activation="relu")
self.top = tf.keras.layers.Dense(projection_dim)
def call(self, X, training=False):
X = self.cnn(X)
X = self.dense(X)
pred = self.top(X)
return pred
class SimpleMoCo(tf.keras.Model):
    # Minimal MoCo-style model: a query encoder, a momentum (key) encoder and a
    # fixed-size memory bank (queue) of K negative keys of dimension projection_dim.
    def __init__(self, encoder, momentum_encoder, projection_dim, K=2**12, m=0.99):
        super(SimpleMoCo, self).__init__()
        self.K = K  # queue length
        self.m = m  # momentum coefficient for the key-encoder update
        self.i = 0  # write pointer into the queue
        self.encoder = encoder
        self.momentum_encoder = momentum_encoder
        self.memory_bank = tf.Variable(
            tf.random.normal((K, projection_dim)), trainable=False
        )

    def call(self, X0, X1, training=False):
        # Queries come from the encoder, keys from the momentum encoder.
        Q = self.encoder(X0)
        X1 = self.momentum_encoder(X1)
        # Keys = current-batch keys + queued negatives; gradients never flow into the keys.
        K = tf.stop_gradient(tf.concat([X1, self.memory_bank], axis=0))
        # Logits: similarity of every query against every key (positives on the diagonal).
        pred = tf.matmul(Q, K, transpose_b=True)
        return pred, X1

    def adjust(self, K):
        # Momentum (EMA) update of the key encoder from the query encoder:
        # theta_k <- m * theta_k + (1 - m) * theta_q.
        for layer_e, layer_m in zip(self.encoder.layers, self.momentum_encoder.layers):
            new_weights = []
            for weight_e, weight_m in zip(layer_e.get_weights(), layer_m.get_weights()):
                new_weights.append(self.m * weight_m + (1 - self.m) * weight_e)
            layer_m.set_weights(new_weights)
        # Enqueue the newest keys at the write pointer (circular buffer) and advance
        # the pointer by the batch size so old negatives are gradually overwritten.
        ixs = (np.arange(len(K)) + self.i) % self.K
        self.memory_bank.assign(
            tf.tensor_scatter_nd_update(self.memory_bank, ixs.reshape(-1, 1), K)
        )
        self.i += len(K)
from tensorflow.keras.applications import EfficientNetB3
cnn0 = EfficientNetB3(
weights="imagenet", include_top=False, pooling="avg", input_shape=None
)
for layer in cnn0.layers:
layer.trainable = False
cnn0.compile()
cnn1 = EfficientNetB3(
    weights="imagenet", include_top=False, pooling="avg", input_shape=None
)
# Freeze the key (momentum) encoder's backbone as well; only the projection heads are trained.
for layer in cnn1.layers:
    layer.trainable = False
cnn1.compile()
encoder = Encoder(cnn0, 128)
momentum_encoder = Encoder(cnn1, 128)
model = SimpleMoCo(encoder, momentum_encoder, 128, K=2**12, m=0.999)
NUM_EPOCH = 10
BATCH_SIZE = 64
SHUFFLE_BUFFER_SIZE = len(dev_labels)
dev_dataset = tf.data.Dataset.from_tensor_slices(
(train_file_paths, train_labels)
).shuffle(SHUFFLE_BUFFER_SIZE)
from adabelief_tf import AdaBeliefOptimizer
from tensorflow_addons.metrics import F1Score
from tqdm import tqdm
optimizer = AdaBeliefOptimizer(
learning_rate=0.01, weight_decay=0, epsilon=1e-14, print_change_log=False
)
loss_object = tf.keras.losses.CategoricalCrossentropy(from_logits=True)
dev_loss = tf.keras.metrics.Mean(name="dev_loss")
@tf.function
def train_step(X0, X1):
    # InfoNCE target: for each query, the positive key is the matching key from the
    # same image (identity block); every queued key counts as a negative (zeros).
    y = tf.concat([tf.eye(len(X0)), tf.zeros((len(X0), model.K))], axis=1)
    with tf.GradientTape() as tape:
        logit, K = model.call(X0, X1, training=True)
        loss = loss_object(y, logit)
    # Only the query encoder receives gradients; the key encoder is updated by momentum.
    gradients = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    dev_loss.update_state(loss)
    return K
with tqdm(total=NUM_EPOCH) as pbar:
for epoch in range(NUM_EPOCH):
step = 0
for X0, X1, label in (
dev_dataset.map(read_image, num_parallel_calls=tf.data.AUTOTUNE)
.batch(BATCH_SIZE)
.prefetch(tf.data.AUTOTUNE)
):
X0 = tf.cast(X0, "float32")
X1 = tf.cast(X1, "float32")
K = train_step(X0, X1)
model.adjust(K)
learning_text = "[{}/{}] ".format(
str(step).zfill(4), len(dev_dataset) // BATCH_SIZE
)
progress_text = "dev | Loss: {:.5f} ".format(dev_loss.result().numpy())
pbar.set_postfix_str(learning_text + progress_text)
step += 1
dev_loss.reset_states()
pbar.update(1)
def read_image(path, label):
image = tf.io.read_file(path)
image = tf.image.decode_png(image, channels=3)
return image, label
test_dataset = tf.data.Dataset.from_tensor_slices((test_file_paths, test_labels))
model_image_embeddings = []
cnn_image_embeddings = []
labels = []
cnn = EfficientNetB3(
weights="imagenet", include_top=False, pooling="avg", input_shape=None
)
for X0, label in tqdm(
test_dataset.map(read_image, num_parallel_calls=tf.data.AUTOTUNE)
.batch(BATCH_SIZE)
.prefetch(tf.data.AUTOTUNE)
):
X0 = tf.cast(X0, "float32")
model_image_embeddings.append(encoder(X0).numpy())
cnn_image_embeddings.append(cnn(X0).numpy())
labels.append(label.numpy())
model_image_embeddings = np.vstack(model_image_embeddings)
model_image_embeddings = model_image_embeddings / np.linalg.norm(
model_image_embeddings, ord=2, axis=1, keepdims=True
)
cnn_image_embeddings = np.vstack(cnn_image_embeddings)
labels = np.concatenate(labels)
import umap
ixs = np.arange(len(model_image_embeddings))
np.random.shuffle(ixs)
mapper = umap.UMAP(n_neighbors=15, n_components=2, metric="cosine", verbose=True).fit(
model_image_embeddings[ixs]
)
image_umap_embeddings = mapper.transform(model_image_embeddings)
import seaborn as sns
fig, ax = plt.subplots(figsize=(12, 12))
sns.scatterplot(
image_umap_embeddings[:, 0],
image_umap_embeddings[:, 1],
s=5,
hue=labels,
palette="hsv",
ax=ax,
)
plt.legend()
from sklearn.decomposition import TruncatedSVD
import umap
import seaborn as sns
svd_image_embeddings = TruncatedSVD(n_components=128).fit_transform(
cnn_image_embeddings
)
ixs = np.arange(len(svd_image_embeddings))
np.random.shuffle(ixs)
mapper = umap.UMAP(n_neighbors=15, n_components=2, metric="cosine", verbose=True).fit(
svd_image_embeddings[ixs]
)
image_umap_embeddings = mapper.transform(svd_image_embeddings)
fig, ax = plt.subplots(figsize=(12, 12))
sns.scatterplot(
    x=image_umap_embeddings[:, 0],
    y=image_umap_embeddings[:, 1],
    s=5,
    hue=labels,
    palette="hsv",
    ax=ax,
)
plt.legend()
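# A rough linear-probe comparison of the two embedding sets (a sketch only; the split and
# classifier settings are arbitrary choices, not part of the original notebook): fitting a
# simple logistic-regression probe on each embedding gives a quick sense of how much label
# information the MoCo encoder retains versus the frozen ImageNet features.
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
for probe_name, emb in [("moco", model_image_embeddings), ("imagenet", cnn_image_embeddings)]:
    X_tr, X_te, y_tr, y_te = train_test_split(emb, labels, test_size=0.2, random_state=0)
    probe = LogisticRegression(max_iter=1000).fit(X_tr, y_tr)
    print(probe_name, "linear probe accuracy:", probe.score(X_te, y_te))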
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/439/69439171.ipynb
|
stanford-dogs-dataset-traintest
|
miljan
|
[{"Id": 69439171, "ScriptId": 18952488, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1958529, "CreationDate": "07/31/2021 03:31:51", "VersionNumber": 3.0, "Title": "moco for transfer learning", "EvaluationDate": "07/31/2021", "IsChange": false, "TotalLines": 278.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 278.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 92608027, "KernelVersionId": 69439171, "SourceDatasetVersionId": 308849}]
|
[{"Id": 308849, "DatasetId": 129000, "DatasourceVersionId": 321827, "CreatorUserId": 1983780, "LicenseName": "Other (specified in description)", "CreationDate": "02/28/2019 12:22:40", "VersionNumber": 1.0, "Title": "Stanford Dogs Dataset (Train/test)", "Slug": "stanford-dogs-dataset-traintest", "Subtitle": "Processed version of the raw data", "Description": "### Context\n\nModified version of Jessica Li's dataset, where I made some image processing operations. I cropped the images to have the dog in the center of the picture. All the images should have the same resolution. \n\n\n\n### Content\n\nYou'll find here a training folder with 120 folders corresponding to the 120 breeds and images of the corresponding dog breed inside and a testing folder structured in a the same manner. \n\n\n### Acknowledgements\n\nThanks To Jessica Li who posted it previously. \n\nThe original data source is found on http://vision.stanford.edu/aditya86/ImageNetDogs/ and contains additional information on the train/test splits and baseline results.\n\nIf you use this dataset in a publication, please cite the dataset on the following papers:\n\nAditya Khosla, Nityananda Jayadevaprakash, Bangpeng Yao and Li Fei-Fei. Novel dataset for Fine-Grained Image Categorization. First Workshop on Fine-Grained Visual Categorization (FGVC), IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2011. [pdf] [poster] [BibTex]\n\nSecondary:\nJ. Deng, W. Dong, R. Socher, L.-J. Li, K. Li and L. Fei-Fei, ImageNet: A Large-Scale Hierarchical Image Database. IEEE Computer Vision and Pattern Recognition (CVPR), 2009. [pdf] [BibTex]", "VersionNotes": "Initial release", "TotalCompressedBytes": 206244388.0, "TotalUncompressedBytes": 206244388.0}]
|
[{"Id": 129000, "CreatorUserId": 1983780, "OwnerUserId": 1983780.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 308849.0, "CurrentDatasourceVersionId": 321827.0, "ForumId": 139100, "Type": 2, "CreationDate": "02/28/2019 12:22:40", "LastActivityDate": "02/28/2019", "TotalViews": 19780, "TotalDownloads": 2884, "TotalVotes": 47, "TotalKernels": 14}]
|
[{"Id": 1983780, "UserName": "miljan", "DisplayName": "Miljan Stojiljkovic", "RegisterDate": "06/11/2018", "PerformanceTier": 2}]
|
| false | 0 | 3,228 | 0 | 3,360 | 3,228 |
||
69439002
|
<jupyter_start><jupyter_text>GloVe 6B
### Context
Global Vector or GloVe is an unsupervised learning algorithm for obtaining vector representations for words
### Content
Contains 4 files for 4 embedding representations.
1. glove.6B.50d.txt - 6 Billion token and 50 Features
2. glove.6B.100d.txt - 6 Billion token and 100 Features
3. glove.6B.200d.txt - 6 Billion token and 200 Features
4. glove.6B.300d.txt - 6 Billion token and 300 Features
Kaggle dataset identifier: glove6b
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
from scipy import spatial
from sklearn.metrics import mean_squared_error, auc, mean_absolute_error
from sklearn.svm import SVC
from sklearn.feature_extraction.text import (
TfidfVectorizer,
TfidfTransformer,
CountVectorizer,
)
from sklearn.model_selection import cross_val_score, train_test_split, cross_val_predict
from sklearn.linear_model import LogisticRegression
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow.keras as keras
from sklearn.metrics import mean_squared_error, roc_auc_score
import time
import statsmodels.api as sm
import statsmodels.formula.api as smf
from sklearn.ensemble import BaggingRegressor
from sklearn.linear_model import (
LogisticRegression,
SGDRegressor,
LinearRegression,
Ridge,
)
from sklearn.metrics.pairwise import cosine_similarity
import seaborn as sns
sns.set_theme()
train_dir = "/kaggle/input/commonlitreadabilityprize"
train_path = os.path.join(train_dir, "train.csv")
test_path = os.path.join(train_dir, "test.csv")
glove_files = [
"glove.6B.300d.txt",
"glove.6B.200d.txt",
"glove.6B.100d.txt",
"glove.6B.50d.txt",
]
glove_path_dir = "../input/glove6b/"
sample = os.path.join(train_dir, "sample_submission.csv")
df = pd.read_csv(train_path)
df.head()
df_test = pd.read_csv(test_path)
df_sample = pd.read_csv(sample)
array = []
glove_embedding = {}
count = 0
sentence = "example sentence for word emnedding"
with open(glove_path_dir + glove_files[-1], "r") as f:
for line in f:
k = line.split()
if k[0].isalpha():
glove_embedding[k[0]] = np.asarray(k[1:], np.float32)
#
def find_closest_embeddings(embedding, embeddings_dict=glove_embedding):
return sorted(
embeddings_dict.keys(),
key=lambda word: spatial.distance.euclidean(embeddings_dict[word], embedding),
)[:10]
embed_list = find_closest_embeddings(glove_embedding["king"])
embed_list
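# A small sanity check of the loaded vectors (an illustrative sketch, not part of the pipeline):
# GloVe embeddings roughly support word analogies via vector arithmetic, e.g. king - man + woman ~ queen.
# This reuses the glove_embedding dict and find_closest_embeddings defined above.
analogy_vec = glove_embedding["king"] - glove_embedding["man"] + glove_embedding["woman"]
print(find_closest_embeddings(analogy_vec)[:5])  # "queen" is usually among the closest words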
def print_results_original(results, original):
# Convert the results to a string, and word-wrap them.
results = re.sub(r"(.{,70})\s", r"\1\n", results + " ").rstrip()
# Convert the original to a string, and word wrap it.
original = re.sub(r"(.{,70})\s", r"\1\n", original + " ").rstrip()
# Print the results.
print("Original ", len(original))
print("Reults ", len(results))
print("-Original-".center(70).replace(" ", "*").replace("-", " "))
print(original)
print("-Results-".center(70).replace(" ", "*").replace("-", " "))
print(results)
print("*" * 70)
from nltk import wordnet
import regex as re
from nltk import stem
stemmer = stem.PorterStemmer()
treebank = df.iloc[:, 3]
stemmed = []
for i in range(df.shape[0]):
    # stem the excerpt word-by-word and keep the stemmed text if it is shorter than the original
    for word in df.iloc[i, 3].split():
        stemmed.append(stemmer.stem(word))
    results = " ".join(stemmed)
    if len(df.iloc[i, 3]) > len(results):
        if i < 1:
            print_results_original(results, df.iloc[i, 3])
        df.iloc[i, 3] = results
    stemmed = []
#
# strip punctuation and digits from the excerpts (brackets and hyphen escaped so the character class parses as intended)
df.iloc[:, 3] = df.iloc[:, 3].str.replace(
    r"[:;.,<>/\-_~`!@#$%^&*?\[\]{}|+0-9]", "", regex=True
)
df.iloc[16, 3]
def plot_graph(data):
tar_s = np.unique(data["target"]).shape
fig, ax = plt.subplots(1, 2, figsize=(15, 8))
sns.scatterplot(x=np.arange(tar_s[0]), y=data["target"], ax=ax[0])
sns.scatterplot(x=np.arange(tar_s[0]), y=data["standard_error"], ax=ax[1])
return fig, ax
# plot_graph(df)
# Outliers can easily be dropped with the IQR rule and the pandas DataFrame filter method
# There is one outlier in standard_error at index = 0
def get_validate_data(X, y):
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)
return X_train, X_test, y_train, y_test
df_describe = df.describe()
Q3 = df_describe.loc["75%"]
Q1 = df_describe.loc["25%"]
IQR = Q3 - Q1
upper_iqr = Q3 + 1.5 * IQR
lower_iqr = Q1 - 1.5 * IQR
# print(f"Condition for outliers is {df['target']<= upper_iqr and df['target']<= lower_iqr}")
df["target"]
m = map(lambda x: x >= lower_iqr, df["target"])
df_upper = df.groupby("target").filter(
lambda x: (x["standard_error"] > upper_iqr["standard_error"])
)
df_lower = df.groupby("target").filter(
lambda x: (x["standard_error"] < lower_iqr["standard_error"])
)
df_upper2 = df.groupby("standard_error").filter(
lambda x: (x["target"] > upper_iqr["target"])
)
df_lower2 = df.groupby("standard_error").filter(
lambda x: (x["target"] < lower_iqr["target"])
)
df.drop(index=df_upper.index, axis=0, inplace=True)
df.drop(index=df_lower.index, axis=0, inplace=True)
df.drop(index=df_upper2.index, axis=0, inplace=True)
df.drop(index=df_lower2.index, axis=0, inplace=True)
# plotting the target data after removing outliers
# plot_graph(df)
df.head(5)
df["target_std"] = np.divide(df.iloc[:, 4], df.iloc[:, 5])
df["target_std_m"] = np.subtract(df.iloc[:, 4], df.iloc[:, 5])
mean_err = np.mean(df.iloc[:, 5])
df["target_std_mean"] = np.divide(df.iloc[:, 4], mean_err)
df.head(5)
def rmse_score(model, X, y):
    # note: despite the name, this returns 5-fold cross-validation R^2 scores and the plain (non-rooted) MSE
    y_pred = model.predict(X)
    score = cross_val_score(model, X, y, n_jobs=-1, cv=5)
    m = mean_squared_error(y, y_pred)
    print(f"Cross_val_Score for X_train prediction:\t{score}\nmean squared error:\t{m}")
    return score, m
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5, stop_words="english")
d = vectorizer.fit_transform(df["excerpt"])
y_train = df.iloc[:, 4]
y_train1 = df.iloc[:, 6]
y_train2 = df.iloc[:, 7]
y_train3 = df.iloc[:, 8]
X_train = d.toarray()
X_train_, X_test_, y_train, y_test = get_validate_data(X_train, y_train)
X_train1, X_test1, y_train1, y_test1 = get_validate_data(X_train, y_train1)
X_train2, X_test2, y_train2, y_test2 = get_validate_data(X_train, y_train2)
X_train3, X_test3, y_train3, y_test3 = get_validate_data(X_train, y_train3)
X_train.shape, y_train.shape
word_dict = {}
words = vectorizer.get_feature_names()
skip_word = 0
X_arr = np.array([])
for i in words:
try:
# X_arr = np.append(X_arr, glove_embedding[i])
word_dict[i] = glove_embedding[i]
except KeyError:
skip_word += 1
print("Skip word\t%s" % skip_word)
print("len of word dict\t%s" % len(word_dict))
print("X array shape\t%s" % X_arr.shape)
# word_array = np.array(list(word_dict.values()))
"""
It is slow model with accuracy of (rmse ) is 0.75, which is not good to enough to get good rank under 200
"""
from sklearn.gaussian_process import GaussianProcessRegressor
model = GaussianProcessRegressor(n_restarts_optimizer=10)
model.fit(X_train_, y_train)
test_score, test_mean = rmse_score(model, X_test_, y_test)
train_score, train_mean = rmse_score(model, X_train_, y_train)
ridge_estimator = Ridge(alpha=0.5)
ridge_estimator.fit(X_train_, y_train)
test_score_, test_mean_ = rmse_score(ridge_estimator, X_test_, y_test)
train_score_, train_mean_ = rmse_score(ridge_estimator, X_train_, y_train)
print("Gaussian estimator rmse score", rmse_score(model, X_test_, y_test))
print("Linear Model estimator score", rmse_score(ridge_estimator, X_train_, y_train))
print("Linear Model estimator score", rmse_score(ridge_estimator, X_test_, y_test))
fig, ax = plt.subplots(1, 2, figsize=(15, 8))
ax[0].plot(test_score_, "y")
ax[0].plot(train_score_, "r")
ax[0].set_title("cross_val_score")
ax[0].annotate(
"Validation data",
xy=(2.0, 0.410),
arrowprops=dict(facecolor="red", shrink=5),
fontsize=16,
horizontalalignment="left",
)
ax[0].annotate(
"training data",
xy=(2.0, 0.30),
arrowprops=dict(facecolor="red", shrink=5),
fontsize=16,
horizontalalignment="left",
)
ax[1].plot(test_score, "y")
ax[1].plot(train_score, "r")
ax[1].set_title("cross_val_score")
ax[1].annotate(
"Validation data",
xy=(2.0, 0.410),
arrowprops=dict(facecolor="red", shrink=5),
fontsize=16,
horizontalalignment="left",
)
ax[1].annotate(
"training data",
xy=(2.0, 0.30),
arrowprops=dict(facecolor="red", shrink=5),
fontsize=16,
horizontalalignment="left",
)
# From the graphs it is clear that the model is overfitting heavily; this could be mitigated with l1 or l2 regularization (see the sketch below)
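# A hedged sketch of the l1/l2 idea mentioned above (the alpha values are untuned placeholders):
# regularized linear models trained on the same TF-IDF split can be compared with the models above.
from sklearn.linear_model import Lasso
lasso_estimator = Lasso(alpha=0.001)  # l1 penalty
lasso_estimator.fit(X_train_, y_train)
print("Lasso (l1) estimator score", rmse_score(lasso_estimator, X_test_, y_test))
ridge_l2_estimator = Ridge(alpha=1.0)  # l2 penalty
ridge_l2_estimator.fit(X_train_, y_train)
print("Ridge (l2) estimator score", rmse_score(ridge_l2_estimator, X_test_, y_test))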
def predict_from_gauss(model, data, num):
p = data.iloc[num]["excerpt"]
p = vectorizer.transform([p])
pred = model.predict(p.toarray())
print(pred)
return pred[0]
def predict_traget_(model, data, save_file=False):
data["target"] = 0
for i in range(data.shape[0]):
data.iloc[i, -1] = predict_from_gauss(model, data, i)
for i in ["url_legal", "license", "excerpt"]:
try:
df_test.drop(i, inplace=True, axis=1)
except KeyError:
pass
if save_file:
data.to_csv("submission.csv", index=False)
return f"Successfully test data submitted or written in output directory\tsubmission.csv"
predict_traget_(model, df_test, save_file=True)
pd.read_csv("submission.csv")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/439/69439002.ipynb
|
glove6b
|
anindya2906
|
[{"Id": 69439002, "ScriptId": 18407915, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5667598, "CreationDate": "07/31/2021 03:27:50", "VersionNumber": 15.0, "Title": "commonLiteratureTarget", "EvaluationDate": "07/31/2021", "IsChange": true, "TotalLines": 260.0, "LinesInsertedFromPrevious": 9.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 251.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 92607708, "KernelVersionId": 69439002, "SourceDatasetVersionId": 42887}]
|
[{"Id": 42887, "DatasetId": 32801, "DatasourceVersionId": 45149, "CreatorUserId": 1061267, "LicenseName": "Unknown", "CreationDate": "06/22/2018 11:50:26", "VersionNumber": 1.0, "Title": "GloVe 6B", "Slug": "glove6b", "Subtitle": "GloVe: Global Vectors for Word Representation", "Description": "### Context\n\nGlobal Vector or GloVe is an unsupervised learning algorithm for obtaining vector representations for words\n\n\n### Content\n\nContains 4 files for 4 embedding representations.\n\n 1. glove.6B.50d.txt - 6 Billion token and 50 Features\n 2. glove.6B.100d.txt - 6 Billion token and 100 Features\n 3. glove.6B.200d.txt - 6 Billion token and 200 Features\n 4. glove.6B.300d.txt - 6 Billion token and 300 Features\n\n### Acknowledgements\n\nhttps://nlp.stanford.edu/projects/glove/", "VersionNotes": "Initial release", "TotalCompressedBytes": 2249862459.0, "TotalUncompressedBytes": 885020575.0}]
|
[{"Id": 32801, "CreatorUserId": 1061267, "OwnerUserId": 1061267.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 42887.0, "CurrentDatasourceVersionId": 45149.0, "ForumId": 41166, "Type": 2, "CreationDate": "06/22/2018 11:50:26", "LastActivityDate": "06/22/2018", "TotalViews": 39537, "TotalDownloads": 10684, "TotalVotes": 54, "TotalKernels": 247}]
|
[{"Id": 1061267, "UserName": "anindya2906", "DisplayName": "Anindya Saha", "RegisterDate": "05/06/2017", "PerformanceTier": 1}]
|
| false | 0 | 3,218 | 0 | 3,387 | 3,218 |
||
69439305
|
# importing necessary library
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
import matplotlib.image as mpimg
from tensorflow.keras.utils import to_categorical
from sklearn.model_selection import train_test_split
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.metrics import classification_report, confusion_matrix
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# list the directory
print(os.listdir("/kaggle/input/"))
# load the dataset using pandas
train = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
test = pd.read_csv("/kaggle/input/digit-recognizer/test.csv")
sample_submission = pd.read_csv("/kaggle/input/digit-recognizer/sample_submission.csv")
# the train dataset
train.head()
# the test dataset
test.head()
# sample submission format
sample_submission.head()
# define X and Y
X = train.drop(["label"], axis=1)
Y = train["label"]
# normalize the data (features)
X = X / 255.0
test = test / 255.0
# convert data to np.array
X = X.values
test = test.values
# reshaping the images
X = X.reshape(-1, 28, 28, 1)
test = test.reshape(-1, 28, 28, 1)
# one-hot encode the labels
Y = to_categorical(Y, num_classes=10)
# check the shape of the data
print(X.shape, Y.shape)
# split the data into train and test
X_train, X_val, Y_train, Y_val = train_test_split(X, Y, test_size=0.2, random_state=42)
print(X_train.shape, X_val.shape, Y_train.shape, Y_val.shape)
# defining deep learning classifier
model = tf.keras.models.Sequential(
[
tf.keras.layers.Conv2D(32, (3, 3), activation="relu", input_shape=(28, 28, 1)),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Conv2D(32, (3, 3), activation="relu"),
tf.keras.layers.MaxPooling2D(2, 2),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(512, activation=tf.nn.relu),
tf.keras.layers.Dense(10, activation=tf.nn.softmax),
]
)
model.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
model.summary()
# defining callback
class myCallback(tf.keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs={}):
if logs.get("accuracy") > 0.9995:
print("\nReached 99% accuracy so cancelling training!")
self.model.stop_training = True
callbacks = myCallback()
# sample image
plt.imshow(X_train[0])
# sample target of above image
Y_train[0]
# training the data
history = model.fit(
X_train,
Y_train,
epochs=100,
verbose=2,
callbacks=[callbacks],
validation_data=(X_val, Y_val),
)
# plot the validation and training accuracy
fig, axis = plt.subplots(1, 2, figsize=(16, 6))
axis[0].plot(history.history["val_accuracy"], label="val_acc")
axis[0].set_title("Validation Accuracy")
axis[0].set_xlabel("Epochs")
axis[1].plot(history.history["accuracy"], label="acc")
axis[1].set_title("Training Accuracy")
axis[1].set_xlabel("Epochs")
plt.show()
# predict on validation set
Y_pred = model.predict(X_val)
# predicted and original classes
Y_pred_class = np.argmax(Y_pred, axis=1)
Y_test_class = np.argmax(Y_val, axis=1)
# accuracy calculation
val_acc = np.mean(Y_pred_class == Y_test_class)
# print the accuracy
print("Validation accuracy: ", val_acc, "\n")
# plot the Confusion Matrix
fig, ax = plt.subplots(figsize=(12, 12))
cm = confusion_matrix(Y_test_class, Y_pred_class, normalize="true")
disp = ConfusionMatrixDisplay(
confusion_matrix=cm, display_labels=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
)
disp = disp.plot(ax=ax, cmap=plt.cm.Blues)
ax.set_title("Confusion Matrix")
plt.show()
print(classification_report(Y_test_class, Y_pred_class))
# prediction on test dataset
y_pred_test = model.predict(test)
prediction = np.argmax(y_pred_test, axis=1)
# create submission DataFrame
submission = pd.DataFrame({"ImageId": range(1, 28001), "Label": list(prediction)})
submission.head()
# create CSV file
submission.to_csv("submission.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/439/69439305.ipynb
| null | null |
[{"Id": 69439305, "ScriptId": 18959896, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4187042, "CreationDate": "07/31/2021 03:35:13", "VersionNumber": 1.0, "Title": "CNN_Digit_Recognizer", "EvaluationDate": "07/31/2021", "IsChange": true, "TotalLines": 145.0, "LinesInsertedFromPrevious": 145.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 1,328 | 0 | 1,328 | 1,328 |
||
69919875
|
<jupyter_start><jupyter_text>Pima Indians Diabetes Database
## Context
This dataset is originally from the National Institute of Diabetes and Digestive and Kidney Diseases. The objective of the dataset is to diagnostically predict whether or not a patient has diabetes, based on certain diagnostic measurements included in the dataset. Several constraints were placed on the selection of these instances from a larger database. In particular, all patients here are females at least 21 years old of Pima Indian heritage.
## Content
The dataset consists of several medical predictor variables and one target variable, `Outcome`. Predictor variables include the number of pregnancies the patient has had, their BMI, insulin level, age, and so on.
## Acknowledgements
Smith, J.W., Everhart, J.E., Dickson, W.C., Knowler, W.C., & Johannes, R.S. (1988). [Using the ADAP learning algorithm to forecast the onset of diabetes mellitus][1]. *In Proceedings of the Symposium on Computer Applications and Medical Care* (pp. 261--265). IEEE Computer Society Press.
## Inspiration
Can you build a machine learning model to accurately predict whether or not the patients in the dataset have diabetes or not?
[1]: http://rexa.info/paper/04587c10a7c92baa01948f71f2513d5928fe8e81
Kaggle dataset identifier: pima-indians-diabetes-database
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import matplotlib.pyplot as plt # for data visualization and using histogram
import seaborn as sns # for data visualization
from sklearn import metrics #
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
diab_data = pd.read_csv("/kaggle/input/pima-indians-diabetes-database/diabetes.csv")
diab_data.head()
diab_data.info()
diab_data.isnull().sum() # to check null values
diab_corr = diab_data.corr() # to check correlation
sns.heatmap(diab_corr, annot=True)
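# A minimal sketch of where this notebook appears to be heading, using the imports above
# (assumption: "Outcome" is the target column, as stated in the dataset description; k=5 is arbitrary).
X = diab_data.drop(columns=["Outcome"])
y = diab_data["Outcome"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
knn = KNeighborsClassifier(n_neighbors=5)
knn.fit(X_train, y_train)
print("KNN test accuracy:", metrics.accuracy_score(y_test, knn.predict(X_test)))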
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/919/69919875.ipynb
|
pima-indians-diabetes-database
| null |
[{"Id": 69919875, "ScriptId": 19119724, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7495925, "CreationDate": "08/03/2021 20:59:07", "VersionNumber": 1.0, "Title": "notebook244d88bffc", "EvaluationDate": "08/03/2021", "IsChange": true, "TotalLines": 36.0, "LinesInsertedFromPrevious": 36.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 93336847, "KernelVersionId": 69919875, "SourceDatasetVersionId": 482}]
|
[{"Id": 482, "DatasetId": 228, "DatasourceVersionId": 482, "CreatorUserId": 1, "LicenseName": "CC0: Public Domain", "CreationDate": "10/06/2016 18:31:56", "VersionNumber": 1.0, "Title": "Pima Indians Diabetes Database", "Slug": "pima-indians-diabetes-database", "Subtitle": "Predict the onset of diabetes based on diagnostic measures", "Description": "## Context\n\nThis dataset is originally from the National Institute of Diabetes and Digestive and Kidney Diseases. The objective of the dataset is to diagnostically predict whether or not a patient has diabetes, based on certain diagnostic measurements included in the dataset. Several constraints were placed on the selection of these instances from a larger database. In particular, all patients here are females at least 21 years old of Pima Indian heritage.\n\n## Content\n\nThe datasets consists of several medical predictor variables and one target variable, `Outcome`. Predictor variables includes the number of pregnancies the patient has had, their BMI, insulin level, age, and so on.\n\n## Acknowledgements\n\nSmith, J.W., Everhart, J.E., Dickson, W.C., Knowler, W.C., & Johannes, R.S. (1988). [Using the ADAP learning algorithm to forecast the onset of diabetes mellitus][1]. *In Proceedings of the Symposium on Computer Applications and Medical Care* (pp. 261--265). IEEE Computer Society Press.\n\n## Inspiration\n\nCan you build a machine learning model to accurately predict whether or not the patients in the dataset have diabetes or not?\n\n [1]: http://rexa.info/paper/04587c10a7c92baa01948f71f2513d5928fe8e81", "VersionNotes": "Initial release", "TotalCompressedBytes": 23873.0, "TotalUncompressedBytes": 23873.0}]
|
[{"Id": 228, "CreatorUserId": 1, "OwnerUserId": NaN, "OwnerOrganizationId": 7.0, "CurrentDatasetVersionId": 482.0, "CurrentDatasourceVersionId": 482.0, "ForumId": 1652, "Type": 2, "CreationDate": "10/06/2016 18:31:56", "LastActivityDate": "02/06/2018", "TotalViews": 2058659, "TotalDownloads": 403536, "TotalVotes": 3736, "TotalKernels": 2589}]
| null |
| false | 0 | 336 | 0 | 720 | 336 |
||
69919309
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Importing important libraries
#
import matplotlib.pyplot as plt
import os
import numpy as np
import pandas as pd
from scipy import stats
import seaborn as sns
import itertools
import plotly
import plotly.express as px
import plotly.graph_objs as go
from sklearn.model_selection import train_test_split
from sklearn.svm import SVR
from sklearn.linear_model import Ridge
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score, mean_squared_error, mean_squared_log_error
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn import preprocessing
from sklearn.preprocessing import MinMaxScaler
import warnings
warnings.filterwarnings("ignore")
# Reading the file
raw_file = pd.read_csv("/kaggle/input/seoul-bike-rental-ai-pro-iti/train.csv")
file = raw_file  # note: an alias, not a copy; in-place column additions below also appear in raw_file
file.head()
file.describe()
# # Checking for null values
file.isnull().sum(axis=0)
file.columns
# # Data visualization:
corr = file[
[
"Temperature(�C)",
"Humidity(%)",
"Wind speed (m/s)",
"Visibility (10m)",
"Dew point temperature(�C)",
"Solar Radiation (MJ/m2)",
"Rainfall(mm)",
"Snowfall (cm)",
"y",
]
].corr()
f, axes = plt.subplots(1, 1, figsize=(7, 7))
# sns.heatmap(corr,square=True,annot = True,linewidth = .5,center = 2,ax = axes)
f, axes = plt.subplots(1, 1, figsize=(15, 8))
# sns.lineplot(x ='Date', y = 'Temperature(�C)', data = file, hue = 'Hour',ax = axes,legend = 'full',palette = 'bright')
# ## Season
f, axes = plt.subplots(1, 4, figsize=(15, 5))
sns.despine(left=True)
x = "Seasons"
# sns.barplot(x = x , y = 'Solar Radiation (MJ/m2)' , data = file, saturation = 1, ax = axes[0])
# sns.barplot(x = x , y = 'Visibility (10m)' , data = file, saturation = 1, ax = axes[1])
# sns.barplot(x = x , y = 'Wind speed (m/s)' , data = file, saturation = 1, ax = axes[2])
# sns.barplot(x = x , y = 'y' , data = file, saturation = 1, ax = axes[3])
# ## Holiday
f, axes = plt.subplots(1, 4, figsize=(15, 5))
sns.despine(left=True)
x = "Holiday"
# sns.barplot(x = x , y = 'Solar Radiation (MJ/m2)' , data = file, saturation = 1, ax = axes[0])
# sns.barplot(x = x , y = 'Visibility (10m)' , data = file, saturation = 1, ax = axes[1])
# sns.barplot(x = x , y = 'Wind speed (m/s)' , data = file, saturation = 1, ax = axes[2])
# sns.barplot(x = x , y = 'y' , data = file, saturation = 1, ax = axes[3])
# ## Functioning day
f, axes = plt.subplots(1, 4, figsize=(15, 5))
sns.despine(left=True)
x = "Functioning Day"
# sns.barplot(x = x , y = 'Solar Radiation (MJ/m2)' , data = file, saturation = 1, ax = axes[0])
# sns.barplot(x = x , y = 'Visibility (10m)' , data = file, saturation = 1, ax = axes[1])
# sns.barplot(x = x , y = 'Wind speed (m/s)' , data = file, saturation = 1, ax = axes[2])
# sns.barplot(x = x , y = 'y' , data = file, saturation = 1, ax = axes[3])
# ### hour
f, axes = plt.subplots(1, 3, figsize=(15, 5))
sns.despine(left=True)
x = "Hour"
# sns.barplot(x = x , y = 'Visibility (10m)' , data = file, saturation = 1, ax = axes[0])
# sns.barplot(x = x , y = 'Wind speed (m/s)' , data = file, saturation = 1, ax = axes[1])
# sns.barplot(x = x , y = 'y' , data = file, saturation = 1, ax = axes[2])
fig = px.pie(
file,
names="Holiday",
title="Holiday",
color_discrete_sequence=px.colors.qualitative.Set1,
)
fig.show()
fig = px.pie(
file,
names="Functioning Day",
title="Functioning Day",
color_discrete_sequence=px.colors.qualitative.Set1,
)
fig.show()
file.sample(10)
# # Data Preparation
# ## Split Date
# First we split each record's date into day, month and year components (the hour already has its own column).
file["Day"] = pd.DatetimeIndex(file["Date"]).day
file["Month"] = pd.DatetimeIndex(file["Date"]).month
file["Year"] = pd.DatetimeIndex(file["Date"]).year
file
file["label_day_night"] = file["Hour"].apply(
lambda x: "Night" if (x > 20 or x < 5) else ("Day")
)
file
# Let's see how rentals split between day and night
file.groupby("label_day_night").sum()["y"].plot.pie()
file["Date"] = pd.to_datetime(file["Date"], format="%d/%m/%Y")
file["WeekDay"] = file["Date"].dt.day_name()
file
# The days with the highest numbers of rented bikes are weekdays
# file_WeekDays=pd.DataFrame(file.groupby('WeekDay').sum()['y'].sort_values(ascending=False))
# file_WeekDays.style.background_gradient(cmap=sns.light_palette("red", as_cmap=True))
# Koreans tend to ride most when it is fairly warm, around 25°C on average
# file.groupby('Temperature(�C)').mean()['y'].plot()
# boxplotDay_night= file.boxplot(['y'],by='label_day_night',figsize=(5,5))
#########################
## Business Question 1 ##
#########################
# Is there an hour when bikes are rented the most?
# Mean of bikes rented sorted by hour
file.groupby("Hour")["y"].mean()
# Scatter plot of bikes rented by hour.
# sns.catplot('Hour', 'y', data=file)
## On average, less than 150 bikes are rented at 4:00 & 5:00.
## More than 1,000 bikes are rented on average at 8:00 and from 17:00 – 21:00.
# sns.catplot('Holiday', 'y', data=file)
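# A quick numeric check of the claims above (illustrative sketch): the hours with the highest mean rentals.
print(file.groupby("Hour")["y"].mean().sort_values(ascending=False).head(8))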
# ## Drop Columns
file = file.drop(
columns=[
"Date",
"Year",
"Day",
"Dew point temperature(�C)",
"Rainfall(mm)",
"Snowfall (cm)",
]
)
file
# ## Encoding
# Seasons-> 1:Winter 2:Spring 3:Summer 4:Autumn
# Holiday-> 0:No Holiday 1:Holiday
# Functioing Day-> 0:No 1:Yes
def encodingHoliday(dataset, cols):
for col_name in cols:
dataset[col_name] = dataset[col_name].replace({"Holiday": 1, "No Holiday": 0})
return dataset
def encodingFunctioningDay(dataset, cols):
for col_name in cols:
dataset[col_name] = dataset[col_name].replace({"Yes": 1, "No": 0})
return dataset
def encodingSeasons(dataset, cols):
for col_name in cols:
dataset[col_name] = dataset[col_name].replace(
{"Winter": 1, "Spring": 2, "Summer": 3, "Autumn": 4}
)
return dataset
def encoding_label_day_night(dataset, cols):
for col_name in cols:
dataset[col_name] = dataset[col_name].replace({"Day": 1, "Night": 0})
return dataset
def encodingWeekDay(dataset, cols):
for col_name in cols:
dataset[col_name] = dataset[col_name].replace(
{
"Sunday": 0,
"Monday": 1,
"Tuesday": 2,
"Wednesday": 3,
"Thursday": 4,
"Friday": 5,
"Saturday": 6,
}
)
return dataset
file = encodingHoliday(file, ["Holiday"])
file = encodingFunctioningDay(file, ["Functioning Day"])
file = encodingSeasons(file, ["Seasons"])
file = encoding_label_day_night(file, ["label_day_night"])
file = encodingWeekDay(file, ["WeekDay"])
file.sample(10)
# ## Normalization
def normalize_column(file, column):
return MinMaxScaler().fit_transform(np.array(file[column]).reshape(-1, 1))
names = [
"Temperature(�C)",
"Humidity(%)",
"Hour",
"Wind speed (m/s)",
"Holiday",
"Visibility (10m)",
"WeekDay",
"Solar Radiation (MJ/m2)",
"Seasons",
"Month",
]
for i in names:
file[i] = normalize_column(file, i)
file
# ## Removing Outliers
df = pd.DataFrame()
df = file.copy()
print(df.shape)
# calculate z-scores of `df`
z_scores = stats.zscore(df)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
new_df = df[filtered_entries]
print(new_df.shape)
file = new_df
file.sample(5)
# ## Remove Duplicates
# work on a copy before dropping duplicate rows
df2 = pd.DataFrame()
df2 = file.copy()
print(df2.shape)
# dropping ALL duplicate values
df2.drop_duplicates(keep=False, inplace=True)
# displaying data
print(df2.shape)
file = df2
file.sample(5)
file.info()
file.columns
file.describe().T
# file.hist()
file.corr()
file = file.drop(columns=["Seasons", "Wind speed (m/s)", "label_day_night", "Month"])
file
raw_file.sample(10)
def PrepareData(rawData):
test_ = rawData
test_["Day"] = pd.DatetimeIndex(test_["Date"]).day
test_["Month"] = pd.DatetimeIndex(test_["Date"]).month
test_["Year"] = pd.DatetimeIndex(test_["Date"]).year
test_["label_day_night"] = test_["Hour"].apply(
lambda x: "Night" if (x > 20 or x < 5) else ("Day")
)
test_["Date"] = pd.to_datetime(test_["Date"], format="%d/%m/%Y")
test_["WeekDay"] = test_["Date"].dt.day_name()
test_ = test_.drop(
columns=[
"Date",
"Year",
"Day",
"Holiday",
"Solar Radiation (MJ/m2)",
"Dew point temperature(�C)",
"label_day_night",
"Rainfall(mm)",
"Snowfall (cm)",
"Seasons",
"Wind speed (m/s)",
]
)
# test_ = encodingHoliday(test_, ['Holiday'])
test_ = encodingFunctioningDay(test_, ["Functioning Day"])
# test_ = encodingSeasons(test_, ['Seasons'])
# test_ = encoding_label_day_night(test_, ['label_day_night'])
test_ = encodingWeekDay(test_, ["WeekDay"])
names_ = [
"Temperature(�C)",
"Humidity(%)",
"WeekDay",
"Visibility (10m)",
"Hour",
"Month",
]
for i in names_:
test_[i] = normalize_column(test_, i)
return test_
# Combining Features
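# A hedged sketch of one way "combining features" could be realized (illustration only, left commented
# out because `file` is rebuilt by PrepareData just below; assumes a DataFrame with a raw 0-23 Hour column):
# df["RushHour"] = df["Hour"].isin([8, 17, 18, 19, 20, 21]).astype(int)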
# # Model Training
file = PrepareData(raw_file)
file.sample(10)
file.corr()
# ## Splitting data into training and validation sets
train_df, val_df = train_test_split(file, test_size=0.20, random_state=42)
x_train = train_df.drop(columns=["ID", "y"])
y_train = train_df["y"]
x_val = val_df.drop(columns=["ID", "y"])
y_val = val_df["y"]
# ## Models:
# RandomForestRegressor Model:
randomForestAlgo = RandomForestRegressor()
param = {
"n_estimators": [4],
"max_depth": [12],
"min_samples_split": [8],
"min_samples_leaf": [2],
"bootstrap": [False],
}
gridSearch_RandomForest = GridSearchCV(randomForestAlgo, param, scoring="r2", cv=5)
gridSearch_RandomForest.fit(x_train, y_train)
predictionsRFR = gridSearch_RandomForest.predict(x_val)
# remove negative values from target
for i in range(len(predictionsRFR)):
predictionsRFR[i] = max(0, predictionsRFR[i])
print("RMSLE= ", np.sqrt(mean_squared_log_error(y_val, predictionsRFR)))
# best parameters: {'bootstrap': True,'ccp_alpha': 0.0,'criterion': 'mse','max_depth': 80,'max_features': 'auto','max_leaf_nodes': None,'max_samples': None,'min_impurity_decrease': 0.0,'min_impurity_split': None,'min_samples_leaf': 2,'min_samples_split': 2,'min_weight_fraction_leaf': 0.0,'n_estimators': 100,'n_jobs': None,'oob_score': False,'random_state': None,'verbose': 0,'warm_start': False}
# test for RandomForestRegressor best parameters
"""
for depth in range(1,100):
param = {'n_estimators' : [4],
'max_depth' : [12],
'min_samples_split':[8],
'min_samples_leaf':[2],
'bootstrap' : [False]
}
gridSearch_RandomForest=GridSearchCV(randomForestAlgo,param,scoring='r2',cv=5)
gridSearch_RandomForest.fit(x_train,y_train)
predictionsRFR = gridSearch_RandomForest.predict(x_val)
for i in range(len(predictionsRFR)):
predictionsRFR[i] = max(0,predictionsRFR[i])
print("max_depth= ",depth," RMSLE= ", np.sqrt(mean_squared_log_error( y_val, predictionsRFR )))
"""
# remove test file from working directory
# os.remove('./test.csv')
# RandomForestClassifier Model:
"""
# Create an instance of the classifier
classifier = RandomForestClassifier(max_depth=32, random_state=0)
# Train the classifier
classifier = classifier.fit(x_train, y_train)
predictionsRFC = classifier.predict(x_val)
#remove negative values from target
for i in range(len(predictionsRFC)):
predictionsRFC[i] = max(0,predictionsRFC[i])
#print("The accuracy of the classifier on the validation set is ", (classifier.score(x_val, y_val)))
print("RMSLE: ", np.sqrt(mean_squared_log_error(y_val, predictionsRFC)))
"""
# test for RandomForestClassifier best parameters
"""
for depth in range(1,2):
classifier = RandomForestClassifier(max_depth=depth, random_state=0)
classifier = classifier.fit(x_train, y_train)
predictionsRF = classifier.predict(x_val)
for i in range(len(predictionsRF)):
predictionsRF[i] = max(0,predictionsRF[i])
print("max_depth= ",depth," ,RMSLE= ", np.sqrt(mean_squared_log_error(y_val, predictionsRF)))
"""
# best depth over 100 depths: max_depth=32, RMSLE=0.4826878794953576; the error becomes constant from depth 58 onward
"""
cv = KFold(n_splits=5, random_state=1, shuffle=True)
#build multiple linear regression model
X = file.drop(columns=['ID','y'])
y = file['y']
#use LOOCV to evaluate model
scores = cross_val_score(classifier, X, y, cv=cv, n_jobs=-1)
print(scores)
"""
# LinearRegression Model:
"""
# Using polynomial LinearRegression on the dataset
reg = LinearRegression().fit(x_train, y_train)
predictionsLiR = reg.predict(x_val)
#remove negative values from target
for i in range(len(predictionsLiR)):
predictionsLiR[i] = max(0,predictionsLiR[i])
#RMSLE
print("RMSLE= ", np.sqrt(mean_squared_log_error( y_val, predictionsLiR )))
"""
# test for LogisticRegression best parameters
"""
for depth in range(100,500,100):
LoR = LogisticRegression(max_iter= depth,random_state=42)
LoR.fit(x_train, y_train)
predictionsLoR = LoR.predict(x_val)
for i in range(len(predictionsLoR)):
predictionsLoR[i] = max(0,predictionsLoR[i])
print("max_depth= ",depth,"RMSLE= ", np.sqrt(mean_squared_log_error( y_val, predictionsLoR )))
"""
# Logistic regression with default setting.
"""
LoR = LogisticRegression(max_iter=500,random_state=42)
LoR.fit(x_train, y_train)
predictionsLoR = LoR.predict(x_val)
#remove negative values from target
for i in range(len(predictionsLoR)):
predictionsLoR[i] = max(0,predictionsLoR[i])
#RMSLE
print("RMSLE= ", np.sqrt(mean_squared_log_error( y_val, predictionsLoR )))
"""
# test for SVR best parameters
"""
for depth in [6800,7000]:
param = {'C' : [depth]}
gridSearchSVR=GridSearchCV(svr_Model, param, scoring='r2', cv=5)
gridSearchSVR.fit(x_train, y_train)
predictionsSVR = gridSearchSVR.predict(x_val)
for i in range(len(predictionsSVR)):
predictionsSVR[i] = max(0,predictionsSVR[i])
print("max_depth= ",depth,"RMSLE= ", np.sqrt(mean_squared_log_error( y_val, predictionsSVR )))
"""
# SVR regression model:
"""
svr_Model=SVR()
param = {'C' : [7000]}
gridSearchSVR=GridSearchCV(svr_Model, param, scoring='r2', cv=5)
gridSearchSVR.fit(x_train, y_train)
predictionsSVR = gridSearchSVR.predict(x_val)
#remove negative values from target
for i in range(len(predictionsSVR)):
predictionsSVR[i] = max(0,predictionsSVR[i])
print("RMSLE= ", np.sqrt(mean_squared_log_error( y_val, predictionsSVR )))
"""
# test for Ridge best parameters
"""
for depth in range(50,150,1):
ridge=Ridge()
parameters={'alpha':[depth]}
gridSearchRidge=GridSearchCV(ridge, parameters, scoring='r2', cv=3)
gridSearchRidge.fit(x_train,y_train)
predictionsRg = gridSearchRidge.predict(x_val)
#remove negative values from target
for i in range(len(predictionsRg)):
predictionsRg[i] = max(0,predictionsRg[i])
print("max_depth= ",depth,"RMSLE= ", np.sqrt(mean_squared_log_error( y_val, predictionsRg )))
"""
# Ridge regression model:
"""
ridge=Ridge()
parameters={'alpha':[100]}
gridSearchRidge=GridSearchCV(ridge, parameters, scoring='r2', cv=3)
gridSearchRidge.fit(x_train,y_train)
predictionsRg = gridSearchRidge.predict(x_val)
#remove negative values from target
for i in range(len(predictionsRg)):
predictionsRg[i] = max(0,predictionsRg[i])
print("RMSLE= ", np.sqrt(mean_squared_log_error( y_val, predictionsRg )))
"""
# Comparing the models' RMSLE scores:
"""
df = pd.DataFrame()
df['Y'] = y_val
df['RFC'] = predictionsRFC
df['LiR'] = predictionsLiR
#df['LoR'] = predictionsLoR
df['RFR'] = predictionsRFR
#df['SVR'] = predictionsSVR
# Ensemble methods: regression
df['Merged'] = (predictionsRFR + predictionsSVR)/2
df
"""
df.corr()
# print("RMSLE= ", np.sqrt(mean_squared_log_error( y_val, ((predictionsRFR + predictionsSVR + predictionsRFC)/3) )))
# Since we know the output is never negative, all negative predictions are replaced with a non-negative value before the error is calculated.
# plt = sns.residplot(x = predictionsRFR, y = y_val, lowess = True,color = 'r')
# ## Reading the test file
raw_test = pd.read_csv("/kaggle/input/seoul-bike-rental-ai-pro-iti/test.csv")
test = raw_test
test
test.describe()
# ## Test set processing
test["Day"] = pd.DatetimeIndex(test["Date"]).day
test["Month"] = pd.DatetimeIndex(test["Date"]).month
test["Year"] = pd.DatetimeIndex(test["Date"]).year
test.sample(10)
test["label_day_night"] = test["Hour"].apply(
lambda x: "Night" if (x > 20 or x < 5) else ("Day")
)
test.sample(10)
test["Date"] = pd.to_datetime(test["Date"], format="%d/%m/%Y")
test["WeekDay"] = test["Date"].dt.day_name()
test
test = test.drop(
columns=[
"Date",
"Year",
"Day",
"Month",
"Dew point temperature(�C)",
"Holiday",
"label_day_night",
"Rainfall(mm)",
"Snowfall (cm)",
"Seasons",
"WeekDay",
"Wind speed (m/s)",
]
)
test.sample(10)
# test = encodingHoliday(test, ['Holiday'])
test = encodingFunctioningDay(test, ["Functioning Day"])
# test = encodingSeasons(test, ['Seasons'])
# test = encoding_label_day_night(test, ['label_day_night'])
# test = encodingWeekDay(test, ['WeekDay'])
test.sample(10)
names = [
"Temperature(�C)",
"Humidity(%)",
"Visibility (10m)",
"Solar Radiation (MJ/m2)",
"Hour",
]
for i in names:
test[i] = normalize_column(test, i)
test
test = PrepareData(raw_test)
# ## Exporting output to csv
# Model chosen: RFR
# columns_dropped=['Date', 'Dew point temperature(�C)', 'Rainfall(mm)', 'Snowfall (cm)']
X_test = test
X_test = X_test.drop(columns=["ID"])
y_test_predicted = gridSearch_RandomForest.predict(X_test)
# remove negative values from target
for i in range(len(y_test_predicted)):
y_test_predicted[i] = max(0, y_test_predicted[i])
test["y"] = y_test_predicted
test
test[["ID", "y"]].to_csv("/kaggle/working/submission.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/919/69919309.ipynb
| null | null |
[{"Id": 69919309, "ScriptId": 18991482, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7812884, "CreationDate": "08/03/2021 20:55:02", "VersionNumber": 13.0, "Title": "LamyaaZayed_notebooke6e2cd0a37", "EvaluationDate": "08/03/2021", "IsChange": true, "TotalLines": 576.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 575.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Importing important libraries
#
import matplotlib.pyplot as plt
import os
import numpy as np
import pandas as pd
from scipy import stats
import seaborn as sns
import itertools
import plotly
import plotly.express as px
import plotly.graph_objs as go
from sklearn.model_selection import train_test_split
from sklearn.svm import SVR
from sklearn.linear_model import Ridge
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import GridSearchCV
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score, mean_squared_error, mean_squared_log_error
from sklearn.preprocessing import OneHotEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn import preprocessing
from sklearn.preprocessing import MinMaxScaler
import warnings
warnings.filterwarnings("ignore")
# Reading the file
raw_file = pd.read_csv("/kaggle/input/seoul-bike-rental-ai-pro-iti/train.csv")
file = raw_file
file.head()
file.describe()
# # Checking for null values
file.isnull().sum(axis=0)
file.columns
# # Data visualization:
corr = file[
[
"Temperature(�C)",
"Humidity(%)",
"Wind speed (m/s)",
"Visibility (10m)",
"Dew point temperature(�C)",
"Solar Radiation (MJ/m2)",
"Rainfall(mm)",
"Snowfall (cm)",
"y",
]
].corr()
f, axes = plt.subplots(1, 1, figsize=(7, 7))
# sns.heatmap(corr,square=True,annot = True,linewidth = .5,center = 2,ax = axes)
f, axes = plt.subplots(1, 1, figsize=(15, 8))
# sns.lineplot(x ='Date', y = 'Temperature(�C)', data = file, hue = 'Hour',ax = axes,legend = 'full',palette = 'bright')
# ## Season
f, axes = plt.subplots(1, 4, figsize=(15, 5))
sns.despine(left=True)
x = "Seasons"
# sns.barplot(x = x , y = 'Solar Radiation (MJ/m2)' , data = file, saturation = 1, ax = axes[0])
# sns.barplot(x = x , y = 'Visibility (10m)' , data = file, saturation = 1, ax = axes[1])
# sns.barplot(x = x , y = 'Wind speed (m/s)' , data = file, saturation = 1, ax = axes[2])
# sns.barplot(x = x , y = 'y' , data = file, saturation = 1, ax = axes[3])
# ## Holiday
f, axes = plt.subplots(1, 4, figsize=(15, 5))
sns.despine(left=True)
x = "Holiday"
# sns.barplot(x = x , y = 'Solar Radiation (MJ/m2)' , data = file, saturation = 1, ax = axes[0])
# sns.barplot(x = x , y = 'Visibility (10m)' , data = file, saturation = 1, ax = axes[1])
# sns.barplot(x = x , y = 'Wind speed (m/s)' , data = file, saturation = 1, ax = axes[2])
# sns.barplot(x = x , y = 'y' , data = file, saturation = 1, ax = axes[3])
# ## Functioning day
f, axes = plt.subplots(1, 4, figsize=(15, 5))
sns.despine(left=True)
x = "Functioning Day"
# sns.barplot(x = x , y = 'Solar Radiation (MJ/m2)' , data = file, saturation = 1, ax = axes[0])
# sns.barplot(x = x , y = 'Visibility (10m)' , data = file, saturation = 1, ax = axes[1])
# sns.barplot(x = x , y = 'Wind speed (m/s)' , data = file, saturation = 1, ax = axes[2])
# sns.barplot(x = x , y = 'y' , data = file, saturation = 1, ax = axes[3])
# ### hour
f, axes = plt.subplots(1, 3, figsize=(15, 5))
sns.despine(left=True)
x = "Hour"
# sns.barplot(x = x , y = 'Visibility (10m)' , data = file, saturation = 1, ax = axes[0])
# sns.barplot(x = x , y = 'Wind speed (m/s)' , data = file, saturation = 1, ax = axes[1])
# sns.barplot(x = x , y = 'y' , data = file, saturation = 1, ax = axes[2])
fig = px.pie(
file,
names="Holiday",
title="Holiday",
color_discrete_sequence=px.colors.qualitative.Set1,
)
fig.show()
fig = px.pie(
file,
names="Functioning Day",
title="Functioning Day",
color_discrete_sequence=px.colors.qualitative.Set1,
)
fig.show()
file.sample(10)
# # Data Preparation
# ## Split Date
# First we separate the date of each data point into day, month and year (the hour is already a separate column).
file["Day"] = pd.DatetimeIndex(file["Date"]).day
file["Month"] = pd.DatetimeIndex(file["Date"]).month
file["Year"] = pd.DatetimeIndex(file["Date"]).year
file
file["label_day_night"] = file["Hour"].apply(
lambda x: "Night" if (x > 20 or x < 5) else ("Day")
)
file
# Let's see the distribution of the rentals by moment of the day
file.groupby("label_day_night").sum()["y"].plot.pie()
file["Date"] = pd.to_datetime(file["Date"], format="%d/%m/%Y")
file["WeekDay"] = file["Date"].dt.day_name()
file
# The days with the highest numbers of rented bikes are weekdays
# file_WeekDays=pd.DataFrame(file.groupby('WeekDay').sum()['y'].sort_values(ascending=False))
# file_WeekDays.style.background_gradient(cmap=sns.light_palette("red", as_cmap=True))
# Korean people like to ride bikes when it is fairly warm, around 25°C on average
# file.groupby('Temperature(�C)').mean()['y'].plot()
# boxplotDay_night= file.boxplot(['y'],by='label_day_night',figsize=(5,5))
#########################
## Business Question 1 ##
#########################
# Is there an hour when bikes are rented the most?
# Mean of bikes rented sorted by hour
file.groupby("Hour")["y"].mean()
# Scatter plot of bikes rented by hour.
# sns.catplot('Hour', 'y', data=file)
## On average, less than 150 bikes are rented at 4:00 & 5:00.
## More than 1,000 bikes are rented on average at 8:00 and from 17:00 – 21:00.
# sns.catplot('Holiday', 'y', data=file)
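# Since the catplot calls above are left commented out, a minimal alternative sketch
# of the same hourly pattern, reusing the file dataframe and plt already defined above:
file.groupby("Hour")["y"].mean().plot.bar(figsize=(12, 4), title="Mean rentals per hour")
plt.show()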
# ## Drop Columns
file = file.drop(
columns=[
"Date",
"Year",
"Day",
"Dew point temperature(�C)",
"Rainfall(mm)",
"Snowfall (cm)",
]
)
file
# ## Encoding
# Seasons-> 1:Winter 2:Spring 3:Summer 4:Autumn
# Holiday-> 0:No Holiday 1:Holiday
# Functioning Day-> 0:No 1:Yes
def encodingHoliday(dataset, cols):
for col_name in cols:
dataset[col_name] = dataset[col_name].replace({"Holiday": 1, "No Holiday": 0})
return dataset
def encodingFunctioningDay(dataset, cols):
for col_name in cols:
dataset[col_name] = dataset[col_name].replace({"Yes": 1, "No": 0})
return dataset
def encodingSeasons(dataset, cols):
for col_name in cols:
dataset[col_name] = dataset[col_name].replace(
{"Winter": 1, "Spring": 2, "Summer": 3, "Autumn": 4}
)
return dataset
def encoding_label_day_night(dataset, cols):
for col_name in cols:
dataset[col_name] = dataset[col_name].replace({"Day": 1, "Night": 0})
return dataset
def encodingWeekDay(dataset, cols):
for col_name in cols:
dataset[col_name] = dataset[col_name].replace(
{
"Sunday": 0,
"Monday": 1,
"Tuesday": 2,
"Wednesday": 3,
"Thursday": 4,
"Friday": 5,
"Saturday": 6,
}
)
return dataset
file = encodingHoliday(file, ["Holiday"])
file = encodingFunctioningDay(file, ["Functioning Day"])
file = encodingSeasons(file, ["Seasons"])
file = encoding_label_day_night(file, ["label_day_night"])
file = encodingWeekDay(file, ["WeekDay"])
file.sample(10)
# ## Normalization
def normalize_column(file, column):
return MinMaxScaler().fit_transform(np.array(file[column]).reshape(-1, 1))
names = [
"Temperature(�C)",
"Humidity(%)",
"Hour",
"Wind speed (m/s)",
"Holiday",
"Visibility (10m)",
"WeekDay",
"Solar Radiation (MJ/m2)",
"Seasons",
"Month",
]
for i in names:
file[i] = normalize_column(file, i)
file
# ## Removing Outliers
df = pd.DataFrame()
df = file.copy()
print(df.shape)
# calculate z-scores of `df`
z_scores = stats.zscore(df)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
new_df = df[filtered_entries]
print(new_df.shape)
file = new_df
file.sample(5)
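# A small illustration (toy data, not the dataset) of what the z-score filter above
# does: a row is dropped only when some column lies more than 3 standard deviations
# from that column's mean.
from scipy import stats
import numpy as np
import pandas as pd

toy = pd.DataFrame({"v": list(range(20)) + [500.0]})
mask = (np.abs(stats.zscore(toy)) < 3).all(axis=1)
print(toy[mask].shape)  # only the extreme row (500.0) is dropped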
# ## Remove Duplicates
# remove duplicate rows from the dataframe
df2 = pd.DataFrame()
df2 = file.copy()
print(df2.shape)
# dropping ALL duplicate values
df2.drop_duplicates(keep=False, inplace=True)
# displaying data
print(df2.shape)
file = df2
file.sample(5)
file.info()
file.columns
file.describe().T
# file.hist()
file.corr()
file = file.drop(columns=["Seasons", "Wind speed (m/s)", "label_day_night", "Month"])
file
raw_file.sample(10)
def PrepareData(rawData):
test_ = rawData
test_["Day"] = pd.DatetimeIndex(test_["Date"]).day
test_["Month"] = pd.DatetimeIndex(test_["Date"]).month
test_["Year"] = pd.DatetimeIndex(test_["Date"]).year
test_["label_day_night"] = test_["Hour"].apply(
lambda x: "Night" if (x > 20 or x < 5) else ("Day")
)
test_["Date"] = pd.to_datetime(test_["Date"], format="%d/%m/%Y")
test_["WeekDay"] = test_["Date"].dt.day_name()
test_ = test_.drop(
columns=[
"Date",
"Year",
"Day",
"Holiday",
"Solar Radiation (MJ/m2)",
"Dew point temperature(�C)",
"label_day_night",
"Rainfall(mm)",
"Snowfall (cm)",
"Seasons",
"Wind speed (m/s)",
]
)
# test_ = encodingHoliday(test_, ['Holiday'])
test_ = encodingFunctioningDay(test_, ["Functioning Day"])
# test_ = encodingSeasons(test_, ['Seasons'])
# test_ = encoding_label_day_night(test_, ['label_day_night'])
test_ = encodingWeekDay(test_, ["WeekDay"])
names_ = [
"Temperature(�C)",
"Humidity(%)",
"WeekDay",
"Visibility (10m)",
"Hour",
"Month",
]
for i in names_:
test_[i] = normalize_column(test_, i)
return test_
# Combining Features
# # Model Training
file = PrepareData(raw_file)
file.sample(10)
file.corr()
# ## Splitting data into training and validation sets
train_df, val_df = train_test_split(file, test_size=0.20, random_state=42)
x_train = train_df.drop(columns=["ID", "y"])
y_train = train_df["y"]
x_val = val_df.drop(columns=["ID", "y"])
y_val = val_df["y"]
# ## Models:
# RandomForestRegressor Model:
randomForestAlgo = RandomForestRegressor()
param = {
"n_estimators": [4],
"max_depth": [12],
"min_samples_split": [8],
"min_samples_leaf": [2],
"bootstrap": [False],
}
gridSearch_RandomForest = GridSearchCV(randomForestAlgo, param, scoring="r2", cv=5)
gridSearch_RandomForest.fit(x_train, y_train)
predictionsRFR = gridSearch_RandomForest.predict(x_val)
# remove negative values from target
for i in range(len(predictionsRFR)):
predictionsRFR[i] = max(0, predictionsRFR[i])
print("RMSLE= ", np.sqrt(mean_squared_log_error(y_val, predictionsRFR)))
# best parameters: {'bootstrap': True,'ccp_alpha': 0.0,'criterion': 'mse','max_depth': 80,'max_features': 'auto','max_leaf_nodes': None,'max_samples': None,'min_impurity_decrease': 0.0,'min_impurity_split': None,'min_samples_leaf': 2,'min_samples_split': 2,'min_weight_fraction_leaf': 0.0,'n_estimators': 100,'n_jobs': None,'oob_score': False,'random_state': None,'verbose': 0,'warm_start': False}
# test for RandomForestRegressor best parameters
"""
for depth in range(1,100):
param = {'n_estimators' : [4],
'max_depth' : [12],
'min_samples_split':[8],
'min_samples_leaf':[2],
'bootstrap' : [False]
}
gridSearch_RandomForest=GridSearchCV(randomForestAlgo,param,scoring='r2',cv=5)
gridSearch_RandomForest.fit(x_train,y_train)
predictionsRFR = gridSearch_RandomForest.predict(x_val)
for i in range(len(predictionsRFR)):
predictionsRFR[i] = max(0,predictionsRFR[i])
print("max_depth= ",depth," RMSLE= ", np.sqrt(mean_squared_log_error( y_val, predictionsRFR )))
"""
# remove test file from working directory
# os.remove('./test.csv')
# RandomForestClassifier Model:
"""
# Create an instance of the classifier
classifier = RandomForestClassifier(max_depth=32, random_state=0)
# Train the classifier
classifier = classifier.fit(x_train, y_train)
predictionsRFC = classifier.predict(x_val)
#remove negative values from target
for i in range(len(predictionsRFC)):
predictionsRFC[i] = max(0,predictionsRFC[i])
#print("The accuracy of the classifier on the validation set is ", (classifier.score(x_val, y_val)))
print("RMSLE: ", np.sqrt(mean_squared_log_error(y_val, predictionsRFC)))
"""
# test for RandomForestClassifier best parameters
"""
for depth in range(1,2):
classifier = RandomForestClassifier(max_depth=depth, random_state=0)
classifier = classifier.fit(x_train, y_train)
predictionsRF = classifier.predict(x_val)
for i in range(len(predictionsRF)):
predictionsRF[i] = max(0,predictionsRF[i])
print("max_depth= ",depth," ,RMSLE= ", np.sqrt(mean_squared_log_error(y_val, predictionsRF)))
"""
# best depth over 100 depths: max_depth=32, RMSLE=0.4826878794953576; the error becomes constant from depth 58 onward
"""
cv = KFold(n_splits=5, random_state=1, shuffle=True)
#build multiple linear regression model
X = file.drop(columns=['ID','y'])
y = file['y']
#use LOOCV to evaluate model
scores = cross_val_score(classifier, X, y, cv=cv, n_jobs=-1)
print(scores)
"""
# LinearRegression Model:
"""
# Using polynomial LinearRegression on the dataset
reg = LinearRegression().fit(x_train, y_train)
predictionsLiR = reg.predict(x_val)
#remove negative values from target
for i in range(len(predictionsLiR)):
predictionsLiR[i] = max(0,predictionsLiR[i])
#RMSLE
print("RMSLE= ", np.sqrt(mean_squared_log_error( y_val, predictionsLiR )))
"""
# test for LogisticRegression best parameters
"""
for depth in range(100,500,100):
LoR = LogisticRegression(max_iter= depth,random_state=42)
LoR.fit(x_train, y_train)
predictionsLoR = LoR.predict(x_val)
for i in range(len(predictionsLoR)):
predictionsLoR[i] = max(0,predictionsLoR[i])
print("max_depth= ",depth,"RMSLE= ", np.sqrt(mean_squared_log_error( y_val, predictionsLoR )))
"""
# Logistic regression with default setting.
"""
LoR = LogisticRegression(max_iter=500,random_state=42)
LoR.fit(x_train, y_train)
predictionsLoR = LoR.predict(x_val)
#remove negative values from target
for i in range(len(predictionsLoR)):
predictionsLoR[i] = max(0,predictionsLoR[i])
#RMSLE
print("RMSLE= ", np.sqrt(mean_squared_log_error( y_val, predictionsLoR )))
"""
# test for SVR best parameters
"""
for depth in [6800,7000]:
param = {'C' : [depth]}
gridSearchSVR=GridSearchCV(svr_Model, param, scoring='r2', cv=5)
gridSearchSVR.fit(x_train, y_train)
predictionsSVR = gridSearchSVR.predict(x_val)
for i in range(len(predictionsSVR)):
predictionsSVR[i] = max(0,predictionsSVR[i])
print("max_depth= ",depth,"RMSLE= ", np.sqrt(mean_squared_log_error( y_val, predictionsSVR )))
"""
# SVR regression model:
"""
svr_Model=SVR()
param = {'C' : [7000]}
gridSearchSVR=GridSearchCV(svr_Model, param, scoring='r2', cv=5)
gridSearchSVR.fit(x_train, y_train)
predictionsSVR = gridSearchSVR.predict(x_val)
#remove negative values from target
for i in range(len(predictionsSVR)):
predictionsSVR[i] = max(0,predictionsSVR[i])
print("RMSLE= ", np.sqrt(mean_squared_log_error( y_val, predictionsSVR )))
"""
# test for Ridge best parameters
"""
for depth in range(50,150,1):
ridge=Ridge()
parameters={'alpha':[depth]}
gridSearchRidge=GridSearchCV(ridge, parameters, scoring='r2', cv=3)
gridSearchRidge.fit(x_train,y_train)
predictionsRg = gridSearchRidge.predict(x_val)
#remove negative values from target
for i in range(len(predictionsRg)):
predictionsRg[i] = max(0,predictionsRg[i])
print("max_depth= ",depth,"RMSLE= ", np.sqrt(mean_squared_log_error( y_val, predictionsRg )))
"""
# Ridge regression model:
"""
ridge=Ridge()
parameters={'alpha':[100]}
gridSearchRidge=GridSearchCV(ridge, parameters, scoring='r2', cv=3)
gridSearchRidge.fit(x_train,y_train)
predictionsRg = gridSearchRidge.predict(x_val)
#remove negative values from target
for i in range(len(predictionsRg)):
predictionsRg[i] = max(0,predictionsRg[i])
print("RMSLE= ", np.sqrt(mean_squared_log_error( y_val, predictionsRg )))
"""
# Comparing the models' RMSLE scores:
"""
df = pd.DataFrame()
df['Y'] = y_val
df['RFC'] = predictionsRFC
df['LiR'] = predictionsLiR
#df['LoR'] = predictionsLoR
df['RFR'] = predictionsRFR
#df['SVR'] = predictionsSVR
# Ensemble methods: regression
df['Merged'] = (predictionsRFR + predictionsSVR)/2
df
"""
df.corr()
# print("RMSLE= ", np.sqrt(mean_squared_log_error( y_val, ((predictionsRFR + predictionsSVR + predictionsRFC)/3) )))
# Since we know the output is never negative, all negative predictions are replaced with a non-negative value before the error is calculated.
# plt = sns.residplot(x = predictionsRFR, y = y_val, lowess = True,color = 'r')
# ## Reading the test file
raw_test = pd.read_csv("/kaggle/input/seoul-bike-rental-ai-pro-iti/test.csv")
test = raw_test
test
test.describe()
# ## Test set processing
test["Day"] = pd.DatetimeIndex(test["Date"]).day
test["Month"] = pd.DatetimeIndex(test["Date"]).month
test["Year"] = pd.DatetimeIndex(test["Date"]).year
test.sample(10)
test["label_day_night"] = test["Hour"].apply(
lambda x: "Night" if (x > 20 or x < 5) else ("Day")
)
test.sample(10)
test["Date"] = pd.to_datetime(test["Date"], format="%d/%m/%Y")
test["WeekDay"] = test["Date"].dt.day_name()
test
test = test.drop(
columns=[
"Date",
"Year",
"Day",
"Month",
"Dew point temperature(�C)",
"Holiday",
"label_day_night",
"Rainfall(mm)",
"Snowfall (cm)",
"Seasons",
"WeekDay",
"Wind speed (m/s)",
]
)
test.sample(10)
# test = encodingHoliday(test, ['Holiday'])
test = encodingFunctioningDay(test, ["Functioning Day"])
# test = encodingSeasons(test, ['Seasons'])
# test = encoding_label_day_night(test, ['label_day_night'])
# test = encodingWeekDay(test, ['WeekDay'])
test.sample(10)
names = [
"Temperature(�C)",
"Humidity(%)",
"Visibility (10m)",
"Solar Radiation (MJ/m2)",
"Hour",
]
for i in names:
test[i] = normalize_column(test, i)
test
test = PrepareData(raw_test)
# ## Exporting output to csv
# Model chosen: RFR
# columns_dropped=['Date', 'Dew point temperature(�C)', 'Rainfall(mm)', 'Snowfall (cm)']
X_test = test
X_test = X_test.drop(columns=["ID"])
y_test_predicted = gridSearch_RandomForest.predict(X_test)
# remove negative values from target
for i in range(len(y_test_predicted)):
y_test_predicted[i] = max(0, y_test_predicted[i])
test["y"] = y_test_predicted
test
test[["ID", "y"]].to_csv("/kaggle/working/submission.csv", index=False)
| false | 0 | 6,390 | 0 | 6,390 | 6,390 |
||
69919443
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
train_ds = pd.read_csv("/kaggle/input/titanic/train.csv")
test_ds = pd.read_csv("/kaggle/input/titanic/test.csv")
passenger_id = test_ds["PassengerId"]
train_ds.head()
test_ds.head()
plt.bar(
list(train_ds["Survived"].value_counts().keys()),
list(train_ds["Survived"].value_counts()),
)
plt.show()
plt.bar(
list(train_ds["Pclass"].value_counts().keys()),
list(train_ds["Pclass"].value_counts()),
)
plt.show()
plt.bar(
list(train_ds["Sex"].value_counts().keys()), list(train_ds["Sex"].value_counts())
)
plt.show()
plt.hist(train_ds["Age"])
plt.ylabel("Age")
plt.show()
train_ds = train_ds.drop(
["PassengerId", "Name", "Ticket", "Fare", "Cabin", "Embarked"], axis=1
)
test_ds = test_ds.drop(
["PassengerId", "Name", "Ticket", "Fare", "Cabin", "Embarked"], axis=1
)
train_ds.isnull().sum()
test_ds.isnull().sum()
x_train = train_ds.iloc[:, 1:4].values
y_train = train_ds.iloc[:, 0].values
x_test = test_ds.iloc[:, :3].values
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(missing_values=np.nan, strategy="mean")
x_train[:, 2:3] = imputer.fit_transform(x_train[:, 2:3])
x_test[:, 2:3] = imputer.transform(x_test[:, 2:3])  # reuse the imputer fitted on the training data
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
x_train[:, 1] = le.fit_transform(x_train[:, 1])
x_test[:, 1] = le.fit_transform(x_test[:, 1])
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
x_train = sc.fit_transform(x_train)
x_test = sc.transform(x_test)  # reuse the scaler fitted on the training data
from sklearn.tree import DecisionTreeClassifier
clsfr = DecisionTreeClassifier(criterion="entropy")
clsfr.fit(x_train, y_train)
prediction = clsfr.predict(x_test)
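# There is no separate validation split in this notebook, so a quick sanity check of
# the tree's accuracy can come from cross-validation on the training data; a minimal
# sketch (5 folds, default accuracy scoring):
from sklearn.model_selection import cross_val_score

cv_acc = cross_val_score(DecisionTreeClassifier(criterion="entropy"), x_train, y_train, cv=5)
print("CV accuracy per fold:", cv_acc, " mean:", cv_acc.mean())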
output = pd.DataFrame({"PassengerId": passenger_id, "Survived": prediction})
output.to_csv("my_submission.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/919/69919443.ipynb
| null | null |
[{"Id": 69919443, "ScriptId": 19118593, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6804135, "CreationDate": "08/03/2021 20:56:02", "VersionNumber": 4.0, "Title": "notebook4b778ffce8", "EvaluationDate": "08/03/2021", "IsChange": true, "TotalLines": 78.0, "LinesInsertedFromPrevious": 2.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 76.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
sns.set()
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
train_ds = pd.read_csv("/kaggle/input/titanic/train.csv")
test_ds = pd.read_csv("/kaggle/input/titanic/test.csv")
passenger_id = test_ds["PassengerId"]
train_ds.head()
test_ds.head()
plt.bar(
list(train_ds["Survived"].value_counts().keys()),
list(train_ds["Survived"].value_counts()),
)
plt.show()
plt.bar(
list(train_ds["Pclass"].value_counts().keys()),
list(train_ds["Pclass"].value_counts()),
)
plt.show()
plt.bar(
list(train_ds["Sex"].value_counts().keys()), list(train_ds["Sex"].value_counts())
)
plt.show()
plt.hist(train_ds["Age"])
plt.ylabel("Age")
plt.show()
train_ds = train_ds.drop(
["PassengerId", "Name", "Ticket", "Fare", "Cabin", "Embarked"], axis=1
)
test_ds = test_ds.drop(
["PassengerId", "Name", "Ticket", "Fare", "Cabin", "Embarked"], axis=1
)
train_ds.isnull().sum()
test_ds.isnull().sum()
x_train = train_ds.iloc[:, 1:4].values
y_train = train_ds.iloc[:, 0].values
x_test = test_ds.iloc[:, :3].values
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(missing_values=np.nan, strategy="mean")
x_train[:, 2:3] = imputer.fit_transform(x_train[:, 2:3])
x_test[:, 2:3] = imputer.transform(x_test[:, 2:3])  # reuse the imputer fitted on the training data
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
x_train[:, 1] = le.fit_transform(x_train[:, 1])
x_test[:, 1] = le.fit_transform(x_test[:, 1])
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
x_train = sc.fit_transform(x_train)
x_test = sc.transform(x_test)  # reuse the scaler fitted on the training data
from sklearn.tree import DecisionTreeClassifier
clsfr = DecisionTreeClassifier(criterion="entropy")
clsfr.fit(x_train, y_train)
prediction = clsfr.predict(x_test)
output = pd.DataFrame({"PassengerId": passenger_id, "Survived": prediction})
output.to_csv("my_submission.csv", index=False)
| false | 0 | 847 | 0 | 847 | 847 |
||
69919834
|
<jupyter_start><jupyter_text>Heart Attack Analysis & Prediction Dataset
## Hone your analytical and ML skills by participating in tasks of my other dataset's. Given below.
[Data Science Job Posting on Glassdoor](https://www.kaggle.com/rashikrahmanpritom/data-science-job-posting-on-glassdoor)
[Groceries dataset for Market Basket Analysis(MBA)](https://www.kaggle.com/rashikrahmanpritom/groceries-dataset-for-market-basket-analysismba)
[Dataset for Facial recognition using ML approach](https://www.kaggle.com/rashikrahmanpritom/dataset-for-facial-recognition-using-ml-approach)
[Covid_w/wo_Pneumonia Chest Xray](https://www.kaggle.com/rashikrahmanpritom/covid-wwo-pneumonia-chest-xray)
[Disney Movies 1937-2016 Gross Income](https://www.kaggle.com/rashikrahmanpritom/disney-movies-19372016-total-gross)
[Bollywood Movie data from 2000 to 2019](https://www.kaggle.com/rashikrahmanpritom/bollywood-movie-data-from-2000-to-2019)
[17.7K English song data from 2008-2017](https://www.kaggle.com/rashikrahmanpritom/177k-english-song-data-from-20082017)
## About this dataset
- Age : Age of the patient
- Sex : Sex of the patient
- exang: exercise induced angina (1 = yes; 0 = no)
- ca: number of major vessels (0-3)
- cp : Chest Pain type
- Value 1: typical angina
- Value 2: atypical angina
- Value 3: non-anginal pain
- Value 4: asymptomatic
- trtbps : resting blood pressure (in mm Hg)
- chol : cholesterol in mg/dl fetched via BMI sensor
- fbs : (fasting blood sugar > 120 mg/dl) (1 = true; 0 = false)
- rest_ecg : resting electrocardiographic results
- Value 0: normal
- Value 1: having ST-T wave abnormality (T wave inversions and/or ST elevation or depression of > 0.05 mV)
- Value 2: showing probable or definite left ventricular hypertrophy by Estes' criteria
- thalach : maximum heart rate achieved
- target : 0= less chance of heart attack 1= more chance of heart attack
n
Kaggle dataset identifier: heart-attack-analysis-prediction-dataset
<jupyter_code>import pandas as pd
df = pd.read_csv('heart-attack-analysis-prediction-dataset/heart.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 303 entries, 0 to 302
Data columns (total 14 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 age 303 non-null int64
1 sex 303 non-null int64
2 cp 303 non-null int64
3 trtbps 303 non-null int64
4 chol 303 non-null int64
5 fbs 303 non-null int64
6 restecg 303 non-null int64
7 thalachh 303 non-null int64
8 exng 303 non-null int64
9 oldpeak 303 non-null float64
10 slp 303 non-null int64
11 caa 303 non-null int64
12 thall 303 non-null int64
13 output 303 non-null int64
dtypes: float64(1), int64(13)
memory usage: 33.3 KB
<jupyter_text>Examples:
{
"age": 63.0,
"sex": 1.0,
"cp": 3.0,
"trtbps": 145.0,
"chol": 233.0,
"fbs": 1.0,
"restecg": 0.0,
"thalachh": 150.0,
"exng": 0.0,
"oldpeak": 2.3,
"slp": 0.0,
"caa": 0.0,
"thall": 1.0,
"output": 1.0
}
{
"age": 37.0,
"sex": 1.0,
"cp": 2.0,
"trtbps": 130.0,
"chol": 250.0,
"fbs": 0.0,
"restecg": 1.0,
"thalachh": 187.0,
"exng": 0.0,
"oldpeak": 3.5,
"slp": 0.0,
"caa": 0.0,
"thall": 2.0,
"output": 1.0
}
{
"age": 41.0,
"sex": 0.0,
"cp": 1.0,
"trtbps": 130.0,
"chol": 204.0,
"fbs": 0.0,
"restecg": 0.0,
"thalachh": 172.0,
"exng": 0.0,
"oldpeak": 1.4,
"slp": 2.0,
"caa": 0.0,
"thall": 2.0,
"output": 1.0
}
{
"age": 56.0,
"sex": 1.0,
"cp": 1.0,
"trtbps": 120.0,
"chol": 236.0,
"fbs": 0.0,
"restecg": 1.0,
"thalachh": 178.0,
"exng": 0.0,
"oldpeak": 0.8,
"slp": 2.0,
"caa": 0.0,
"thall": 2.0,
"output": 1.0
}
<jupyter_script># Project Specific Libraries are imported (2)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.metrics import accuracy_score
import statistics
from sklearn.model_selection import train_test_split, KFold
# Dataset for heart attack prediction is read and put into the dataframe structure. (3)
# The following data is considered as the train set
# The first 10 values are displayed (4)
df = pd.read_csv("../input/heart-attack-analysis-prediction-dataset/heart.csv")
display(df.head(10))
print("\n\n")
print(df.shape)
# The dataframe is separated into features and target: X holds the scaled
# numerical columns together with the one-hot-encoded categorical columns,
# while y is the output column (likelihood of a heart attack) that the
# supervised machine learning model will learn to predict. (1)
features_num = ["age", "trtbps", "chol", "thalachh", "oldpeak"]
features_cat = ["sex", "exng", "caa", "cp", "fbs", "restecg", "slp", "thall"]
scaler = StandardScaler()
ohe = OneHotEncoder(sparse=False)
scaled_columns = scaler.fit_transform(df[features_num])
encoded_columns = ohe.fit_transform(df[features_cat])
X = np.concatenate([scaled_columns, encoded_columns], axis=1)
y = df["output"]
# This part splits the dataset into train and test sets:
# 75% of the data is used for training and the model is evaluated on the
# remaining 25%. Scikit-learn's train_test_split function handles the split.
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=4
)
pff = pd.DataFrame({"output": X_test[:, 29]})
display(pff)
# pff.to_csv('testpff.csv')
# A single column (the output column in this case) is accessed from the dataframe and viewed
print(df["output"]) # access data frame specific column in this way OR
print(df.output) # access data frame this way
print(df.output.value_counts()) # counts the unique different values in the column
# Check whether any column has missing data; there are no NaN values
# in any column. (5,12,13,14)
df.isnull().sum()
df.info()  # information about the dataframe is presented (5,12,13,14)
# boxplot and outlier detection for the training dataset is done using seaborn (6,7)
fig, (subplot1, subplot2) = plt.subplots(1, 2) # 1 row 2 column row created
# The input data argument is df (the dataframe here).
# x=cp / x=exng sets the grouping variable for each boxplot (one box per
# chest-pain category and one per exercise-induced-angina value),
# and y=age is taken from df for both boxplots.
sns.boxplot(x="cp", y="age", data=df, ax=subplot1) # chest pains
sns.boxplot(x="exng", y="age", data=df, ax=subplot2) # exercise induced angina
plt.show()
# The boxplot of exercise-induced angina against age showed outliers for
# people aged over 70; those rows are shown here. (6,7)
df.loc[df.age > 70]
# The data types are checked first.
# A correlation matrix is then created for the training data to show which
# columns are most strongly related to each other, giving an idea of which
# values are the most influential. (8)
print(df.dtypes)
print("\n\n")
TrainingDataset_NumericalOnly = df.select_dtypes(include=["int64", "float64"])
corr_mat = TrainingDataset_NumericalOnly.corr()
corr_mat.head(n=5) # first 5 values are checked
# The heat map for the training dataset is a visual representation of the
# correlation matrix: more strongly correlated columns are shown in
# warmer colours. (9)
plt.figure(figsize=(17, 10))
sns.heatmap(
corr_mat,
cmap=plt.cm.RdBu,
vmax=1,
linewidth=0.1,
linecolor="white",
square=True,
annot=True,
)
# The pairplot shows the pairwise relationships between the columns of the
# training dataset as a grid of axes, with each numeric variable shared across
# the x and y axes. The diagonal plots act differently: a univariate
# distribution plot is drawn there to show the marginal distribution of each
# column. (10)
# Data for output (heart attack) is shown in blue.
plt.figure(figsize=(20, 20))
sns.pairplot(TrainingDataset_NumericalOnly, hue="output")
# The countplot breaks a column down by its individual outcomes.
# Here it shows how many males and females in the training set are likely to
# have a heart attack; the blue colour indicates a higher likelihood of
# heart failure. (11)
sns.countplot(x="sex", data=df, hue="output")
plt.xticks(ticks=[0, 1], labels=["female", "male"])
# A supervised machine learning model is built with logistic regression to
# predict the output column (heart attack chances).
# The model accuracy is computed on each held-out fold, and the predictions for
# each fold are put into a dataframe and shown.
# A 5-fold cross validation is run over the logistic regression model: the data
# is split into 5 different train/test partitions, the model is fitted on each
# training partition and evaluated on the corresponding held-out partition.
# This produces an array of 5 accuracy scores, from which the average accuracy
# and the standard deviation are reported for the model.
X = df.iloc[:, :-1]
y = df.iloc[:, -1]
k = 5
kf = KFold(n_splits=k, random_state=None)
model = LogisticRegression(solver="liblinear")
acc_score = []
pred_val_stack = []
for train_index, test_index in kf.split(X):
X_train, X_test = X.iloc[train_index, :], X.iloc[test_index, :]
y_train, y_test = y[train_index], y[test_index]
model.fit(X_train, y_train)
pred_values = model.predict(X_test)
ptt = np.transpose(pred_values)
conv_ptt = pd.DataFrame(ptt, columns=["output"])
display(conv_ptt)
acc = accuracy_score(pred_values, y_test)
acc_score.append(acc)
avg_acc_score = sum(acc_score) / k
std_div = statistics.stdev(acc_score)
print("accuracy of each fold - {}".format(acc_score))
print("Avg accuracy : {}".format(avg_acc_score))
print("Standard Deviation : {}".format(std_div))
conv_ptt.to_csv("heart_out_prediction.csv", mode="a", header=True)
conv_ptt.to_excel("output.xlsx")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/919/69919834.ipynb
|
heart-attack-analysis-prediction-dataset
|
rashikrahmanpritom
|
[{"Id": 69919834, "ScriptId": 18975210, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8019645, "CreationDate": "08/03/2021 20:58:50", "VersionNumber": 10.0, "Title": "When Will Your Heart Fail You", "EvaluationDate": "08/03/2021", "IsChange": true, "TotalLines": 185.0, "LinesInsertedFromPrevious": 2.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 183.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 93336834, "KernelVersionId": 69919834, "SourceDatasetVersionId": 2047221}]
|
[{"Id": 2047221, "DatasetId": 1226038, "DatasourceVersionId": 2087216, "CreatorUserId": 4730101, "LicenseName": "CC0: Public Domain", "CreationDate": "03/22/2021 11:40:59", "VersionNumber": 2.0, "Title": "Heart Attack Analysis & Prediction Dataset", "Slug": "heart-attack-analysis-prediction-dataset", "Subtitle": "A dataset for heart attack classification", "Description": "## Hone your analytical and ML skills by participating in tasks of my other dataset's. Given below.\n\n\n[Data Science Job Posting on Glassdoor](https://www.kaggle.com/rashikrahmanpritom/data-science-job-posting-on-glassdoor)\n\n[Groceries dataset for Market Basket Analysis(MBA)](https://www.kaggle.com/rashikrahmanpritom/groceries-dataset-for-market-basket-analysismba)\n\n[Dataset for Facial recognition using ML approach](https://www.kaggle.com/rashikrahmanpritom/dataset-for-facial-recognition-using-ml-approach)\n\n[Covid_w/wo_Pneumonia Chest Xray](https://www.kaggle.com/rashikrahmanpritom/covid-wwo-pneumonia-chest-xray)\n\n[Disney Movies 1937-2016 Gross Income](https://www.kaggle.com/rashikrahmanpritom/disney-movies-19372016-total-gross)\n\n[Bollywood Movie data from 2000 to 2019](https://www.kaggle.com/rashikrahmanpritom/bollywood-movie-data-from-2000-to-2019)\n\n[17.7K English song data from 2008-2017](https://www.kaggle.com/rashikrahmanpritom/177k-english-song-data-from-20082017)\n\n## About this dataset\n\n- Age : Age of the patient\n\n- Sex : Sex of the patient\n\n- exang: exercise induced angina (1 = yes; 0 = no)\n\n- ca: number of major vessels (0-3)\n\n- cp : Chest Pain type chest pain type\n - Value 1: typical angina\n - Value 2: atypical angina\n - Value 3: non-anginal pain\n - Value 4: asymptomatic\n \n- trtbps : resting blood pressure (in mm Hg)\n- chol : cholestoral in mg/dl fetched via BMI sensor\n- fbs : (fasting blood sugar > 120 mg/dl) (1 = true; 0 = false)\n- rest_ecg : resting electrocardiographic results\n - Value 0: normal\n - Value 1: having ST-T wave abnormality (T wave inversions and/or ST elevation or depression of > 0.05 mV)\n - Value 2: showing probable or definite left ventricular hypertrophy by Estes' criteria\n \n- thalach : maximum heart rate achieved\n- target : 0= less chance of heart attack 1= more chance of heart attack\n\nn", "VersionNotes": "heart csv update", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1226038, "CreatorUserId": 4730101, "OwnerUserId": 4730101.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2047221.0, "CurrentDatasourceVersionId": 2087216.0, "ForumId": 1244179, "Type": 2, "CreationDate": "03/22/2021 08:19:12", "LastActivityDate": "03/22/2021", "TotalViews": 870835, "TotalDownloads": 138216, "TotalVotes": 3197, "TotalKernels": 1050}]
|
[{"Id": 4730101, "UserName": "rashikrahmanpritom", "DisplayName": "Rashik Rahman", "RegisterDate": "03/24/2020", "PerformanceTier": 3}]
|
# Project Specific Libraries are imported (2)
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score
from sklearn.metrics import accuracy_score
import statistics
from sklearn.model_selection import train_test_split, KFold
# Dataset for heart attack prediction is read and put into the dataframe structure. (3)
# The following data is considered as the train set
# The first 10 values are displayed (4)
df = pd.read_csv("../input/heart-attack-analysis-prediction-dataset/heart.csv")
display(df.head(10))
print("\n\n")
print(df.shape)
# The dataframe is separated into features and target: X holds the scaled
# numerical columns together with the one-hot-encoded categorical columns,
# while y is the output column (likelihood of a heart attack) that the
# supervised machine learning model will learn to predict. (1)
features_num = ["age", "trtbps", "chol", "thalachh", "oldpeak"]
features_cat = ["sex", "exng", "caa", "cp", "fbs", "restecg", "slp", "thall"]
scaler = StandardScaler()
ohe = OneHotEncoder(sparse=False)
scaled_columns = scaler.fit_transform(df[features_num])
encoded_columns = ohe.fit_transform(df[features_cat])
X = np.concatenate([scaled_columns, encoded_columns], axis=1)
y = df["output"]
# This part splits the dataset into train and test sets:
# 75% of the data is used for training and the model is evaluated on the
# remaining 25%. Scikit-learn's train_test_split function handles the split.
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=4
)
pff = pd.DataFrame({"output": X_test[:, 29]})
display(pff)
# pff.to_csv('testpff.csv')
# A single column (the output column in this case) is accessed from the dataframe and viewed
print(df["output"]) # access data frame specific column in this way OR
print(df.output) # access data frame this way
print(df.output.value_counts()) # counts the unique different values in the column
# Check whether any column has missing data; there are no NaN values
# in any column. (5,12,13,14)
df.isnull().sum()
df.info()  # information about the dataframe is presented (5,12,13,14)
# boxplot and outlier detection for the training dataset is done using seaborn (6,7)
fig, (subplot1, subplot2) = plt.subplots(1, 2) # 1 row 2 column row created
# The input data argument is df (the dataframe here).
# x=cp / x=exng sets the grouping variable for each boxplot (one box per
# chest-pain category and one per exercise-induced-angina value),
# and y=age is taken from df for both boxplots.
sns.boxplot(x="cp", y="age", data=df, ax=subplot1) # chest pains
sns.boxplot(x="exng", y="age", data=df, ax=subplot2) # exercise induced angina
plt.show()
# The boxplot of exercise-induced angina against age showed outliers for
# people aged over 70; those rows are shown here. (6,7)
df.loc[df.age > 70]
# The data types are checked first.
# A correlation matrix is then created for the training data to show which
# columns are most strongly related to each other, giving an idea of which
# values are the most influential. (8)
print(df.dtypes)
print("\n\n")
TrainingDataset_NumericalOnly = df.select_dtypes(include=["int64", "float64"])
corr_mat = TrainingDataset_NumericalOnly.corr()
corr_mat.head(n=5) # first 5 values are checked
# The heat map for the training dataset is a visual representation of the
# correlation matrix: more strongly correlated columns are shown in
# warmer colours. (9)
plt.figure(figsize=(17, 10))
sns.heatmap(
corr_mat,
cmap=plt.cm.RdBu,
vmax=1,
linewidth=0.1,
linecolor="white",
square=True,
annot=True,
)
# The pairplot shows the pairwise relationships between the columns of the
# training dataset as a grid of axes, with each numeric variable shared across
# the x and y axes. The diagonal plots act differently: a univariate
# distribution plot is drawn there to show the marginal distribution of each
# column. (10)
# Data for output (heart attack) is shown in blue.
plt.figure(figsize=(20, 20))
sns.pairplot(TrainingDataset_NumericalOnly, hue="output")
# The countplot breaks a column down by its individual outcomes.
# Here it shows how many males and females in the training set are likely to
# have a heart attack; the blue colour indicates a higher likelihood of
# heart failure. (11)
sns.countplot(x="sex", data=df, hue="output")
plt.xticks(ticks=[0, 1], labels=["female", "male"])
# A supervised machine learning model is built with logistic regression to
# predict the output column (heart attack chances).
# The model accuracy is computed on each held-out fold, and the predictions for
# each fold are put into a dataframe and shown.
# A 5-fold cross validation is run over the logistic regression model: the data
# is split into 5 different train/test partitions, the model is fitted on each
# training partition and evaluated on the corresponding held-out partition.
# This produces an array of 5 accuracy scores, from which the average accuracy
# and the standard deviation are reported for the model.
X = df.iloc[:, :-1]
y = df.iloc[:, -1]
k = 5
kf = KFold(n_splits=k, random_state=None)
model = LogisticRegression(solver="liblinear")
acc_score = []
pred_val_stack = []
for train_index, test_index in kf.split(X):
X_train, X_test = X.iloc[train_index, :], X.iloc[test_index, :]
y_train, y_test = y[train_index], y[test_index]
model.fit(X_train, y_train)
pred_values = model.predict(X_test)
ptt = np.transpose(pred_values)
conv_ptt = pd.DataFrame(ptt, columns=["output"])
display(conv_ptt)
acc = accuracy_score(pred_values, y_test)
acc_score.append(acc)
avg_acc_score = sum(acc_score) / k
std_div = statistics.stdev(acc_score)
print("accuracy of each fold - {}".format(acc_score))
print("Avg accuracy : {}".format(avg_acc_score))
print("Standard Deviation : {}".format(std_div))
conv_ptt.to_csv("heart_out_prediction.csv", mode="a", header=True)
conv_ptt.to_excel("output.xlsx")
|
[{"heart-attack-analysis-prediction-dataset/heart.csv": {"column_names": "[\"age\", \"sex\", \"cp\", \"trtbps\", \"chol\", \"fbs\", \"restecg\", \"thalachh\", \"exng\", \"oldpeak\", \"slp\", \"caa\", \"thall\", \"output\"]", "column_data_types": "{\"age\": \"int64\", \"sex\": \"int64\", \"cp\": \"int64\", \"trtbps\": \"int64\", \"chol\": \"int64\", \"fbs\": \"int64\", \"restecg\": \"int64\", \"thalachh\": \"int64\", \"exng\": \"int64\", \"oldpeak\": \"float64\", \"slp\": \"int64\", \"caa\": \"int64\", \"thall\": \"int64\", \"output\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 303 entries, 0 to 302\nData columns (total 14 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 age 303 non-null int64 \n 1 sex 303 non-null int64 \n 2 cp 303 non-null int64 \n 3 trtbps 303 non-null int64 \n 4 chol 303 non-null int64 \n 5 fbs 303 non-null int64 \n 6 restecg 303 non-null int64 \n 7 thalachh 303 non-null int64 \n 8 exng 303 non-null int64 \n 9 oldpeak 303 non-null float64\n 10 slp 303 non-null int64 \n 11 caa 303 non-null int64 \n 12 thall 303 non-null int64 \n 13 output 303 non-null int64 \ndtypes: float64(1), int64(13)\nmemory usage: 33.3 KB\n", "summary": "{\"age\": {\"count\": 303.0, \"mean\": 54.366336633663366, \"std\": 9.082100989837857, \"min\": 29.0, \"25%\": 47.5, \"50%\": 55.0, \"75%\": 61.0, \"max\": 77.0}, \"sex\": {\"count\": 303.0, \"mean\": 0.6831683168316832, \"std\": 0.46601082333962385, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 1.0}, \"cp\": {\"count\": 303.0, \"mean\": 0.966996699669967, \"std\": 1.0320524894832985, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 2.0, \"max\": 3.0}, \"trtbps\": {\"count\": 303.0, \"mean\": 131.62376237623764, \"std\": 17.5381428135171, \"min\": 94.0, \"25%\": 120.0, \"50%\": 130.0, \"75%\": 140.0, \"max\": 200.0}, \"chol\": {\"count\": 303.0, \"mean\": 246.26402640264027, \"std\": 51.83075098793003, \"min\": 126.0, \"25%\": 211.0, \"50%\": 240.0, \"75%\": 274.5, \"max\": 564.0}, \"fbs\": {\"count\": 303.0, \"mean\": 0.1485148514851485, \"std\": 0.35619787492797644, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"restecg\": {\"count\": 303.0, \"mean\": 0.528052805280528, \"std\": 0.525859596359298, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 2.0}, \"thalachh\": {\"count\": 303.0, \"mean\": 149.64686468646866, \"std\": 22.905161114914094, \"min\": 71.0, \"25%\": 133.5, \"50%\": 153.0, \"75%\": 166.0, \"max\": 202.0}, \"exng\": {\"count\": 303.0, \"mean\": 0.32673267326732675, \"std\": 0.4697944645223165, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 1.0}, \"oldpeak\": {\"count\": 303.0, \"mean\": 1.0396039603960396, \"std\": 1.1610750220686348, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.8, \"75%\": 1.6, \"max\": 6.2}, \"slp\": {\"count\": 303.0, \"mean\": 1.3993399339933994, \"std\": 0.6162261453459619, \"min\": 0.0, \"25%\": 1.0, \"50%\": 1.0, \"75%\": 2.0, \"max\": 2.0}, \"caa\": {\"count\": 303.0, \"mean\": 0.7293729372937293, \"std\": 1.022606364969327, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 4.0}, \"thall\": {\"count\": 303.0, \"mean\": 2.3135313531353137, \"std\": 0.6122765072781409, \"min\": 0.0, \"25%\": 2.0, \"50%\": 2.0, \"75%\": 3.0, \"max\": 3.0}, \"output\": {\"count\": 303.0, \"mean\": 0.5445544554455446, \"std\": 0.4988347841643913, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 1.0}}", "examples": 
"{\"age\":{\"0\":63,\"1\":37,\"2\":41,\"3\":56},\"sex\":{\"0\":1,\"1\":1,\"2\":0,\"3\":1},\"cp\":{\"0\":3,\"1\":2,\"2\":1,\"3\":1},\"trtbps\":{\"0\":145,\"1\":130,\"2\":130,\"3\":120},\"chol\":{\"0\":233,\"1\":250,\"2\":204,\"3\":236},\"fbs\":{\"0\":1,\"1\":0,\"2\":0,\"3\":0},\"restecg\":{\"0\":0,\"1\":1,\"2\":0,\"3\":1},\"thalachh\":{\"0\":150,\"1\":187,\"2\":172,\"3\":178},\"exng\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"oldpeak\":{\"0\":2.3,\"1\":3.5,\"2\":1.4,\"3\":0.8},\"slp\":{\"0\":0,\"1\":0,\"2\":2,\"3\":2},\"caa\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"thall\":{\"0\":1,\"1\":2,\"2\":2,\"3\":2},\"output\":{\"0\":1,\"1\":1,\"2\":1,\"3\":1}}"}}]
| true | 1 |
<start_data_description><data_path>heart-attack-analysis-prediction-dataset/heart.csv:
<column_names>
['age', 'sex', 'cp', 'trtbps', 'chol', 'fbs', 'restecg', 'thalachh', 'exng', 'oldpeak', 'slp', 'caa', 'thall', 'output']
<column_types>
{'age': 'int64', 'sex': 'int64', 'cp': 'int64', 'trtbps': 'int64', 'chol': 'int64', 'fbs': 'int64', 'restecg': 'int64', 'thalachh': 'int64', 'exng': 'int64', 'oldpeak': 'float64', 'slp': 'int64', 'caa': 'int64', 'thall': 'int64', 'output': 'int64'}
<dataframe_Summary>
{'age': {'count': 303.0, 'mean': 54.366336633663366, 'std': 9.082100989837857, 'min': 29.0, '25%': 47.5, '50%': 55.0, '75%': 61.0, 'max': 77.0}, 'sex': {'count': 303.0, 'mean': 0.6831683168316832, 'std': 0.46601082333962385, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 1.0, 'max': 1.0}, 'cp': {'count': 303.0, 'mean': 0.966996699669967, 'std': 1.0320524894832985, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 2.0, 'max': 3.0}, 'trtbps': {'count': 303.0, 'mean': 131.62376237623764, 'std': 17.5381428135171, 'min': 94.0, '25%': 120.0, '50%': 130.0, '75%': 140.0, 'max': 200.0}, 'chol': {'count': 303.0, 'mean': 246.26402640264027, 'std': 51.83075098793003, 'min': 126.0, '25%': 211.0, '50%': 240.0, '75%': 274.5, 'max': 564.0}, 'fbs': {'count': 303.0, 'mean': 0.1485148514851485, 'std': 0.35619787492797644, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'restecg': {'count': 303.0, 'mean': 0.528052805280528, 'std': 0.525859596359298, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 1.0, 'max': 2.0}, 'thalachh': {'count': 303.0, 'mean': 149.64686468646866, 'std': 22.905161114914094, 'min': 71.0, '25%': 133.5, '50%': 153.0, '75%': 166.0, 'max': 202.0}, 'exng': {'count': 303.0, 'mean': 0.32673267326732675, 'std': 0.4697944645223165, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 1.0}, 'oldpeak': {'count': 303.0, 'mean': 1.0396039603960396, 'std': 1.1610750220686348, 'min': 0.0, '25%': 0.0, '50%': 0.8, '75%': 1.6, 'max': 6.2}, 'slp': {'count': 303.0, 'mean': 1.3993399339933994, 'std': 0.6162261453459619, 'min': 0.0, '25%': 1.0, '50%': 1.0, '75%': 2.0, 'max': 2.0}, 'caa': {'count': 303.0, 'mean': 0.7293729372937293, 'std': 1.022606364969327, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 4.0}, 'thall': {'count': 303.0, 'mean': 2.3135313531353137, 'std': 0.6122765072781409, 'min': 0.0, '25%': 2.0, '50%': 2.0, '75%': 3.0, 'max': 3.0}, 'output': {'count': 303.0, 'mean': 0.5445544554455446, 'std': 0.4988347841643913, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 1.0, 'max': 1.0}}
<dataframe_info>
RangeIndex: 303 entries, 0 to 302
Data columns (total 14 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 age 303 non-null int64
1 sex 303 non-null int64
2 cp 303 non-null int64
3 trtbps 303 non-null int64
4 chol 303 non-null int64
5 fbs 303 non-null int64
6 restecg 303 non-null int64
7 thalachh 303 non-null int64
8 exng 303 non-null int64
9 oldpeak 303 non-null float64
10 slp 303 non-null int64
11 caa 303 non-null int64
12 thall 303 non-null int64
13 output 303 non-null int64
dtypes: float64(1), int64(13)
memory usage: 33.3 KB
<some_examples>
{'age': {'0': 63, '1': 37, '2': 41, '3': 56}, 'sex': {'0': 1, '1': 1, '2': 0, '3': 1}, 'cp': {'0': 3, '1': 2, '2': 1, '3': 1}, 'trtbps': {'0': 145, '1': 130, '2': 130, '3': 120}, 'chol': {'0': 233, '1': 250, '2': 204, '3': 236}, 'fbs': {'0': 1, '1': 0, '2': 0, '3': 0}, 'restecg': {'0': 0, '1': 1, '2': 0, '3': 1}, 'thalachh': {'0': 150, '1': 187, '2': 172, '3': 178}, 'exng': {'0': 0, '1': 0, '2': 0, '3': 0}, 'oldpeak': {'0': 2.3, '1': 3.5, '2': 1.4, '3': 0.8}, 'slp': {'0': 0, '1': 0, '2': 2, '3': 2}, 'caa': {'0': 0, '1': 0, '2': 0, '3': 0}, 'thall': {'0': 1, '1': 2, '2': 2, '3': 2}, 'output': {'0': 1, '1': 1, '2': 1, '3': 1}}
<end_description>
| 1,968 | 0 | 3,662 | 1,968 |
69919825
|
<jupyter_start><jupyter_text>Huggingface BERT
This dataset contains many popular BERT weights retrieved directly on [Hugging Face's model repository](https://huggingface.co/models), and hosted on Kaggle. It will be automatically updated every month to ensure that the latest version is available to the user. By making it a dataset, it is significantly faster to load the weights since you can directly attach a Kaggle dataset to the notebook rather than downloading the data every time. See the [speed comparison notebook](https://www.kaggle.com/xhlulu/loading-bert-speed-comparison).
*The banner was adapted from figures by [Jimmy Lin](https://twitter.com/lintool) ([tweet](https://twitter.com/lintool/status/1285599163024125959); [slide](https://docs.google.com/presentation/d/1HMng1RWuY1molsGamwIpRnxNZNEqEPRVODcXpm-pX4c/)) released under [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/). BERT has an Apache 2.0 license according to the model repository.*
### Quick Start
To use this dataset, simply attach it the your notebook and specify the path to the dataset. For example:
```python
from transformers import AutoTokenizer, AutoModelForMaskedLM
MODEL_DIR = "/kaggle/input/huggingface-bert/"
tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR + "bert-large-uncased")
model = AutoModelForMaskedLM.from_pretrained(MODEL_DIR + "bert-large-uncased")
```
Kaggle dataset identifier: huggingface-bert
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# DataFrame
import pandas as pd
# Matplot
import matplotlib.pyplot as plt
# Scikit-learn
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
from sklearn.manifold import TSNE
from sklearn.feature_extraction.text import TfidfVectorizer
# Summary
from sklearn import datasets
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.linear_model import ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
# from sklearn.preprocessing import Imputer
from sklearn.preprocessing import scale
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from scipy.stats import randint
# tensor-Keras
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (
Activation,
Dense,
Dropout,
Embedding,
Flatten,
Conv1D,
MaxPooling1D,
LSTM,
)
from tensorflow.keras import utils
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping
# nltk
import nltk
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
# Word2vec
import gensim
# Utility
import re
import numpy as np
import os
from collections import Counter
import logging
import time
import pickle
import itertools
from sklearn.impute import SimpleImputer
import warnings
import scipy.io
print("TensorFlow version: ", tf.__version__)
warnings.filterwarnings("ignore")
# Import the hashing vectorizer
from sklearn.feature_extraction.text import HashingVectorizer
# Import functional utilities
from sklearn.preprocessing import FunctionTransformer, MaxAbsScaler
from sklearn.pipeline import FeatureUnion
from sklearn.linear_model import LinearRegression
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_regression
from sklearn.base import BaseEstimator, TransformerMixin
from scipy import sparse
from itertools import combinations
import string
print(tf.test.gpu_device_name())
# See https://www.tensorflow.org/tutorials/using_gpu#allowing_gpu_memory_growth
# # NLP PIPELINE
# There are mainly four stages in an NLP pipeline (a minimal toy sketch of these stages follows the outline below):
# ## Exploratory Data Analysis
# ## Text Processing
# Cleaning
# Normalization
# Tokenize
# Stop word removal
# Stemming and Lemmatization
# POS and NER
#
# ## Feature Extraction
# Bag of Words
# TF-IDF
# word2vec
# Glove
# ## Modeling
# Model
# Train
# Predict
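# For orientation, a minimal, self-contained sketch of these stages on a toy
# corpus (the sentences and readability scores below are invented purely for
# illustration; the real competition data is loaded right after this cell):
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Ridge
from sklearn.pipeline import Pipeline

toy_texts = [
    "A short and easy sentence.",
    "A considerably more elaborate and winding excerpt to read.",
]
toy_scores = [0.5, -1.2]  # hypothetical readability targets
print("toy corpus size:", len(toy_texts))  # (toy) exploratory look at the data
toy_pipe = Pipeline(
    [
        ("features", TfidfVectorizer(lowercase=True, stop_words="english")),  # text processing + feature extraction
        ("model", Ridge()),  # modeling
    ]
)
toy_pipe.fit(toy_texts, toy_scores)  # train
print(toy_pipe.predict(["Another unseen toy sentence."]))  # predict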
# Import the datasets
train_raw = pd.read_csv("../input/commonlitreadabilityprize/train.csv")
test_raw = pd.read_csv("../input/commonlitreadabilityprize/test.csv")
# # Text Preprocessing
# Data preprocessing is the phase of preparing raw data to make it suitable for a machine learning model.
# For NLP, that includes text cleaning, stopwords removal, stemming and lemmatization.
# Text cleaning steps vary according to the type of data and the required task. Generally, the string is converted to lowercase and punctuation is removed before text gets tokenized. Tokenization is the process of splitting a string into a list of strings (or “tokens”).
# I will put all of those preprocessing steps into a single transformer class (CleanText, defined below) and apply it to the whole dataset
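# A one-line illustration of word-level tokenization on a made-up sentence
# (the CleanText transformer below operates on whole strings; the spaCy and
# BERT tokenizers used later are more sophisticated than a plain split):
print("An Example sentence, to tokenize!".lower().split())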
# ## Cleaning
# Before we start using the excerpts' text we clean it. We'll do this in the CleanText class:
# - remove the **mentions**, as we want to make the model generalisable.
# - remove the **hash tag sign** (#) but not the actual tag as this may contain information
# - set all words to **lowercase**
# - remove all **punctuations**, including the question and exclamation marks
# - remove the **urls** as they do not contain useful information and we did not notice a distinction in the number of urls used between the sentiment classes
# - make sure the converted **emojis** are kept as one word.
# - remove **digits**
# - remove **stopwords**
# - apply **lemmatization** to keep the lemma of each word
# https://srinivas-yeeda.medium.com/preprocessing-for-natural-language-processing-498df071ab6e
import spacy
nlp = spacy.load("en_core_web_sm")
class CleanText(BaseEstimator, TransformerMixin):
def remove_mentions(self, input_text):
return re.sub(r"@\w+", "", input_text)
def remove_urls(self, input_text):
return re.sub(r"http.?://[^\s]+[\s]?", "", input_text)
def emoji_oneword(self, input_text):
# By compressing the underscore, the emoji is kept as one word
return input_text.replace("_", "")
def remove_punctuation(self, input_text):
# Make translation table
punct = string.punctuation
trantab = str.maketrans(
punct, len(punct) * " "
) # Every punctuation symbol will be replaced by a space
return input_text.translate(trantab)
def remove_digits(self, input_text):
        return re.sub(r"\d+", "", input_text)
def to_lower(self, input_text):
return input_text.lower()
def remove_stopwords(self, input_text):
stopwords_list = stopwords.words("english")
# Some words which might indicate a certain sentiment are kept via a whitelist
whitelist = ["n't", "not", "no"]
words = input_text.split()
clean_words = [
word
for word in words
if (word not in stopwords_list or word in whitelist) and len(word) > 1
]
return " ".join(clean_words)
def Lemmatizing(self, input_text):
        # Lemmatize each spaCy token and lowercase it ("-PRON-" lemmas keep the original pronoun)
mytokens = nlp(input_text)
mytokens = [
word.lemma_.lower().strip() if word.lemma_ != "-PRON-" else word.lower_
for word in mytokens
]
return " ".join(mytokens)
def fit(self, X, y=None, **fit_params):
return self
def transform(self, X, **transform_params):
clean_X = (
X.apply(self.remove_mentions)
.apply(self.remove_urls)
.apply(self.emoji_oneword)
.apply(self.remove_punctuation)
.apply(self.remove_digits)
.apply(self.to_lower)
.apply(self.remove_stopwords)
.apply(self.Lemmatizing)
)
return clean_X
ct = CleanText()
train_clean = ct.fit_transform(train_raw["excerpt"])
empty_clean = train_clean == ""
print(
"{} records have no words left after text cleaning".format(
train_clean[empty_clean].count()
)
)
train_clean.loc[empty_clean] = "[no_text]"
df_model = train_raw.copy()
df_model["clean_text"] = train_clean
df_model.columns.tolist()
# # Vocab Size
# Transform the list of sentences into a list of words
all_words = " ".join(df_model["clean_text"]).split(" ")
# all_words=[w for w in DOC ]
# Get number of unique words
vocab_size = len(set(all_words))
print(vocab_size)
# # X and y
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
import tensorflow as tf
features = df_model["clean_text"]
target = df_model["target"].values
# # Transfer learning + XGBR
# ## Bert Embedding
# ### Preparation and Feature Extraction for bert model :
# #### Tokenization
# We have our text data in the excerpt column, which we now need to tokenize. We will use the BERT tokenizer, because we will use a BERT transformer later.
# Train Data
# feature Extraction X :
# https://www.kaggle.com/colearninglounge/vectorization-embeddings-elmo-bert-gpt
import pandas as pd
from transformers import TFBertModel
from transformers import AutoTokenizer
SEQ_LEN = 128 # we will cut/pad our sequences to a length of 128 tokens
tokenizer = AutoTokenizer.from_pretrained("../input/huggingface-bert/bert-base-uncased")
def tokenize(sentence):
tokens = tokenizer.encode_plus(
sentence,
max_length=SEQ_LEN,
truncation=True,
padding="max_length",
add_special_tokens=True,
return_attention_mask=True,
return_token_type_ids=False,
return_tensors="tf",
)
return tokens["input_ids"], tokens["attention_mask"]
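# Quick sanity check of the tokenizer on a made-up sentence: with
# padding="max_length" and return_tensors="tf", both outputs should be
# tensors of shape (1, SEQ_LEN).
_ids, _mask = tokenize("This is a hypothetical example sentence.")
print(_ids.shape, _mask.shape)  # expected: (1, 128) (1, 128)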
# initialize two arrays for input tensors
Xids = np.zeros((len(train_raw), SEQ_LEN))
Xmask = np.zeros((len(train_raw), SEQ_LEN))
for i, sentence in enumerate(train_raw["excerpt"]):
Xids[i, :], Xmask[i, :] = tokenize(sentence)
if i % 10000 == 0:
print(i) # do this so we can see some progress
import tensorflow as tf
from transformers import BertTokenizer, TFBertModel
tokenizer = BertTokenizer.from_pretrained("../input/huggingface-bert/bert-base-uncased")
bert_model = TFBertModel.from_pretrained("../input/huggingface-bert/bert-base-uncased")
input_ids = tf.keras.layers.Input(shape=(128,), name="input_token", dtype="int32")
input_masks_ids = tf.keras.layers.Input(
shape=(128,), name="masked_token", dtype="int32"
)
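# The TFBertModel call below returns a tuple whose first element is the last
# hidden state of shape (batch, 128, 768); slicing [:, 0, :] keeps only the
# [CLS] token embedding, i.e. one 768-dimensional vector per excerpt.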
bert_output = bert_model([input_ids, input_masks_ids])[0][:, 0, :]
model = tf.keras.Model(inputs=[input_ids, input_masks_ids], outputs=[bert_output])
model.summary()
Xids.shape
cls_emb = model.predict([Xids, Xmask])
cls_emb.shape
# # XGBR + Bert Embedding
from sklearn.base import BaseEstimator, TransformerMixin
from scipy import sparse
from itertools import combinations
class SparseInteractions(BaseEstimator, TransformerMixin):
def __init__(self, degree=2, feature_name_separator="_"):
self.degree = degree
self.feature_name_separator = feature_name_separator
def fit(self, X, y=None):
return self
def transform(self, X):
if not sparse.isspmatrix_csc(X):
X = sparse.csc_matrix(X)
if hasattr(X, "columns"):
self.orig_col_names = X.columns
else:
self.orig_col_names = np.array([str(i) for i in range(X.shape[1])])
spi = self._create_sparse_interactions(X)
return spi
def get_feature_names(self):
return self.feature_names
def _create_sparse_interactions(self, X):
out_mat = []
self.feature_names = self.orig_col_names.tolist()
for sub_degree in range(2, self.degree + 1):
for col_ixs in combinations(range(X.shape[1]), sub_degree):
# add name for new column
name = self.feature_name_separator.join(
self.orig_col_names[list(col_ixs)]
)
self.feature_names.append(name)
# get column multiplications value
out = X[:, col_ixs[0]]
for j in col_ixs[1:]:
out = out.multiply(X[:, j])
out_mat.append(out)
return sparse.hstack([X] + out_mat)
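# SparseInteractions is kept here for experimentation (it is commented out in the
# pipeline further below). A tiny illustration on a toy sparse matrix: 3 original
# columns plus 3 pairwise products give 6 output columns.
_toy = sparse.csr_matrix(np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]))
print(SparseInteractions(degree=2).fit_transform(_toy).shape)  # (2, 6)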
# Outlier Handle
class OutlierReplace(BaseEstimator, TransformerMixin):
def __init__(self, factor=1.5):
self.factor = factor
def outlier_removal(self, X, y=None):
X = pd.Series(X).copy()
qmin = X.quantile(0.05)
qmax = X.quantile(0.95)
q1 = X.quantile(0.25)
q3 = X.quantile(0.75)
iqr = q3 - q1
lower_bound = q1 - (self.factor * iqr)
upper_bound = q3 + (self.factor * iqr)
# X.loc[((X < lower_bound) | (X > upper_bound))] = np.nan
X.loc[X < lower_bound] = qmin
X.loc[X > upper_bound] = qmax
return pd.Series(X)
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
return X.apply(self.outlier_removal)
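# OutlierReplace is likewise defined for experimentation and is not used in the
# final pipeline. On a toy column it maps values below Q1 - 1.5*IQR to the 5th
# percentile and values above Q3 + 1.5*IQR to the 95th percentile:
_toy_df = pd.DataFrame({"x": [1.0, 2.0, 2.5, 3.0, 100.0]})
print(OutlierReplace().fit_transform(_toy_df))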
# ## Complete Pipe
from sklearn.model_selection import KFold
from sklearn.model_selection import GridSearchCV
cross_validation_design = KFold(n_splits=3, shuffle=True, random_state=77)
cross_validation_design
from xgboost import XGBRegressor
from numpy import absolute
from numpy import mean
XGB_pipe = Pipeline(
[
("Scaler", StandardScaler()),
# ('dim_red', SelectKBest(f_regression, k=50)),
# ('int', SparseInteractions(degree=2)),
("XGB", XGBRegressor(verbosity=0, n_estimators=120, n_jobs=6)),
]
)
target.shape
# scores = cross_val_score(XGB_pipe, cls_emb, target, scoring='neg_mean_squared_error', cv=cross_validation_design, n_jobs=-1)
# convert scores to positive
# scores = absolute(scores)
# summarize the result
# s_mean = mean(scores)
# print('Mean mean_squared_error: %.3f' % (s_mean))
# XGB_pipe.fit(cls_emb,target)
# XGB_pipe.score(cls_emb,target)
# # Deep Learning Approach
# # Simple Stack LSTM
from tensorflow.keras.metrics import RootMeanSquaredError
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
optimizer = tf.keras.optimizers.Adam(0.01)
# loss = tf.keras.losses.CategoricalCrossentropy() # categorical = one-hot
best_weights_file = "./weights.h5"
batch_size = 16
max_epochs = 1000
m_ckpt = ModelCheckpoint(
best_weights_file,
monitor="val_root_mean_squared_error",
    mode="min",  # validation RMSE should be minimized, so track its minimum
verbose=2,
save_weights_only=True,
save_best_only=True,
)
es = EarlyStopping(monitor="loss", min_delta=0.0000000000000000001, patience=10)
rmse = RootMeanSquaredError()
optimizer = tf.keras.optimizers.Adam(learning_rate=2e-5)
# Build and compile the model
model_RNN = tf.keras.Sequential(name="model_RNN")
model_RNN.add(
tf.keras.layers.LSTM(
128,
input_shape=(1, 768),
return_sequences=True,
dropout=0.1,
recurrent_dropout=0.1,
)
)
model_RNN.add(
tf.keras.layers.LSTM(
128, return_sequences=False, dropout=0.1, recurrent_dropout=0.1
)
)
model_RNN.add(tf.keras.layers.Dropout(rate=0.2))
model_RNN.add(tf.keras.layers.Dense(16))
model_RNN.add(tf.keras.layers.Dense(1, name="outputs"))
model_RNN.compile(optimizer=optimizer, loss="mse", metrics=[rmse])
# configure early stopping
es = EarlyStopping(monitor="loss", min_delta=0.0000000000000000001, patience=10)
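# cls_emb has shape (n_samples, 768), but an LSTM expects 3-D input, so the
# embeddings are reshaped to (n_samples, timesteps=1, features=768) below and
# the targets are reshaped to (n_samples, 1, 1) accordingly.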
# fit model using our gpu
with tf.device("/gpu:0"):
history_model_RNN = model_RNN.fit(
cls_emb.reshape(-1, 1, 768),
target.reshape(-1, 1, 1),
batch_size=16,
epochs=1000,
verbose=0,
callbacks=[es],
validation_split=0.1,
)
import matplotlib.pyplot as plt
plt.style.use("ggplot")
def plot_history(history):
acc = history.history["root_mean_squared_error"]
val_acc = history.history["val_root_mean_squared_error"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
x = range(1, len(acc) + 1)
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(x, acc, "b", label="Training root_mean_squared_error")
plt.plot(x, val_acc, "r", label="Validation root_mean_squared_error")
plt.title("Training and validation root_mean_squared_error")
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(x, loss, "b", label="Training loss")
plt.plot(x, val_loss, "r", label="Validation loss")
plt.title("Training and validation loss")
plt.legend()
plot_history(history_model_RNN)
loss_RNN, root_mean_squared_error_RNN = model_RNN.evaluate(
cls_emb.reshape(-1, 1, 768), target.reshape(-1, 1, 1), verbose=0
)
print("root_mean_squared_error_model: %f" % (root_mean_squared_error_RNN * 100))
print("loss_model: %f" % (loss_RNN * 100))
# # CNN+LSTM
# # Submission:
# initialize two arrays for input tensors
Xids_test = np.zeros((len(test_raw), SEQ_LEN))
Xmask_test = np.zeros((len(test_raw), SEQ_LEN))
for i, sentence in enumerate(test_raw["excerpt"]):
Xids_test[i, :], Xmask_test[i, :] = tokenize(sentence)
if i % 10000 == 0:
print(i) # do this so we can see some progress
cls_emb_test = model.predict([Xids_test, Xmask_test])
cls_emb_test.shape
preds = model_RNN.predict(cls_emb_test.reshape(-1, 1, 768))
# build the submission DataFrame from the predicted targets
my_submission = pd.DataFrame({"id": test_raw.id, "target": preds.ravel()})
# you could use any filename. We choose submission here
my_submission.to_csv("submission.csv", index=False)
my_submission
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/919/69919825.ipynb
|
huggingface-bert
|
xhlulu
|
[{"Id": 69919825, "ScriptId": 19095480, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4473832, "CreationDate": "08/03/2021 20:58:47", "VersionNumber": 3.0, "Title": "transferlearning+classifier_Bert/Glove+XGB/LSTM", "EvaluationDate": "08/03/2021", "IsChange": true, "TotalLines": 512.0, "LinesInsertedFromPrevious": 84.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 428.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 93336832, "KernelVersionId": 69919825, "SourceDatasetVersionId": 2481788}, {"Id": 93336831, "KernelVersionId": 69919825, "SourceDatasetVersionId": 3176}]
|
[{"Id": 2481788, "DatasetId": 934701, "DatasourceVersionId": 2524326, "CreatorUserId": 2352583, "LicenseName": "Other (specified in description)", "CreationDate": "07/30/2021 21:19:17", "VersionNumber": 35.0, "Title": "Huggingface BERT", "Slug": "huggingface-bert", "Subtitle": "BERT models directly retrieved and updated from: https://huggingface.co/", "Description": "This dataset contains many popular BERT weights retrieved directly on [Hugging Face's model repository](https://huggingface.co/models), and hosted on Kaggle. It will be automatically updated every month to ensure that the latest version is available to the user. By making it a dataset, it is significantly faster to load the weights since you can directly attach a Kaggle dataset to the notebook rather than downloading the data every time. See the [speed comparison notebook](https://www.kaggle.com/xhlulu/loading-bert-speed-comparison).\n\n\n*The banner was adapted from figures by [Jimmy Lin](https://twitter.com/lintool) ([tweet](https://twitter.com/lintool/status/1285599163024125959); [slide](https://docs.google.com/presentation/d/1HMng1RWuY1molsGamwIpRnxNZNEqEPRVODcXpm-pX4c/)) released under [CC BY 4.0](https://creativecommons.org/licenses/by/4.0/). BERT has an Apache 2.0 license according to the model repository.*\n\n\n### Quick Start\n\nTo use this dataset, simply attach it the your notebook and specify the path to the dataset. For example:\n\n```python\nfrom transformers import AutoTokenizer, AutoModelForMaskedLM\n\nMODEL_DIR = \"/kaggle/input/huggingface-bert/\"\ntokenizer = AutoTokenizer.from_pretrained(MODEL_DIR + \"bert-large-uncased\")\nmodel = AutoModelForMaskedLM.from_pretrained(MODEL_DIR + \"bert-large-uncased\")\n```\n\n\n### Acknowledgements\n\nAll the copyrights and IP relating to BERT belong to the original authors (Devlin et. al 2019) and Google. All copyrights relating to the transformers library belong to Hugging Face. The banner image was created thanks to Jimmy Lin so any modification of this figure should mention the original author and respect the conditions of the license; all copyrights related to the images belong to him.\n\nSome of the models are community created or trained. Please reach out directly to the authors if you have questions regarding licenses and usage.", "VersionNotes": "Automatic Update 2021-07-30", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 934701, "CreatorUserId": 2352583, "OwnerUserId": 2352583.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 6249964.0, "CurrentDatasourceVersionId": 6329724.0, "ForumId": 950702, "Type": 2, "CreationDate": "10/22/2020 16:49:13", "LastActivityDate": "10/22/2020", "TotalViews": 26300, "TotalDownloads": 1468, "TotalVotes": 143, "TotalKernels": 147}]
|
[{"Id": 2352583, "UserName": "xhlulu", "DisplayName": "xhlulu", "RegisterDate": "10/12/2018", "PerformanceTier": 4}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# DataFrame
import pandas as pd
# Matplot
import matplotlib.pyplot as plt
# Scikit-learn
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import confusion_matrix, classification_report, accuracy_score
from sklearn.manifold import TSNE
from sklearn.feature_extraction.text import TfidfVectorizer
# Summary
from sklearn import datasets
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import Ridge
from sklearn.linear_model import Lasso
from sklearn.linear_model import ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import roc_curve
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
# from sklearn.preprocessing import Imputer
from sklearn.preprocessing import scale
from sklearn.preprocessing import StandardScaler
from sklearn.pipeline import Pipeline
from scipy.stats import randint
# tensor-Keras
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (
Activation,
Dense,
Dropout,
Embedding,
Flatten,
Conv1D,
MaxPooling1D,
LSTM,
)
from tensorflow.keras import utils
from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping
# nltk
import nltk
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
# Word2vec
import gensim
# Utility
import re
import numpy as np
import os
from collections import Counter
import logging
import time
import pickle
import itertools
from sklearn.impute import SimpleImputer
import warnings
import scipy.io
print("TensorFlow version: ", tf.__version__)
warnings.filterwarnings("ignore")
# Import the hashing vectorizer
from sklearn.feature_extraction.text import HashingVectorizer
# Import functional utilities
from sklearn.preprocessing import FunctionTransformer, MaxAbsScaler
from sklearn.pipeline import FeatureUnion
from sklearn.linear_model import LinearRegression
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_regression
from sklearn.base import BaseEstimator, TransformerMixin
from scipy import sparse
from itertools import combinations
import string
print(tf.test.gpu_device_name())
# See https://www.tensorflow.org/tutorials/using_gpu#allowing_gpu_memory_growth
# # NLP PIPELINE
# There are mainly four stages in an NLP pipeline:
# ## Exploratory Data Analysis
# ## Text Processing
# Cleaning
# Normalization
# Tokenize
# Stop word removal
# Stemming and Lemmatization
# POS and NER
#
# ## Feature Extraction
# Bag of Words
# TF-IDF
# word2vec
# Glove
# ## Modeling
# Model
# Train
# Predict
# Import the datasets
train_raw = pd.read_csv("../input/commonlitreadabilityprize/train.csv")
test_raw = pd.read_csv("../input/commonlitreadabilityprize/test.csv")
# # Text Preprocessing
# Data preprocessing is the phase of preparing raw data to make it suitable for a machine learning model.
# For NLP, that includes text cleaning, stopwords removal, stemming and lemmatization.
# Text cleaning steps vary according to the type of data and the required task. Generally, the string is converted to lowercase and punctuation is removed before text gets tokenized. Tokenization is the process of splitting a string into a list of strings (or “tokens”).
# I will put all of those preprocessing steps into a single transformer class (CleanText, defined below) and apply it to the whole dataset
# ## Cleaning
# Before we start using the excerpts' text we clean it. We'll do this in the CleanText class:
# - remove the **mentions**, as we want to make the model generalisable.
# - remove the **hash tag sign** (#) but not the actual tag as this may contain information
# - set all words to **lowercase**
# - remove all **punctuations**, including the question and exclamation marks
# - remove the **urls** as they do not contain useful information and we did not notice a distinction in the number of urls used between the sentiment classes
# - make sure the converted **emojis** are kept as one word.
# - remove **digits**
# - remove **stopwords**
# - apply **lemmatization** to keep the lemma of each word
# https://srinivas-yeeda.medium.com/preprocessing-for-natural-language-processing-498df071ab6e
import spacy
nlp = spacy.load("en_core_web_sm")
class CleanText(BaseEstimator, TransformerMixin):
def remove_mentions(self, input_text):
return re.sub(r"@\w+", "", input_text)
def remove_urls(self, input_text):
return re.sub(r"http.?://[^\s]+[\s]?", "", input_text)
def emoji_oneword(self, input_text):
# By compressing the underscore, the emoji is kept as one word
return input_text.replace("_", "")
def remove_punctuation(self, input_text):
# Make translation table
punct = string.punctuation
trantab = str.maketrans(
punct, len(punct) * " "
) # Every punctuation symbol will be replaced by a space
return input_text.translate(trantab)
def remove_digits(self, input_text):
        return re.sub(r"\d+", "", input_text)
def to_lower(self, input_text):
return input_text.lower()
def remove_stopwords(self, input_text):
stopwords_list = stopwords.words("english")
# Some words which might indicate a certain sentiment are kept via a whitelist
whitelist = ["n't", "not", "no"]
words = input_text.split()
clean_words = [
word
for word in words
if (word not in stopwords_list or word in whitelist) and len(word) > 1
]
return " ".join(clean_words)
def Lemmatizing(self, input_text):
        # Lemmatize each spaCy token and lowercase it ("-PRON-" lemmas keep the original pronoun)
mytokens = nlp(input_text)
mytokens = [
word.lemma_.lower().strip() if word.lemma_ != "-PRON-" else word.lower_
for word in mytokens
]
return " ".join(mytokens)
def fit(self, X, y=None, **fit_params):
return self
def transform(self, X, **transform_params):
clean_X = (
X.apply(self.remove_mentions)
.apply(self.remove_urls)
.apply(self.emoji_oneword)
.apply(self.remove_punctuation)
.apply(self.remove_digits)
.apply(self.to_lower)
.apply(self.remove_stopwords)
.apply(self.Lemmatizing)
)
return clean_X
ct = CleanText()
train_clean = ct.fit_transform(train_raw["excerpt"])
empty_clean = train_clean == ""
print(
"{} records have no words left after text cleaning".format(
train_clean[empty_clean].count()
)
)
train_clean.loc[empty_clean] = "[no_text]"
df_model = train_raw.copy()
df_model["clean_text"] = train_clean
df_model.columns.tolist()
# # Vocab Size
# Transform the list of sentences into a list of words
all_words = " ".join(df_model["clean_text"]).split(" ")
# all_words=[w for w in DOC ]
# Get number of unique words
vocab_size = len(set(all_words))
print(vocab_size)
# # X and y
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
import tensorflow as tf
features = df_model["clean_text"]
target = df_model["target"].values
# # Transfer learning + XGBR
# ## Bert Embedding
# ### Preparation and Feature Extraction for bert model :
# #### Tokenization
# We have our text data in the excerpt column, which we now need to tokenize. We will use the BERT tokenizer, because we will use a BERT transformer later.
# Train Data
# feature Extraction X :
# https://www.kaggle.com/colearninglounge/vectorization-embeddings-elmo-bert-gpt
import pandas as pd
from transformers import TFBertModel
from transformers import AutoTokenizer
SEQ_LEN = 128 # we will cut/pad our sequences to a length of 128 tokens
tokenizer = AutoTokenizer.from_pretrained("../input/huggingface-bert/bert-base-uncased")
def tokenize(sentence):
tokens = tokenizer.encode_plus(
sentence,
max_length=SEQ_LEN,
truncation=True,
padding="max_length",
add_special_tokens=True,
return_attention_mask=True,
return_token_type_ids=False,
return_tensors="tf",
)
return tokens["input_ids"], tokens["attention_mask"]
# initialize two arrays for input tensors
Xids = np.zeros((len(train_raw), SEQ_LEN))
Xmask = np.zeros((len(train_raw), SEQ_LEN))
for i, sentence in enumerate(train_raw["excerpt"]):
Xids[i, :], Xmask[i, :] = tokenize(sentence)
if i % 10000 == 0:
print(i) # do this so we can see some progress
import tensorflow as tf
from transformers import BertTokenizer, TFBertModel
tokenizer = BertTokenizer.from_pretrained("../input/huggingface-bert/bert-base-uncased")
bert_model = TFBertModel.from_pretrained("../input/huggingface-bert/bert-base-uncased")
input_ids = tf.keras.layers.Input(shape=(128,), name="input_token", dtype="int32")
input_masks_ids = tf.keras.layers.Input(
shape=(128,), name="masked_token", dtype="int32"
)
bert_output = bert_model([input_ids, input_masks_ids])[0][:, 0, :]
model = tf.keras.Model(inputs=[input_ids, input_masks_ids], outputs=[bert_output])
model.summary()
Xids.shape
cls_emb = model.predict([Xids, Xmask])
cls_emb.shape
# # XGBR + Bert Embedding
from sklearn.base import BaseEstimator, TransformerMixin
from scipy import sparse
from itertools import combinations
class SparseInteractions(BaseEstimator, TransformerMixin):
def __init__(self, degree=2, feature_name_separator="_"):
self.degree = degree
self.feature_name_separator = feature_name_separator
def fit(self, X, y=None):
return self
def transform(self, X):
if not sparse.isspmatrix_csc(X):
X = sparse.csc_matrix(X)
if hasattr(X, "columns"):
self.orig_col_names = X.columns
else:
self.orig_col_names = np.array([str(i) for i in range(X.shape[1])])
spi = self._create_sparse_interactions(X)
return spi
def get_feature_names(self):
return self.feature_names
def _create_sparse_interactions(self, X):
out_mat = []
self.feature_names = self.orig_col_names.tolist()
for sub_degree in range(2, self.degree + 1):
for col_ixs in combinations(range(X.shape[1]), sub_degree):
# add name for new column
name = self.feature_name_separator.join(
self.orig_col_names[list(col_ixs)]
)
self.feature_names.append(name)
# get column multiplications value
out = X[:, col_ixs[0]]
for j in col_ixs[1:]:
out = out.multiply(X[:, j])
out_mat.append(out)
return sparse.hstack([X] + out_mat)
# Outlier Handle
class OutlierReplace(BaseEstimator, TransformerMixin):
def __init__(self, factor=1.5):
self.factor = factor
def outlier_removal(self, X, y=None):
X = pd.Series(X).copy()
qmin = X.quantile(0.05)
qmax = X.quantile(0.95)
q1 = X.quantile(0.25)
q3 = X.quantile(0.75)
iqr = q3 - q1
lower_bound = q1 - (self.factor * iqr)
upper_bound = q3 + (self.factor * iqr)
# X.loc[((X < lower_bound) | (X > upper_bound))] = np.nan
X.loc[X < lower_bound] = qmin
X.loc[X > upper_bound] = qmax
return pd.Series(X)
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
return X.apply(self.outlier_removal)
# ## Complete Pipe
from sklearn.model_selection import KFold
from sklearn.model_selection import GridSearchCV
cross_validation_design = KFold(n_splits=3, shuffle=True, random_state=77)
cross_validation_design
from xgboost import XGBRegressor
from numpy import absolute
from numpy import mean
XGB_pipe = Pipeline(
[
("Scaler", StandardScaler()),
# ('dim_red', SelectKBest(f_regression, k=50)),
# ('int', SparseInteractions(degree=2)),
("XGB", XGBRegressor(verbosity=0, n_estimators=120, n_jobs=6)),
]
)
target.shape
# scores = cross_val_score(XGB_pipe, cls_emb, target, scoring='neg_mean_squared_error', cv=cross_validation_design, n_jobs=-1)
# convert scores to positive
# scores = absolute(scores)
# summarize the result
# s_mean = mean(scores)
# print('Mean mean_squared_error: %.3f' % (s_mean))
# XGB_pipe.fit(cls_emb,target)
# XGB_pipe.score(cls_emb,target)
# # Deep Learning Approach
# # Simple Stack LSTM
from tensorflow.keras.metrics import RootMeanSquaredError
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
optimizer = tf.keras.optimizers.Adam(0.01)
# loss = tf.keras.losses.CategoricalCrossentropy() # categorical = one-hot
best_weights_file = "./weights.h5"
batch_size = 16
max_epochs = 1000
m_ckpt = ModelCheckpoint(
best_weights_file,
monitor="val_root_mean_squared_error",
    mode="min",  # validation RMSE should be minimized, so track its minimum
verbose=2,
save_weights_only=True,
save_best_only=True,
)
es = EarlyStopping(monitor="loss", min_delta=0.0000000000000000001, patience=10)
rmse = RootMeanSquaredError()
optimizer = tf.keras.optimizers.Adam(learning_rate=2e-5)
# Build and compile the model
model_RNN = tf.keras.Sequential(name="model_RNN")
model_RNN.add(
tf.keras.layers.LSTM(
128,
input_shape=(1, 768),
return_sequences=True,
dropout=0.1,
recurrent_dropout=0.1,
)
)
model_RNN.add(
tf.keras.layers.LSTM(
128, return_sequences=False, dropout=0.1, recurrent_dropout=0.1
)
)
model_RNN.add(tf.keras.layers.Dropout(rate=0.2))
model_RNN.add(tf.keras.layers.Dense(16))
model_RNN.add(tf.keras.layers.Dense(1, name="outputs"))
model_RNN.compile(optimizer=optimizer, loss="mse", metrics=[rmse])
# configure early stopping
es = EarlyStopping(monitor="loss", min_delta=0.0000000000000000001, patience=10)
# fit model using our gpu
with tf.device("/gpu:0"):
history_model_RNN = model_RNN.fit(
cls_emb.reshape(-1, 1, 768),
target.reshape(-1, 1, 1),
batch_size=16,
epochs=1000,
verbose=0,
callbacks=[es],
validation_split=0.1,
)
import matplotlib.pyplot as plt
plt.style.use("ggplot")
def plot_history(history):
acc = history.history["root_mean_squared_error"]
val_acc = history.history["val_root_mean_squared_error"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
x = range(1, len(acc) + 1)
plt.figure(figsize=(12, 5))
plt.subplot(1, 2, 1)
plt.plot(x, acc, "b", label="Training root_mean_squared_error")
plt.plot(x, val_acc, "r", label="Validation root_mean_squared_error")
plt.title("Training and validation root_mean_squared_error")
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(x, loss, "b", label="Training loss")
plt.plot(x, val_loss, "r", label="Validation loss")
plt.title("Training and validation loss")
plt.legend()
plot_history(history_model_RNN)
loss_RNN, root_mean_squared_error_RNN = model_RNN.evaluate(
cls_emb.reshape(-1, 1, 768), target.reshape(-1, 1, 1), verbose=0
)
print("root_mean_squared_error_model: %f" % (root_mean_squared_error_RNN * 100))
print("loss_model: %f" % (loss_RNN * 100))
# # CNN+LSTM
# # Submission:
# initialize two arrays for input tensors
Xids_test = np.zeros((len(test_raw), SEQ_LEN))
Xmask_test = np.zeros((len(test_raw), SEQ_LEN))
for i, sentence in enumerate(test_raw["excerpt"]):
Xids_test[i, :], Xmask_test[i, :] = tokenize(sentence)
if i % 10000 == 0:
print(i) # do this so we can see some progress
cls_emb_test = model.predict([Xids_test, Xmask_test])
cls_emb_test.shape
preds = model_RNN.predict(cls_emb_test.reshape(-1, 1, 768))
# build the submission DataFrame from the predicted targets
my_submission = pd.DataFrame({"id": test_raw.id, "target": preds.ravel()})
# you could use any filename. We choose submission here
my_submission.to_csv("submission.csv", index=False)
my_submission
| false | 2 | 5,063 | 0 | 5,470 | 5,063 |
||
69919512
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.tree import (
export_graphviz,
) # for visualization of Decision Tree Classifier
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
train_data.info()
train_data.head()
# ### Statistics: survival rates for men & women
women = train_data.loc[train_data.Sex == "female"]["Survived"]
rate_women = sum(women) / len(women)
print("% of women who survived:", rate_women)
men = train_data.loc[train_data.Sex == "male"]["Survived"]
rate_men = sum(men) / len(men)
print("% of men who survived:", rate_men)
from sklearn.ensemble import RandomForestClassifier
y = train_data["Survived"]
features = ["Pclass", "Sex", "SibSp", "Parch"]
X = pd.get_dummies(train_data[features])
X_test = pd.get_dummies(test_data[features])
model = RandomForestClassifier(
n_estimators=100, max_depth=5, random_state=1, criterion="gini"
)
model.fit(X, y)
predictions = model.predict(X_test)
list(X_test.columns)
print(f"Total number of trees in the random forest = {len(model.estimators_)}")
# ### Visualizing one of the decision trees created by the RandomForestClassifier
# Extract single tree
estimator = model.estimators_[51]
export_graphviz(
estimator,
out_file="tree.dot",
feature_names=list(X_test.columns),
    class_names=["Died", "Survived"],  # target classes (0/1), not the feature names
rounded=True,
proportion=False,
precision=2,
filled=True,
)
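# export_graphviz only writes the DOT source file; to display the image below it
# still needs to be rendered to PNG first, e.g. with the Graphviz command-line
# tool (this assumes the `dot` binary is available in the environment):
import os

os.system("dot -Tpng tree.dot -o tree.png")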
from IPython.display import Image
Image(filename="tree.png")
u_output = pd.DataFrame(
{
"PassengerId": test_data.PassengerId,
"Pclass": test_data.Pclass,
"Sex": test_data.Sex,
"SibSp": test_data.SibSp,
"Parch": test_data.Parch,
"Survived": predictions,
}
)
u_output[u_output["Sex"] == "male"].sort_values(by=["Survived"], ascending=False)
# ### Output for submission
output = pd.DataFrame({"PassengerId": test_data.PassengerId, "Survived": predictions})
output.to_csv("my_submission.csv", index=False)
print("Your submission was successfully saved!")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/919/69919512.ipynb
| null | null |
[{"Id": 69919512, "ScriptId": 19117770, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4248934, "CreationDate": "08/03/2021 20:56:34", "VersionNumber": 1.0, "Title": "Titanic | Getting Started", "EvaluationDate": "08/03/2021", "IsChange": true, "TotalLines": 84.0, "LinesInsertedFromPrevious": 84.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.tree import (
export_graphviz,
) # for visualization of Decision Tree Classifier
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
train_data = pd.read_csv("/kaggle/input/titanic/train.csv")
test_data = pd.read_csv("/kaggle/input/titanic/test.csv")
train_data.info()
train_data.head()
# ### Statistics: survival rates for men & women
women = train_data.loc[train_data.Sex == "female"]["Survived"]
rate_women = sum(women) / len(women)
print("% of women who survived:", rate_women)
men = train_data.loc[train_data.Sex == "male"]["Survived"]
rate_men = sum(men) / len(men)
print("% of men who survived:", rate_men)
from sklearn.ensemble import RandomForestClassifier
y = train_data["Survived"]
features = ["Pclass", "Sex", "SibSp", "Parch"]
X = pd.get_dummies(train_data[features])
X_test = pd.get_dummies(test_data[features])
model = RandomForestClassifier(
n_estimators=100, max_depth=5, random_state=1, criterion="gini"
)
model.fit(X, y)
predictions = model.predict(X_test)
list(X_test.columns)
print(f"Total number of trees in the random forest = {len(model.estimators_)}")
# ### Visualizing one of the decision trees created by the RandomForestClassifier
# Extract single tree
estimator = model.estimators_[51]
export_graphviz(
estimator,
out_file="tree.dot",
feature_names=list(X_test.columns),
    class_names=["Died", "Survived"],  # target classes (0/1), not the feature names
rounded=True,
proportion=False,
precision=2,
filled=True,
)
from IPython.display import Image
Image(filename="tree.png")
u_output = pd.DataFrame(
{
"PassengerId": test_data.PassengerId,
"Pclass": test_data.Pclass,
"Sex": test_data.Sex,
"SibSp": test_data.SibSp,
"Parch": test_data.Parch,
"Survived": predictions,
}
)
u_output[u_output["Sex"] == "male"].sort_values(by=["Survived"], ascending=False)
# ### Output for submission
output = pd.DataFrame({"PassengerId": test_data.PassengerId, "Survived": predictions})
output.to_csv("my_submission.csv", index=False)
print("Your submission was successfully saved!")
| false | 0 | 832 | 0 | 832 | 832 |
||
69543596
|
# ## Summary
# This notebook aims to build models that predict survival on the Titanic - Machine Learning from Disaster dataset, and to explore the following questions:
# - Which model achieves the best performance?
# - How should the data be preprocessed to get the best results?
# ## Import Packages
import pandas as pd
import numpy as np
import sklearn
from sklearn.ensemble import GradientBoostingClassifier
# ## Common Functions
# **Save results**
def save_results(Survived, path):
submission = pd.DataFrame(
{"PassengerId": test["PassengerId"], "Survived": Survived}
)
submission.to_csv(path, index=False)
# ## Import datasets
train = pd.read_csv("/kaggle/input/titanic/train.csv")
test = pd.read_csv("/kaggle/input/titanic/test.csv")
train.head()
test.head()
# ## Data Wrangling and Preprocessing
# As we can see, Age, Cabin and Embarked (in train) and Age, Cabin and Fare (in test) contain missing values, so we need to apply missing-value imputation to them.
train.isnull().sum()
test.isnull().sum()
train["Cabin"] = train["Cabin"].replace(np.NAN, "Others")
test["Cabin"] = test["Cabin"].replace(np.NAN, "Others")
train["Embarked"] = train["Embarked"].replace(np.NAN, train["Embarked"].mode()[0])
train["Age"] = train["Age"].replace(np.NAN, train["Age"].mean())
test["Age"] = test["Age"].replace(np.NAN, test["Age"].mean())
test["Fare"] = test["Fare"].replace(np.NAN, test["Fare"].mean())
# We can infer passengers' titles from their names, so we can add extra columns to indicate whether they are Miss, Mr, Mrs, or Master.
train["Name"].head(30)
# Match each title with its trailing period so that, e.g., "Mr." does not also match "Mrs."
for data in [train, test]:
    data["is_Miss"] = data["Name"].apply(lambda name: 1 if "Miss." in name else 0)
    data["is_Mr"] = data["Name"].apply(
        lambda name: 1 if ("Mr." in name or "Master." in name) else 0
    )
    data["is_Mrs"] = data["Name"].apply(lambda name: 1 if "Mrs." in name else 0)
    data["is_Master"] = data["Name"].apply(lambda name: 1 if "Master." in name else 0)
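# Quick check on made-up names that the "Mr." flag does not also fire for "Mrs.":
_check = pd.Series(["Doe, Mr. John", "Doe, Mrs. Jane", "Doe, Miss. Ann"])
print(_check.apply(lambda name: 1 if ("Mr." in name or "Master." in name) else 0).tolist())  # [1, 0, 0]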
# Let's look at the Cabin labels; there are a lot of them. But I make an assumption that the first letter matters: it indicates the deck (and hence location and class) of the passengers, so it should have an impact on survival.
cabin_labels = sorted(set(list(train["Cabin"].unique()) + list(test["Cabin"].unique())))
print(cabin_labels[:30])
train["Cabin_type"] = train["Cabin"].apply(lambda cabin: cabin[0])
test["Cabin_type"] = test["Cabin"].apply(lambda cabin: cabin[0])
# ## Handle Categorical Features
categorical_features = ["Sex", "Cabin_type", "Embarked"]
categorical_label_dictionary = dict()
for feature in categorical_features:
unique_labels = sorted(
set(list(train[feature].unique()) + list(test[feature].unique()))
)
for data in [train, test]:
categorical_label_dictionary[feature] = unique_labels
data[feature + "_value"] = data[feature].apply(
lambda item: unique_labels.index(item)
)
# Let's see what the data looks like after preprocessing.
train.head(30)
# ## Exploratory Data Analysis
# ### Basic statistics
train.info()
train.describe()
# ### Which factors are associated with survival?
# As we can see, survival is related to gender, Pclass, title, Fare, Cabin and Embarked.
train.corr()["Survived"].sort_values(ascending=False)
related_features = list(train.corr()[train.corr()["Survived"].abs() > 0.05].index)
related_features.remove("Survived")
print(related_features)
# ### Preprocess Data
train_test = pd.concat([train, test])[related_features]
train_test.head()
for feature in ["Sex", "Cabin_type", "Embarked"]:
items = pd.get_dummies(train_test[feature + "_value"])
labels = categorical_label_dictionary[feature]
items.columns = [feature + "_" + labels[column] for column in list(items.columns)]
train_test[items.columns] = items
train_test.pop(feature + "_value")
train_test.head()
train_features = train_test.iloc[0 : len(train)]
test_features = train_test.iloc[len(train) :]
train_features.head()
test_features.head()
# ### Train Validation Split
from sklearn import model_selection
(
train_features,
validation_features,
train_targets,
validation_targets,
) = model_selection.train_test_split(
train_features, train["Survived"], test_size=0.2, random_state=88
)
print(
train_features.shape,
validation_features.shape,
train_targets.shape,
validation_targets.shape,
)
# ## Model Development and Evaluation
best_score = 0
best_path = ""
# ### Using Deep Neural Network
import tensorflow as tf
model = tf.keras.Sequential(
[
        tf.keras.layers.Input(shape=(train_features.shape[1],)),  # shape expects a tuple, hence the trailing comma
tf.keras.layers.Dense(
16, activation="relu", kernel_regularizer=tf.keras.regularizers.L2(1e-5)
),
tf.keras.layers.Dense(2, activation="softmax"),
]
)
model.compile(
loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
)
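# Note: a softmax over 2 units with sparse_categorical_crossentropy is equivalent
# to a single sigmoid unit with binary cross-entropy for this 0/1 target; argmax
# over the two output probabilities (used below) recovers the predicted label.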
early_stop = tf.keras.callbacks.EarlyStopping(patience=10)
history = model.fit(
train_features,
train_targets,
epochs=100,
validation_data=(validation_features, validation_targets),
callbacks=[early_stop],
verbose=0,
)
pd.DataFrame(history.history).plot()
score = history.history["val_accuracy"][-1]
print("Accuracy Score: ", score)
Survived = np.argmax(model.predict(test_features), axis=-1)
path = "submission_dnn.csv"
save_results(Survived, path)
if score > best_score:
best_score = score
best_path = path
# ## Using KNN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(2)
knn.fit(train_features, train_targets)
score = knn.score(validation_features, validation_targets)
print("Accuracy Score: ", score)
Survived = knn.predict(test_features)  # use the fitted KNN model here, not the neural network
path = "submission_knn.csv"
save_results(Survived, path)
if score > best_score:
best_score = score
best_path = path
# ### Using Decision Tree
from sklearn.tree import DecisionTreeClassifier
tree = sklearn.tree.DecisionTreeClassifier(max_depth=5)
tree.fit(train_features, train_targets)
score = tree.score(validation_features, validation_targets)
print("Accuracy Score: ", score)
Survived = tree.predict(test_features)
print(Survived[:10])
path = "submission_decision_tree.csv"
save_results(Survived, path)
if score > best_score:
best_score = score
best_path = path
# ## Using Gradient Boosting Classifier
gbc = GradientBoostingClassifier(
n_estimators=3, learning_rate=0.1, max_depth=4, random_state=1345
)
gbc.fit(train_features, train_targets)
score = gbc.score(validation_features, validation_targets)
print("Accuracy Score: ", score)
Survived = gbc.predict(test_features)
path = "submission_gbc.csv"
save_results(Survived, path)
if score > best_score:
best_score = score
best_path = path
# ## Using Random Forest
from sklearn.ensemble import RandomForestClassifier
best_forest = None
best_max_depth = 4
best_n_estimators = 3
best_forest_score = 0
print("Find best number of estimators")
for n_estimators in list(range(3, 40, 2)):
forest = RandomForestClassifier(
n_estimators=n_estimators, max_depth=best_max_depth, random_state=84
)
forest.fit(train_features, train_targets)
score = forest.score(validation_features, validation_targets)
print("Score: ", score)
if score > best_forest_score:
best_n_estimators = n_estimators
best_forest_score = score
best_forest = forest
print("Best Number of Estimator:", best_n_estimators)
for max_depth in range(4, 15):
forest = RandomForestClassifier(
n_estimators=best_n_estimators, max_depth=max_depth, random_state=886
)
forest.fit(train_features, train_targets)
score = forest.score(validation_features, validation_targets)
print("Score: ", score)
if score > best_forest_score:
best_max_depth = max_depth
        best_forest_score = score
best_forest = forest
print("Best Max Depth:", best_max_depth, "\nBest score:", best_forest_score)
score = best_forest.score(validation_features, validation_targets)
print("Accuracy Score: ", score)
Survived = best_forest.predict(test_features)  # predict() already returns 0/1 labels, so no argmax is needed
path = "submission_forest.csv"
save_results(Survived, path)
if score > best_score:
best_score = score
best_path = path
# ## Submit best Model
print("Best path:", best_path)
print("Best Score", best_score)
submission = pd.read_csv(best_path)
print(submission.head())
submission.to_csv("submission.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/543/69543596.ipynb
| null | null |
[{"Id": 69543596, "ScriptId": 18984265, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4562457, "CreationDate": "08/01/2021 10:25:46", "VersionNumber": 8.0, "Title": "Classification with SKLearn and Tensorflow", "EvaluationDate": "08/01/2021", "IsChange": true, "TotalLines": 258.0, "LinesInsertedFromPrevious": 12.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 246.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# ## Summary
# This notebook aims to build models that predict survival on the Titanic - Machine Learning from Disaster dataset, and to explore the following questions:
# - Which model achieves the best performance?
# - How should the data be preprocessed to get the best results?
# ## Import Packages
import pandas as pd
import numpy as np
import sklearn
from sklearn.ensemble import GradientBoostingClassifier
# ## Common Functions
# **Save results**
def save_results(Survived, path):
submission = pd.DataFrame(
{"PassengerId": test["PassengerId"], "Survived": Survived}
)
submission.to_csv(path, index=False)
# ## Import datasets
train = pd.read_csv("/kaggle/input/titanic/train.csv")
test = pd.read_csv("/kaggle/input/titanic/test.csv")
train.head()
test.head()
# ## Data Wrangling and Preprocessing
# As we can see, Age, Cabin and Embarked (in train) and Age, Cabin and Fare (in test) contain missing values, so we need to apply missing-value imputation to them.
train.isnull().sum()
test.isnull().sum()
train["Cabin"] = train["Cabin"].replace(np.NAN, "Others")
test["Cabin"] = test["Cabin"].replace(np.NAN, "Others")
train["Embarked"] = train["Embarked"].replace(np.NAN, train["Embarked"].mode()[0])
train["Age"] = train["Age"].replace(np.NAN, train["Age"].mean())
test["Age"] = test["Age"].replace(np.NAN, test["Age"].mean())
test["Fare"] = test["Fare"].replace(np.NAN, test["Fare"].mean())
# We can infer passengers' titles from their names, so we can add extra columns to indicate whether they are Miss, Mr, Mrs, or Master.
train["Name"].head(30)
# Match each title with its trailing period so that, e.g., "Mr." does not also match "Mrs."
for data in [train, test]:
    data["is_Miss"] = data["Name"].apply(lambda name: 1 if "Miss." in name else 0)
    data["is_Mr"] = data["Name"].apply(
        lambda name: 1 if ("Mr." in name or "Master." in name) else 0
    )
    data["is_Mrs"] = data["Name"].apply(lambda name: 1 if "Mrs." in name else 0)
    data["is_Master"] = data["Name"].apply(lambda name: 1 if "Master." in name else 0)
# Let's look at the Cabin labels; there are many of them. I make the assumption that the first letter matters: it indicates the deck, and hence the location and class of the passenger, so it should have an impact on survival.
cabin_labels = sorted(set(list(train["Cabin"].unique()) + list(test["Cabin"].unique())))
print(cabin_labels[:30])
train["Cabin_type"] = train["Cabin"].apply(lambda cabin: cabin[0])
test["Cabin_type"] = test["Cabin"].apply(lambda cabin: cabin[0])
# ## Handle Categorical Features
categorical_features = ["Sex", "Cabin_type", "Embarked"]
categorical_label_dictionary = dict()
for feature in categorical_features:
unique_labels = sorted(
set(list(train[feature].unique()) + list(test[feature].unique()))
)
for data in [train, test]:
categorical_label_dictionary[feature] = unique_labels
data[feature + "_value"] = data[feature].apply(
lambda item: unique_labels.index(item)
)
# Let's see what the data looks like after preprocessing.
train.head(30)
# ## Exploratory Data Analysis
# ### Basic Statistic infos
train.info()
train.describe()
# ### What factors affect survival?
# As we can see, survival is related to gender, Pclass, title, Fare, Cabin and Embarked.
train.corr()["Survived"].sort_values(ascending=False)
related_features = list(train.corr()[train.corr()["Survived"].abs() > 0.05].index)
related_features.remove("Survived")
print(related_features)
# ### Preprocess Data
train_test = pd.concat([train, test])[related_features]
train_test.head()
for feature in ["Sex", "Cabin_type", "Embarked"]:
items = pd.get_dummies(train_test[feature + "_value"])
labels = categorical_label_dictionary[feature]
items.columns = [feature + "_" + labels[column] for column in list(items.columns)]
train_test[items.columns] = items
train_test.pop(feature + "_value")
train_test.head()
train_features = train_test.iloc[0 : len(train)]
test_features = train_test.iloc[len(train) :]
train_features.head()
test_features.head()
# ### Train Validation Split
from sklearn import model_selection
(
train_features,
validation_features,
train_targets,
validation_targets,
) = model_selection.train_test_split(
train_features, train["Survived"], test_size=0.2, random_state=88
)
print(
train_features.shape,
validation_features.shape,
train_targets.shape,
validation_targets.shape,
)
# ## Model Development and Evaluation
best_score = 0
best_path = ""
# ### Using Deep Neural Network
import tensorflow as tf
model = tf.keras.Sequential(
[
tf.keras.layers.Input(shape=(train_features.shape[1])),
tf.keras.layers.Dense(
16, activation="relu", kernel_regularizer=tf.keras.regularizers.L2(1e-5)
),
tf.keras.layers.Dense(2, activation="softmax"),
]
)
model.compile(
loss="sparse_categorical_crossentropy", optimizer="adam", metrics=["accuracy"]
)
early_stop = tf.keras.callbacks.EarlyStopping(patience=10)
history = model.fit(
train_features,
train_targets,
epochs=100,
validation_data=(validation_features, validation_targets),
callbacks=[early_stop],
verbose=0,
)
pd.DataFrame(history.history).plot()
score = history.history["val_accuracy"][-1]
print("Accuracy Score: ", score)
Survived = np.argmax(model.predict(test_features), axis=-1)
path = "submission_dnn.csv"
save_results(Survived, path)
if score > best_score:
best_score = score
best_path = path
# ### Using KNN
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier(2)
knn.fit(train_features, train_targets)
score = knn.score(validation_features, validation_targets)
print("Accuracy Score: ", score)
# Use the fitted KNN model (not the neural network above) for this submission.
Survived = knn.predict(test_features)
path = "submission_knn.csv"
save_results(Survived, path)
if score > best_score:
best_score = score
best_path = path
# ### Using Decision Tree
from sklearn.tree import DecisionTreeClassifier
tree = sklearn.tree.DecisionTreeClassifier(max_depth=5)
tree.fit(train_features, train_targets)
score = tree.score(validation_features, validation_targets)
print("Accuracy Score: ", score)
Survived = tree.predict(test_features)
print(Survived[:10])
path = "submission_decision_tree.csv"
save_results(Survived, path)
if score > best_score:
best_score = score
best_path = path
# ### Using Gradient Boosting Classifier
gbc = GradientBoostingClassifier(
n_estimators=3, learning_rate=0.1, max_depth=4, random_state=1345
)
gbc.fit(train_features, train_targets)
score = gbc.score(validation_features, validation_targets)
print("Accuracy Score: ", score)
Survived = gbc.predict(test_features)
path = "submission_gbc.csv"
save_results(Survived, path)
if score > best_score:
best_score = score
best_path = path
# ### Using Random Forest
from sklearn.ensemble import RandomForestClassifier
best_forest = None
best_max_depth = 4
best_n_estimators = 3
best_forest_score = 0
print("Find best number of estimators")
for n_estimators in list(range(3, 40, 2)):
forest = RandomForestClassifier(
n_estimators=n_estimators, max_depth=best_max_depth, random_state=84
)
forest.fit(train_features, train_targets)
score = forest.score(validation_features, validation_targets)
print("Score: ", score)
if score > best_forest_score:
best_n_estimators = n_estimators
best_forest_score = score
best_forest = forest
print("Best Number of Estimator:", best_n_estimators)
for max_depth in range(4, 15):
forest = RandomForestClassifier(
n_estimators=best_n_estimators, max_depth=max_depth, random_state=886
)
forest.fit(train_features, train_targets)
score = forest.score(validation_features, validation_targets)
print("Score: ", score)
if score > best_forest_score:
best_max_depth = max_depth
        best_forest_score = score
best_forest = forest
print("Best Max Depth:", best_max_depth, "\nBest score:", best_forest_score)
score = best_forest.score(validation_features, validation_targets)
print("Accuracy Score: ", score)
# RandomForestClassifier.predict already returns class labels, so no argmax is needed.
Survived = best_forest.predict(test_features)
path = "submission_forest.csv"
save_results(Survived, path)
if score > best_score:
best_score = score
best_path = path
# ## Submit best Model
print("Best path:", best_path)
print("Best Score", best_score)
submission = pd.read_csv(best_path)
print(submission.head())
submission.to_csv("submission.csv", index=False)
| false | 0 | 2,527 | 0 | 2,527 | 2,527 |
||
69543341
|
<jupyter_start><jupyter_text>microsoft-challenge-4
Kaggle dataset identifier: microsoftchallenge4
<jupyter_code>import pandas as pd
df = pd.read_csv('microsoftchallenge4/clusters.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000 entries, 0 to 999
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 A 1000 non-null float64
1 B 1000 non-null float64
2 C 1000 non-null float64
dtypes: float64(3)
memory usage: 23.6 KB
<jupyter_text>Examples:
{
"A": -0.08749163410000001,
"B": 0.39800022150000003,
"C": 0.0142745649
}
{
"A": -1.0717046075,
"B": -0.5464732266,
"C": 0.0724244567
}
{
"A": 2.7470749768,
"B": 2.0126494714,
"C": 3.0839641597
}
{
"A": 3.2179130137,
"B": 2.2137723798,
"C": 4.2603124089
}
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
data = pd.read_csv("/kaggle/input/microsoftchallenge4/clusters.csv")
data.sample(10)
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
scaled_features = StandardScaler().fit_transform(data)
pca = PCA(n_components=2).fit(scaled_features)
features_2d = pca.transform(scaled_features)
features_2d[0:10]
from sklearn.preprocessing import MinMaxScaler
from sklearn.decomposition import PCA
scaled_features = MinMaxScaler().fit_transform(data)
pca = PCA(n_components=2).fit(scaled_features)
features_2d = pca.transform(scaled_features)
features_2d[0:10]
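# Not part of the original notebook: a quick check of how much of the scaled
# data's variance the two principal components retain (using the `pca` object
# fitted just above). A total close to 1.0 means the 2-D plots below are a
# faithful view of the 3-D data.
print("Explained variance ratio:", pca.explained_variance_ratio_)
print("Total variance retained:", pca.explained_variance_ratio_.sum())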
import matplotlib.pyplot as plt
plt.scatter(features_2d[:, 0], features_2d[:, 1])
plt.xlabel("Dimension 1")
plt.ylabel("Dimension 2")
plt.title("Data")
plt.show()
from sklearn.cluster import KMeans
wcss = []
for i in range(1, 11):
kmeans = KMeans(n_clusters=i)
kmeans.fit(data)
wcss.append(kmeans.inertia_)
plt.plot(range(1, 11), wcss)
plt.title("WCSS by Clusters")
plt.xlabel("Number of clusters")
plt.ylabel("WCSS")
plt.show()
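# A minimal cross-check on the elbow plot above (not in the original notebook):
# the silhouette coefficient scores each candidate k directly, and the k that
# maximises it should agree with the elbow before KMeans is fitted with
# n_clusters=4 in the next cell.
from sklearn.metrics import silhouette_score
for _k in range(2, 7):
    _labels = KMeans(n_clusters=_k).fit_predict(data)
    print(_k, silhouette_score(data, _labels))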
from sklearn.cluster import KMeans
model = KMeans(n_clusters=4, init="k-means++", n_init=500, max_iter=1500)
km_clusters = model.fit_predict(data)
km_clusters
def plot_clusters(samples, clusters):
col_dic = {0: "blue", 1: "green", 2: "orange", 3: "cyan"}
mrk_dic = {0: "*", 1: "x", 2: "+", 3: "."}
colors = [col_dic[x] for x in clusters]
markers = [mrk_dic[x] for x in clusters]
for sample in range(len(clusters)):
plt.scatter(
samples[sample][0],
samples[sample][1],
color=colors[sample],
marker=markers[sample],
s=100,
)
plt.xlabel("Dimension 1")
plt.ylabel("Dimension 2")
plt.title("Assignments")
plt.show()
plot_clusters(features_2d, km_clusters)
from sklearn.cluster import AgglomerativeClustering
agg_model = AgglomerativeClustering(n_clusters=4)
agg_clusters = agg_model.fit_predict(data)
agg_clusters
plot_clusters(features_2d, agg_clusters)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/543/69543341.ipynb
|
microsoftchallenge4
|
malwyshihab
|
[{"Id": 69543341, "ScriptId": 18990264, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4526565, "CreationDate": "08/01/2021 10:21:45", "VersionNumber": 1.0, "Title": "Clustering", "EvaluationDate": "08/01/2021", "IsChange": true, "TotalLines": 90.0, "LinesInsertedFromPrevious": 90.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
|
[{"Id": 92850081, "KernelVersionId": 69543341, "SourceDatasetVersionId": 2058202}]
|
[{"Id": 2058202, "DatasetId": 1233376, "DatasourceVersionId": 2098349, "CreatorUserId": 3692015, "LicenseName": "Unknown", "CreationDate": "03/26/2021 03:41:45", "VersionNumber": 1.0, "Title": "microsoft-challenge-4", "Slug": "microsoftchallenge4", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1233376, "CreatorUserId": 3692015, "OwnerUserId": 3692015.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2058202.0, "CurrentDatasourceVersionId": 2098349.0, "ForumId": 1251587, "Type": 2, "CreationDate": "03/26/2021 03:41:45", "LastActivityDate": "03/26/2021", "TotalViews": 791, "TotalDownloads": 13, "TotalVotes": 1, "TotalKernels": 2}]
|
[{"Id": 3692015, "UserName": "malwyshihab", "DisplayName": "Muhammad Alwy Shihab", "RegisterDate": "09/11/2019", "PerformanceTier": 0}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
data = pd.read_csv("/kaggle/input/microsoftchallenge4/clusters.csv")
data.sample(10)
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
scaled_features = StandardScaler().fit_transform(data)
pca = PCA(n_components=2).fit(scaled_features)
features_2d = pca.transform(scaled_features)
features_2d[0:10]
from sklearn.preprocessing import MinMaxScaler
from sklearn.decomposition import PCA
scaled_features = MinMaxScaler().fit_transform(data)
pca = PCA(n_components=2).fit(scaled_features)
features_2d = pca.transform(scaled_features)
features_2d[0:10]
import matplotlib.pyplot as plt
plt.scatter(features_2d[:, 0], features_2d[:, 1])
plt.xlabel("Dimension 1")
plt.ylabel("Dimension 2")
plt.title("Data")
plt.show()
from sklearn.cluster import KMeans
wcss = []
for i in range(1, 11):
kmeans = KMeans(n_clusters=i)
kmeans.fit(data)
wcss.append(kmeans.inertia_)
plt.plot(range(1, 11), wcss)
plt.title("WCSS by Clusters")
plt.xlabel("Number of clusters")
plt.ylabel("WCSS")
plt.show()
from sklearn.cluster import KMeans
model = KMeans(n_clusters=4, init="k-means++", n_init=500, max_iter=1500)
km_clusters = model.fit_predict(data)
km_clusters
def plot_clusters(samples, clusters):
col_dic = {0: "blue", 1: "green", 2: "orange", 3: "cyan"}
mrk_dic = {0: "*", 1: "x", 2: "+", 3: "."}
colors = [col_dic[x] for x in clusters]
markers = [mrk_dic[x] for x in clusters]
for sample in range(len(clusters)):
plt.scatter(
samples[sample][0],
samples[sample][1],
color=colors[sample],
marker=markers[sample],
s=100,
)
plt.xlabel("Dimension 1")
plt.ylabel("Dimension 2")
plt.title("Assignments")
plt.show()
plot_clusters(features_2d, km_clusters)
from sklearn.cluster import AgglomerativeClustering
agg_model = AgglomerativeClustering(n_clusters=4)
agg_clusters = agg_model.fit_predict(data)
agg_clusters
plot_clusters(features_2d, agg_clusters)
|
[{"microsoftchallenge4/clusters.csv": {"column_names": "[\"A\", \"B\", \"C\"]", "column_data_types": "{\"A\": \"float64\", \"B\": \"float64\", \"C\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1000 entries, 0 to 999\nData columns (total 3 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 A 1000 non-null float64\n 1 B 1000 non-null float64\n 2 C 1000 non-null float64\ndtypes: float64(3)\nmemory usage: 23.6 KB\n", "summary": "{\"A\": {\"count\": 1000.0, \"mean\": 1.5130805719573142, \"std\": 1.2508413032303112, \"min\": -1.6491210415172972, \"25%\": 0.5421607576867005, \"50%\": 1.5204024649177432, \"75%\": 2.483222081539748, \"max\": 4.497580985951126}, \"B\": {\"count\": 1000.0, \"mean\": 1.5207761038047725, \"std\": 1.2475869588009736, \"min\": -1.6310064346212445, \"25%\": 0.5617204594105818, \"50%\": 1.5262018466080474, \"75%\": 2.5051692107242394, \"max\": 4.4427273147431645}, \"C\": {\"count\": 1000.0, \"mean\": 1.4983513200746723, \"std\": 1.2670398227472313, \"min\": -1.9216505275777052, \"25%\": 0.48317218926753086, \"50%\": 1.4668409399765272, \"75%\": 2.4854097638344594, \"max\": 4.678731107534028}}", "examples": "{\"A\":{\"0\":-0.0874916341,\"1\":-1.0717046075,\"2\":2.7470749768,\"3\":3.2179130137},\"B\":{\"0\":0.3980002215,\"1\":-0.5464732266,\"2\":2.0126494714,\"3\":2.2137723798},\"C\":{\"0\":0.0142745649,\"1\":0.0724244567,\"2\":3.0839641597,\"3\":4.2603124089}}"}}]
| true | 1 |
<start_data_description><data_path>microsoftchallenge4/clusters.csv:
<column_names>
['A', 'B', 'C']
<column_types>
{'A': 'float64', 'B': 'float64', 'C': 'float64'}
<dataframe_Summary>
{'A': {'count': 1000.0, 'mean': 1.5130805719573142, 'std': 1.2508413032303112, 'min': -1.6491210415172972, '25%': 0.5421607576867005, '50%': 1.5204024649177432, '75%': 2.483222081539748, 'max': 4.497580985951126}, 'B': {'count': 1000.0, 'mean': 1.5207761038047725, 'std': 1.2475869588009736, 'min': -1.6310064346212445, '25%': 0.5617204594105818, '50%': 1.5262018466080474, '75%': 2.5051692107242394, 'max': 4.4427273147431645}, 'C': {'count': 1000.0, 'mean': 1.4983513200746723, 'std': 1.2670398227472313, 'min': -1.9216505275777052, '25%': 0.48317218926753086, '50%': 1.4668409399765272, '75%': 2.4854097638344594, 'max': 4.678731107534028}}
<dataframe_info>
RangeIndex: 1000 entries, 0 to 999
Data columns (total 3 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 A 1000 non-null float64
1 B 1000 non-null float64
2 C 1000 non-null float64
dtypes: float64(3)
memory usage: 23.6 KB
<some_examples>
{'A': {'0': -0.0874916341, '1': -1.0717046075, '2': 2.7470749768, '3': 3.2179130137}, 'B': {'0': 0.3980002215, '1': -0.5464732266, '2': 2.0126494714, '3': 2.2137723798}, 'C': {'0': 0.0142745649, '1': 0.0724244567, '2': 3.0839641597, '3': 4.2603124089}}
<end_description>
| 854 | 2 | 1,280 | 854 |
69543805
|
<jupyter_start><jupyter_text>CommonLit Roberta Model Set
Kaggle dataset identifier: commonlit-roberta-0467
<jupyter_script># # Overview
# This notebook combines three RoBERTa-based models: each one predicts the readability target, and their predictions are blended with a linear-regression stacker at the end.
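# A minimal, self-contained sketch of that stacking idea with toy arrays (the
# real per-model predictions are built further down; the underscore names here
# are illustrative stand-ins only):
import numpy as np
from sklearn.linear_model import LinearRegression
_m1, _m2, _m3 = np.random.rand(3, 12)  # stand-ins for three models' train predictions
_y = 0.5 * _m1 + 0.3 * _m2 + 0.2 * _m3  # stand-in target built from known weights
_stacker = LinearRegression().fit(np.column_stack([_m1, _m2, _m3]), _y)
print("Toy stacking weights:", _stacker.coef_)  # recovers roughly [0.5, 0.3, 0.2]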
import os
import math
import random
import time
import numpy as np
import pandas as pd
from tqdm import tqdm
import torch
import torch.nn as nn
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers import AutoModel
from transformers import AutoConfig
from sklearn.model_selection import KFold
from sklearn.svm import SVR
import gc
gc.enable()
import transformers
from transformers import BertTokenizer
from sklearn.metrics import mean_squared_error as mse
from sklearn.model_selection import KFold
import lightgbm as lgb
from fastprogress.fastprogress import progress_bar
from sklearn.metrics import mean_squared_error
from lightautoml.automl.presets.text_presets import TabularAutoML
from lightautoml.tasks import Task
BATCH_SIZE = 32
MAX_LEN = 248
EVAL_SCHEDULE = [(0.5, 16), (0.49, 8), (0.48, 4), (0.47, 2), (-1, 1)]
ROBERTA_PATH = "../input/roberta-transformers-pytorch/roberta-base"
TOKENIZER_PATH = "../input/roberta-transformers-pytorch/roberta-base"
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
DEVICE
train_df = pd.read_csv("/kaggle/input/commonlitreadabilityprize/train.csv")
test_df = pd.read_csv("/kaggle/input/commonlitreadabilityprize/test.csv")
submission_df = pd.read_csv(
"/kaggle/input/commonlitreadabilityprize/sample_submission.csv"
)
tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_PATH)
# # Dataset
class LitDataset(Dataset):
def __init__(self, df, inference_only=False):
super().__init__()
self.df = df
self.inference_only = inference_only
self.text = df.excerpt.tolist()
# self.text = [text.replace("\n", " ") for text in self.text]
if not self.inference_only:
self.target = torch.tensor(df.target.values, dtype=torch.float32)
self.encoded = tokenizer.batch_encode_plus(
self.text,
padding="max_length",
max_length=MAX_LEN,
truncation=True,
return_attention_mask=True,
)
def __len__(self):
return len(self.df)
def __getitem__(self, index):
input_ids = torch.tensor(self.encoded["input_ids"][index])
attention_mask = torch.tensor(self.encoded["attention_mask"][index])
if self.inference_only:
return (input_ids, attention_mask)
else:
target = self.target[index]
return (input_ids, attention_mask, target)
# # Model 1
# Inspired from: https://www.kaggle.com/maunish/clrp-roberta-svm
class LitModel(nn.Module):
def __init__(self):
super().__init__()
config = AutoConfig.from_pretrained(ROBERTA_PATH)
config.update(
{
"output_hidden_states": True,
"hidden_dropout_prob": 0.25,
"layer_norm_eps": 1e-7,
}
)
self.roberta = AutoModel.from_pretrained(ROBERTA_PATH, config=config)
self.attention = nn.Sequential(
nn.Linear(768, 512), nn.Tanh(), nn.Linear(512, 1), nn.Softmax(dim=1)
)
self.regressor = nn.Sequential(nn.Linear(768, 1))
def forward(self, input_ids, attention_mask):
roberta_output = self.roberta(
input_ids=input_ids, attention_mask=attention_mask
)
# There are a total of 13 layers of hidden states.
# 1 for the embedding layer, and 12 for the 12 Roberta layers.
# We take the hidden states from the last Roberta layer.
last_layer_hidden_states = roberta_output.hidden_states[-1]
# The number of cells is MAX_LEN.
# The size of the hidden state of each cell is 768 (for roberta-base).
# In order to condense hidden states of all cells to a context vector,
# we compute a weighted average of the hidden states of all cells.
# We compute the weight of each cell, using the attention neural network.
weights = self.attention(last_layer_hidden_states)
# weights.shape is BATCH_SIZE x MAX_LEN x 1
# last_layer_hidden_states.shape is BATCH_SIZE x MAX_LEN x 768
# Now we compute context_vector as the weighted average.
# context_vector.shape is BATCH_SIZE x 768
context_vector = torch.sum(weights * last_layer_hidden_states, dim=1)
# Now we reduce the context vector to the prediction score.
return self.regressor(context_vector)
def predict(model, data_loader):
"""Returns an np.array with predictions of the |model| on |data_loader|"""
model.eval()
result = np.zeros(len(data_loader.dataset))
index = 0
with torch.no_grad():
for batch_num, (input_ids, attention_mask) in enumerate(data_loader):
input_ids = input_ids.to(DEVICE)
attention_mask = attention_mask.to(DEVICE)
pred = model(input_ids, attention_mask)
result[index : index + pred.shape[0]] = pred.flatten().to("cpu")
index += pred.shape[0]
return result
# # Inference
### train ###
NUM_MODELS = 5
train_all_predictions = np.zeros((NUM_MODELS, len(train_df)))
train_dataset = LitDataset(train_df, inference_only=True)
train_loader = DataLoader(
train_dataset, batch_size=BATCH_SIZE, drop_last=False, shuffle=False, num_workers=2
)
for model_index in tqdm(range(NUM_MODELS)):
model_path = f"../input/commonlit-roberta-0467/model_{model_index + 1}.pth"
print(f"\nUsing {model_path}")
model = LitModel()
model.load_state_dict(torch.load(model_path, map_location=DEVICE))
model.to(DEVICE)
train_all_predictions[model_index] = predict(model, train_loader)
del model
gc.collect()
train_model1_predictions = train_all_predictions.mean(axis=0)
### test ###
NUM_MODELS = 5
all_predictions = np.zeros((NUM_MODELS, len(test_df)))
test_dataset = LitDataset(test_df, inference_only=True)
test_loader = DataLoader(
test_dataset, batch_size=BATCH_SIZE, drop_last=False, shuffle=False, num_workers=2
)
for model_index in tqdm(range(NUM_MODELS)):
model_path = f"../input/commonlit-roberta-0467/model_{model_index + 1}.pth"
print(f"\nUsing {model_path}")
model = LitModel()
model.load_state_dict(torch.load(model_path, map_location=DEVICE))
model.to(DEVICE)
all_predictions[model_index] = predict(model, test_loader)
del model
gc.collect()
model1_predictions = all_predictions.mean(axis=0)
# # Model 2
# Inspired from: [https://www.kaggle.com/rhtsingh/commonlit-readability-prize-roberta-torch-infer-3](https://www.kaggle.com/rhtsingh/commonlit-readability-prize-roberta-torch-infer-3)
test = test_df
train = train_df
from glob import glob
import os
import matplotlib.pyplot as plt
import json
from collections import defaultdict
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.optimizer import Optimizer
import torch.optim.lr_scheduler as lr_scheduler
from torch.utils.data import Dataset, DataLoader, SequentialSampler, RandomSampler
from transformers import RobertaConfig
from transformers import (
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
)
from transformers import RobertaTokenizer
from transformers import RobertaModel
from IPython.display import clear_output
# # Dataset
def convert_examples_to_features(data, tokenizer, max_len, is_test=False):
data = data.replace("\n", "")
tok = tokenizer.encode_plus(
data,
max_length=max_len,
truncation=True,
return_attention_mask=True,
return_token_type_ids=True,
)
curr_sent = {}
padding_length = max_len - len(tok["input_ids"])
curr_sent["input_ids"] = tok["input_ids"] + ([0] * padding_length)
curr_sent["token_type_ids"] = tok["token_type_ids"] + ([0] * padding_length)
curr_sent["attention_mask"] = tok["attention_mask"] + ([0] * padding_length)
return curr_sent
class DatasetRetriever(Dataset):
def __init__(self, data, tokenizer, max_len, is_test=False):
self.data = data
        self.excerpts = self.data.excerpt.values.tolist()
        # __getitem__ reads self.targets when is_test=False, so define it here
        # to avoid an AttributeError on the training branch.
        self.targets = self.data.target.values.tolist() if not is_test else None
        self.tokenizer = tokenizer
        self.is_test = is_test
        self.max_len = max_len
def __len__(self):
return len(self.data)
def __getitem__(self, item):
if not self.is_test:
excerpt, label = self.excerpts[item], self.targets[item]
features = convert_examples_to_features(
excerpt, self.tokenizer, self.max_len, self.is_test
)
return {
"input_ids": torch.tensor(features["input_ids"], dtype=torch.long),
"token_type_ids": torch.tensor(
features["token_type_ids"], dtype=torch.long
),
"attention_mask": torch.tensor(
features["attention_mask"], dtype=torch.long
),
"label": torch.tensor(label, dtype=torch.double),
}
else:
excerpt = self.excerpts[item]
features = convert_examples_to_features(
excerpt, self.tokenizer, self.max_len, self.is_test
)
return {
"input_ids": torch.tensor(features["input_ids"], dtype=torch.long),
"token_type_ids": torch.tensor(
features["token_type_ids"], dtype=torch.long
),
"attention_mask": torch.tensor(
features["attention_mask"], dtype=torch.long
),
}
class CommonLitModel(nn.Module):
def __init__(
self, model_name, config, multisample_dropout=False, output_hidden_states=False
):
super(CommonLitModel, self).__init__()
self.config = config
self.roberta = RobertaModel.from_pretrained(
model_name, output_hidden_states=output_hidden_states
)
self.layer_norm = nn.LayerNorm(config.hidden_size)
if multisample_dropout:
self.dropouts = nn.ModuleList([nn.Dropout(0.5) for _ in range(5)])
else:
self.dropouts = nn.ModuleList([nn.Dropout(0.3)])
self.regressor = nn.Linear(config.hidden_size, 1)
self._init_weights(self.layer_norm)
self._init_weights(self.regressor)
def _init_weights(self, module):
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def forward(
self, input_ids=None, attention_mask=None, token_type_ids=None, labels=None
):
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
)
sequence_output = outputs[1]
sequence_output = self.layer_norm(sequence_output)
# multi-sample dropout
for i, dropout in enumerate(self.dropouts):
if i == 0:
logits = self.regressor(dropout(sequence_output))
else:
logits += self.regressor(dropout(sequence_output))
logits /= len(self.dropouts)
# calculate loss
loss = None
if labels is not None:
loss_fn = torch.nn.MSELoss()
logits = logits.view(-1).to(labels.dtype)
loss = torch.sqrt(loss_fn(logits, labels.view(-1)))
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
def make_model(model_name, num_labels=1):
tokenizer = RobertaTokenizer.from_pretrained(model_name)
config = RobertaConfig.from_pretrained(model_name)
config.update({"num_labels": num_labels})
model = CommonLitModel(model_name, config=config)
return model, tokenizer
def make_loader(
data,
tokenizer,
max_len,
batch_size,
):
test_dataset = DatasetRetriever(data, tokenizer, max_len, is_test=True)
test_sampler = SequentialSampler(test_dataset)
test_loader = DataLoader(
test_dataset,
batch_size=batch_size // 2,
sampler=test_sampler,
pin_memory=False,
drop_last=False,
num_workers=0,
)
return test_loader
class Evaluator:
def __init__(self, model, scalar=None):
self.model = model
self.scalar = scalar
def evaluate(self, data_loader, tokenizer):
preds = []
self.model.eval()
total_loss = 0
with torch.no_grad():
for batch_idx, batch_data in enumerate(data_loader):
input_ids, attention_mask, token_type_ids = (
batch_data["input_ids"],
batch_data["attention_mask"],
batch_data["token_type_ids"],
)
input_ids, attention_mask, token_type_ids = (
input_ids.cuda(),
attention_mask.cuda(),
token_type_ids.cuda(),
)
if self.scalar is not None:
with torch.cuda.amp.autocast():
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
)
else:
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
)
logits = outputs[0].detach().cpu().numpy().squeeze().tolist()
preds += logits
return preds
def config(fold, model_name, load_model_path, test_flag=True):
torch.manual_seed(2021)
torch.cuda.manual_seed(2021)
torch.cuda.manual_seed_all(2021)
max_len = 250
batch_size = 8
model, tokenizer = make_model(model_name=model_name, num_labels=1)
model.load_state_dict(torch.load(f"{load_model_path}/model{fold}.bin"))
if test_flag:
test_loader = make_loader(
test, tokenizer, max_len=max_len, batch_size=batch_size
)
else:
test_loader = make_loader(
train, tokenizer, max_len=max_len, batch_size=batch_size
)
if torch.cuda.device_count() >= 1:
print(
"Model pushed to {} GPU(s), type {}.".format(
torch.cuda.device_count(), torch.cuda.get_device_name(0)
)
)
model = model.cuda()
else:
raise ValueError("CPU training is not supported")
# scaler = torch.cuda.amp.GradScaler()
scaler = None
return (model, tokenizer, test_loader, scaler)
# # Inference
import time
def run(fold=0, model_name=None, load_model_path=None, test_flag=True):
model, tokenizer, test_loader, scaler = config(
fold, model_name, load_model_path, test_flag
)
evaluator = Evaluator(model, scaler)
test_time_list = []
torch.cuda.synchronize()
tic1 = time.time()
preds = evaluator.evaluate(test_loader, tokenizer)
torch.cuda.synchronize()
tic2 = time.time()
test_time_list.append(tic2 - tic1)
del model, tokenizer, test_loader, scaler
gc.collect()
torch.cuda.empty_cache()
return preds
pred_df1 = pd.DataFrame()
pred_df2 = pd.DataFrame()
pred_df3 = pd.DataFrame()
for fold in tqdm(range(5)):
pred_df1[f"fold{fold}"] = run(
fold % 5,
"../input/roberta-transformers-pytorch/roberta-base/",
"../input/commonlit-roberta-base-i/",
)
pred_df2[f"fold{fold+5}"] = run(
fold % 5,
"../input/roberta-transformers-pytorch/roberta-large",
"../input/roberta-large-itptfit/",
)
pred_df3[f"fold{fold+10}"] = run(
fold % 5,
"../input/roberta-transformers-pytorch/roberta-large",
"../input/commonlit-roberta-large-ii/",
)
pred_df1 = np.array(pred_df1)
pred_df2 = np.array(pred_df2)
pred_df3 = np.array(pred_df3)
model2_predictions = (
(pred_df2.mean(axis=1) * 0.5)
+ (pred_df1.mean(axis=1) * 0.3)
+ (pred_df3.mean(axis=1) * 0.2)
)
#### train ###
train_pred_df1 = pd.DataFrame()
train_pred_df2 = pd.DataFrame()
train_pred_df3 = pd.DataFrame()
for fold in tqdm(range(5)):
train_pred_df1[f"fold{fold}"] = run(
fold % 5,
"../input/roberta-transformers-pytorch/roberta-base/",
"../input/commonlit-roberta-base-i/",
test_flag=False,
)
train_pred_df2[f"fold{fold+5}"] = run(
fold % 5,
"../input/roberta-transformers-pytorch/roberta-large",
"../input/roberta-large-itptfit/",
test_flag=False,
)
train_pred_df3[f"fold{fold+10}"] = run(
fold % 5,
"../input/roberta-transformers-pytorch/roberta-large",
"../input/commonlit-roberta-large-ii/",
test_flag=False,
)
train_pred_df1 = np.array(train_pred_df1)
train_pred_df2 = np.array(train_pred_df2)
train_pred_df3 = np.array(train_pred_df3)
train_model2_predictions = (
(train_pred_df2.mean(axis=1) * 0.5)
+ (train_pred_df1.mean(axis=1) * 0.3)
+ (train_pred_df3.mean(axis=1) * 0.2)
)
# # Model 3
# Inspired from: https://www.kaggle.com/jcesquiveld/best-transformer-representations
import os
import numpy as np
import pandas as pd
import random
from transformers import (
AutoConfig,
AutoModel,
AutoTokenizer,
AdamW,
get_linear_schedule_with_warmup,
logging,
)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import (
Dataset,
TensorDataset,
SequentialSampler,
RandomSampler,
DataLoader,
)
from tqdm.notebook import tqdm
import gc
gc.enable()
from IPython.display import clear_output
from sklearn.model_selection import StratifiedKFold
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("whitegrid")
logging.set_verbosity_error()
INPUT_DIR = "../input/commonlitreadabilityprize"
MODEL_DIR = "../input/roberta-transformers-pytorch/roberta-large"
CHECKPOINT_DIR = "../input/clrp-mean-pooling/"
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
MAX_LENGTH = 248
TEST_BATCH_SIZE = 1
HIDDEN_SIZE = 1024
NUM_FOLDS = 5
SEEDS = [113]
test = pd.read_csv(os.path.join(INPUT_DIR, "test.csv"))
train = pd.read_csv(os.path.join(INPUT_DIR, "train.csv"))
class MeanPoolingModel(nn.Module):
def __init__(self, model_name):
super().__init__()
config = AutoConfig.from_pretrained(model_name)
self.model = AutoModel.from_pretrained(model_name, config=config)
self.linear = nn.Linear(HIDDEN_SIZE, 1)
self.loss = nn.MSELoss()
def forward(self, input_ids, attention_mask, labels=None):
outputs = self.model(input_ids, attention_mask)
last_hidden_state = outputs[0]
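        # Masked mean pooling: broadcast the attention mask over the hidden
        # dimension, sum only the non-padding token embeddings, then divide by
        # the (clamped) number of real tokens to get one vector per excerpt.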
input_mask_expanded = (
attention_mask.unsqueeze(-1).expand(last_hidden_state.size()).float()
)
sum_embeddings = torch.sum(last_hidden_state * input_mask_expanded, 1)
sum_mask = input_mask_expanded.sum(1)
sum_mask = torch.clamp(sum_mask, min=1e-9)
mean_embeddings = sum_embeddings / sum_mask
logits = self.linear(mean_embeddings)
preds = logits.squeeze(-1).squeeze(-1)
if labels is not None:
loss = self.loss(preds.view(-1).float(), labels.view(-1).float())
return loss
else:
return preds
def get_test_loader(data):
x_test = data.excerpt.tolist()
tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR)
encoded_test = tokenizer.batch_encode_plus(
x_test,
add_special_tokens=True,
return_attention_mask=True,
padding="max_length",
truncation=True,
max_length=MAX_LENGTH,
return_tensors="pt",
)
dataset_test = TensorDataset(
encoded_test["input_ids"], encoded_test["attention_mask"]
)
dataloader_test = DataLoader(
dataset_test,
sampler=SequentialSampler(dataset_test),
batch_size=TEST_BATCH_SIZE,
)
return dataloader_test
test_dataloader = get_test_loader(test)
train_dataloader = get_test_loader(train)
all_predictions = []
for seed in SEEDS:
fold_predictions = []
for fold in tqdm(range(NUM_FOLDS)):
model_path = f"model_{seed + 1}_{fold + 1}.pth"
print(f"\nUsing {model_path}")
model_path = CHECKPOINT_DIR + f"model_{seed + 1}_{fold + 1}.pth"
model = MeanPoolingModel(MODEL_DIR)
model.load_state_dict(torch.load(model_path))
model.to(DEVICE)
model.eval()
predictions = []
for batch in test_dataloader:
batch = tuple(b.to(DEVICE) for b in batch)
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"labels": None,
}
preds = model(**inputs).item()
predictions.append(preds)
del model
gc.collect()
fold_predictions.append(predictions)
all_predictions.append(np.mean(fold_predictions, axis=0).tolist())
model3_predictions = np.mean(all_predictions, axis=0)
train_all_predictions = []
for seed in SEEDS:
fold_predictions = []
for fold in tqdm(range(NUM_FOLDS)):
model_path = f"model_{seed + 1}_{fold + 1}.pth"
print(f"\nUsing {model_path}")
model_path = CHECKPOINT_DIR + f"model_{seed + 1}_{fold + 1}.pth"
model = MeanPoolingModel(MODEL_DIR)
model.load_state_dict(torch.load(model_path))
model.to(DEVICE)
model.eval()
predictions = []
for batch in train_dataloader:
batch = tuple(b.to(DEVICE) for b in batch)
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"labels": None,
}
preds = model(**inputs).item()
predictions.append(preds)
del model
gc.collect()
fold_predictions.append(predictions)
train_all_predictions.append(np.mean(fold_predictions, axis=0).tolist())
train_model3_predictions = np.mean(train_all_predictions, axis=0)
# # predictions = model1_predictions * 0.5 + model2_predictions * 0.3 + model3_predictions * 0.2 # 0.461
# # predictions = model1_predictions * 0.45 + model2_predictions * 0.35 + model3_predictions * 0.2 # 0.461
# predictions = model1_predictions * 0.40 + model2_predictions * 0.25 + model3_predictions * 0.35 #
# predictions
# train_predictions = train_model1_predictions * 0.40 + train_model2_predictions * 0.25 + train_model3_predictions * 0.35 #
# train_predictions
train_results = pd.DataFrame(
np.vstack(
(train_model1_predictions, train_model2_predictions, train_model3_predictions)
).transpose(),
columns=["model1", "model2", "model3"],
)
train_results["target"] = train_df["target"]
train_results.head()
test_results = pd.DataFrame(
np.vstack((model1_predictions, model2_predictions, model3_predictions)).transpose(),
columns=["model1", "model2", "model3"],
)
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
# Create linear regression object
regr = LinearRegression()
# Train the model using the training sets
regr.fit(train_results.drop("target", axis=1), train_results.target)
# Make predictions using the testing set
y_pred = regr.predict(test_results)
# The fitted stacking coefficients
print("Coefficients: \n", regr.coef_)
print("Coefficients Sum: \n", sum(regr.coef_))
M1, M2, M3 = regr.coef_
print(M1, M2, M3)
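# Not in the original notebook: a quick sanity check that the stacker is simply
# a weighted blend of the three model predictions, since regr.predict(X) equals
# X @ regr.coef_ + regr.intercept_. The manual blend below should match y_pred
# up to floating-point error (it reuses test_results and y_pred from above).
manual_blend = (
    M1 * test_results["model1"]
    + M2 * test_results["model2"]
    + M3 * test_results["model3"]
    + regr.intercept_
)
print("Max abs diff vs regr.predict:", np.abs(manual_blend.values - y_pred).max())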
# # Stacking with LAMA
# TIMEOUT = 15_000 # Time in seconds for automl run
# TARGET_NAME = 'target' # Target column name
# def rmse(x, y): return np.sqrt(mean_squared_error(x, y))
# task = Task('reg', metric=rmse)
# roles = {
# 'target': TARGET_NAME,
# }
# automl = TabularAutoML(task=task,
# timeout=TIMEOUT,
# general_params={'nested_cv': False, 'use_algos': [['linear_l2']]},
# reader_params={'cv': 5},
# selection_params={'mode': 1},
# )
# oof_pred = automl.fit_predict(train_results, roles=roles)
# print('')
# print(rmse(train_results[TARGET_NAME], oof_pred.data[:, 0]))
# predictions = automl.predict(test_results).data[:, 0]
submission_df.target = y_pred # predictions
submission_df
submission_df.to_csv("submission.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/543/69543805.ipynb
|
commonlit-roberta-0467
|
andretugan
|
[{"Id": 69543805, "ScriptId": 18974291, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3847072, "CreationDate": "08/01/2021 10:29:03", "VersionNumber": 4.0, "Title": "New_clrp_ensemble_3x_inference", "EvaluationDate": "08/01/2021", "IsChange": true, "TotalLines": 810.0, "LinesInsertedFromPrevious": 52.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 758.0, "LinesInsertedFromFork": 198.0, "LinesDeletedFromFork": 24.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 612.0, "TotalVotes": 0}]
|
[{"Id": 92851245, "KernelVersionId": 69543805, "SourceDatasetVersionId": 2383189}, {"Id": 92851242, "KernelVersionId": 69543805, "SourceDatasetVersionId": 2263042}, {"Id": 92851241, "KernelVersionId": 69543805, "SourceDatasetVersionId": 2225001}, {"Id": 92851246, "KernelVersionId": 69543805, "SourceDatasetVersionId": 2441389}, {"Id": 92851243, "KernelVersionId": 69543805, "SourceDatasetVersionId": 2279832}, {"Id": 92851244, "KernelVersionId": 69543805, "SourceDatasetVersionId": 2279836}, {"Id": 92851240, "KernelVersionId": 69543805, "SourceDatasetVersionId": 906797}]
|
[{"Id": 2383189, "DatasetId": 1440491, "DatasourceVersionId": 2425105, "CreatorUserId": 6027330, "LicenseName": "Unknown", "CreationDate": "06/30/2021 15:03:46", "VersionNumber": 1.0, "Title": "CommonLit Roberta Model Set", "Slug": "commonlit-roberta-0467", "Subtitle": "5 Roberta models, mean of the predictions gets the score of 0.467", "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1440491, "CreatorUserId": 6027330, "OwnerUserId": 6027330.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2383189.0, "CurrentDatasourceVersionId": 2425105.0, "ForumId": 1459974, "Type": 2, "CreationDate": "06/30/2021 15:03:46", "LastActivityDate": "06/30/2021", "TotalViews": 1480, "TotalDownloads": 113, "TotalVotes": 12, "TotalKernels": 32}]
|
[{"Id": 6027330, "UserName": "andretugan", "DisplayName": "Andrey Tuganov", "RegisterDate": "10/24/2020", "PerformanceTier": 1}]
|
# # Overview
# This notebook combines three RoBERTa-based models: each one predicts the readability target, and their predictions are blended with a linear-regression stacker at the end.
import os
import math
import random
import time
import numpy as np
import pandas as pd
from tqdm import tqdm
import torch
import torch.nn as nn
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers import AutoModel
from transformers import AutoConfig
from sklearn.model_selection import KFold
from sklearn.svm import SVR
import gc
gc.enable()
import transformers
from transformers import BertTokenizer
from sklearn.metrics import mean_squared_error as mse
from sklearn.model_selection import KFold
import lightgbm as lgb
from fastprogress.fastprogress import progress_bar
from sklearn.metrics import mean_squared_error
from lightautoml.automl.presets.text_presets import TabularAutoML
from lightautoml.tasks import Task
BATCH_SIZE = 32
MAX_LEN = 248
EVAL_SCHEDULE = [(0.5, 16), (0.49, 8), (0.48, 4), (0.47, 2), (-1, 1)]
ROBERTA_PATH = "../input/roberta-transformers-pytorch/roberta-base"
TOKENIZER_PATH = "../input/roberta-transformers-pytorch/roberta-base"
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
DEVICE
train_df = pd.read_csv("/kaggle/input/commonlitreadabilityprize/train.csv")
test_df = pd.read_csv("/kaggle/input/commonlitreadabilityprize/test.csv")
submission_df = pd.read_csv(
"/kaggle/input/commonlitreadabilityprize/sample_submission.csv"
)
tokenizer = AutoTokenizer.from_pretrained(TOKENIZER_PATH)
# # Dataset
class LitDataset(Dataset):
def __init__(self, df, inference_only=False):
super().__init__()
self.df = df
self.inference_only = inference_only
self.text = df.excerpt.tolist()
# self.text = [text.replace("\n", " ") for text in self.text]
if not self.inference_only:
self.target = torch.tensor(df.target.values, dtype=torch.float32)
self.encoded = tokenizer.batch_encode_plus(
self.text,
padding="max_length",
max_length=MAX_LEN,
truncation=True,
return_attention_mask=True,
)
def __len__(self):
return len(self.df)
def __getitem__(self, index):
input_ids = torch.tensor(self.encoded["input_ids"][index])
attention_mask = torch.tensor(self.encoded["attention_mask"][index])
if self.inference_only:
return (input_ids, attention_mask)
else:
target = self.target[index]
return (input_ids, attention_mask, target)
# # Model 1
# Inspired from: https://www.kaggle.com/maunish/clrp-roberta-svm
class LitModel(nn.Module):
def __init__(self):
super().__init__()
config = AutoConfig.from_pretrained(ROBERTA_PATH)
config.update(
{
"output_hidden_states": True,
"hidden_dropout_prob": 0.25,
"layer_norm_eps": 1e-7,
}
)
self.roberta = AutoModel.from_pretrained(ROBERTA_PATH, config=config)
self.attention = nn.Sequential(
nn.Linear(768, 512), nn.Tanh(), nn.Linear(512, 1), nn.Softmax(dim=1)
)
self.regressor = nn.Sequential(nn.Linear(768, 1))
def forward(self, input_ids, attention_mask):
roberta_output = self.roberta(
input_ids=input_ids, attention_mask=attention_mask
)
# There are a total of 13 layers of hidden states.
# 1 for the embedding layer, and 12 for the 12 Roberta layers.
# We take the hidden states from the last Roberta layer.
last_layer_hidden_states = roberta_output.hidden_states[-1]
# The number of cells is MAX_LEN.
# The size of the hidden state of each cell is 768 (for roberta-base).
# In order to condense hidden states of all cells to a context vector,
# we compute a weighted average of the hidden states of all cells.
# We compute the weight of each cell, using the attention neural network.
weights = self.attention(last_layer_hidden_states)
# weights.shape is BATCH_SIZE x MAX_LEN x 1
# last_layer_hidden_states.shape is BATCH_SIZE x MAX_LEN x 768
# Now we compute context_vector as the weighted average.
# context_vector.shape is BATCH_SIZE x 768
context_vector = torch.sum(weights * last_layer_hidden_states, dim=1)
# Now we reduce the context vector to the prediction score.
return self.regressor(context_vector)
def predict(model, data_loader):
"""Returns an np.array with predictions of the |model| on |data_loader|"""
model.eval()
result = np.zeros(len(data_loader.dataset))
index = 0
with torch.no_grad():
for batch_num, (input_ids, attention_mask) in enumerate(data_loader):
input_ids = input_ids.to(DEVICE)
attention_mask = attention_mask.to(DEVICE)
pred = model(input_ids, attention_mask)
result[index : index + pred.shape[0]] = pred.flatten().to("cpu")
index += pred.shape[0]
return result
# # Inference
### train ###
NUM_MODELS = 5
train_all_predictions = np.zeros((NUM_MODELS, len(train_df)))
train_dataset = LitDataset(train_df, inference_only=True)
train_loader = DataLoader(
train_dataset, batch_size=BATCH_SIZE, drop_last=False, shuffle=False, num_workers=2
)
for model_index in tqdm(range(NUM_MODELS)):
model_path = f"../input/commonlit-roberta-0467/model_{model_index + 1}.pth"
print(f"\nUsing {model_path}")
model = LitModel()
model.load_state_dict(torch.load(model_path, map_location=DEVICE))
model.to(DEVICE)
train_all_predictions[model_index] = predict(model, train_loader)
del model
gc.collect()
train_model1_predictions = train_all_predictions.mean(axis=0)
### test ###
NUM_MODELS = 5
all_predictions = np.zeros((NUM_MODELS, len(test_df)))
test_dataset = LitDataset(test_df, inference_only=True)
test_loader = DataLoader(
test_dataset, batch_size=BATCH_SIZE, drop_last=False, shuffle=False, num_workers=2
)
for model_index in tqdm(range(NUM_MODELS)):
model_path = f"../input/commonlit-roberta-0467/model_{model_index + 1}.pth"
print(f"\nUsing {model_path}")
model = LitModel()
model.load_state_dict(torch.load(model_path, map_location=DEVICE))
model.to(DEVICE)
all_predictions[model_index] = predict(model, test_loader)
del model
gc.collect()
model1_predictions = all_predictions.mean(axis=0)
# # Model 2
# Inspired from: [https://www.kaggle.com/rhtsingh/commonlit-readability-prize-roberta-torch-infer-3](https://www.kaggle.com/rhtsingh/commonlit-readability-prize-roberta-torch-infer-3)
test = test_df
train = train_df
from glob import glob
import os
import matplotlib.pyplot as plt
import json
from collections import defaultdict
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim.optimizer import Optimizer
import torch.optim.lr_scheduler as lr_scheduler
from torch.utils.data import Dataset, DataLoader, SequentialSampler, RandomSampler
from transformers import RobertaConfig
from transformers import (
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
)
from transformers import RobertaTokenizer
from transformers import RobertaModel
from IPython.display import clear_output
# # Dataset
def convert_examples_to_features(data, tokenizer, max_len, is_test=False):
data = data.replace("\n", "")
tok = tokenizer.encode_plus(
data,
max_length=max_len,
truncation=True,
return_attention_mask=True,
return_token_type_ids=True,
)
curr_sent = {}
padding_length = max_len - len(tok["input_ids"])
curr_sent["input_ids"] = tok["input_ids"] + ([0] * padding_length)
curr_sent["token_type_ids"] = tok["token_type_ids"] + ([0] * padding_length)
curr_sent["attention_mask"] = tok["attention_mask"] + ([0] * padding_length)
return curr_sent
class DatasetRetriever(Dataset):
def __init__(self, data, tokenizer, max_len, is_test=False):
self.data = data
        self.excerpts = self.data.excerpt.values.tolist()
        # __getitem__ reads self.targets when is_test=False, so define it here
        # to avoid an AttributeError on the training branch.
        self.targets = self.data.target.values.tolist() if not is_test else None
        self.tokenizer = tokenizer
        self.is_test = is_test
        self.max_len = max_len
def __len__(self):
return len(self.data)
def __getitem__(self, item):
if not self.is_test:
excerpt, label = self.excerpts[item], self.targets[item]
features = convert_examples_to_features(
excerpt, self.tokenizer, self.max_len, self.is_test
)
return {
"input_ids": torch.tensor(features["input_ids"], dtype=torch.long),
"token_type_ids": torch.tensor(
features["token_type_ids"], dtype=torch.long
),
"attention_mask": torch.tensor(
features["attention_mask"], dtype=torch.long
),
"label": torch.tensor(label, dtype=torch.double),
}
else:
excerpt = self.excerpts[item]
features = convert_examples_to_features(
excerpt, self.tokenizer, self.max_len, self.is_test
)
return {
"input_ids": torch.tensor(features["input_ids"], dtype=torch.long),
"token_type_ids": torch.tensor(
features["token_type_ids"], dtype=torch.long
),
"attention_mask": torch.tensor(
features["attention_mask"], dtype=torch.long
),
}
class CommonLitModel(nn.Module):
def __init__(
self, model_name, config, multisample_dropout=False, output_hidden_states=False
):
super(CommonLitModel, self).__init__()
self.config = config
self.roberta = RobertaModel.from_pretrained(
model_name, output_hidden_states=output_hidden_states
)
self.layer_norm = nn.LayerNorm(config.hidden_size)
if multisample_dropout:
self.dropouts = nn.ModuleList([nn.Dropout(0.5) for _ in range(5)])
else:
self.dropouts = nn.ModuleList([nn.Dropout(0.3)])
self.regressor = nn.Linear(config.hidden_size, 1)
self._init_weights(self.layer_norm)
self._init_weights(self.regressor)
def _init_weights(self, module):
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def forward(
self, input_ids=None, attention_mask=None, token_type_ids=None, labels=None
):
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
)
sequence_output = outputs[1]
sequence_output = self.layer_norm(sequence_output)
# multi-sample dropout
for i, dropout in enumerate(self.dropouts):
if i == 0:
logits = self.regressor(dropout(sequence_output))
else:
logits += self.regressor(dropout(sequence_output))
logits /= len(self.dropouts)
# calculate loss
loss = None
if labels is not None:
loss_fn = torch.nn.MSELoss()
logits = logits.view(-1).to(labels.dtype)
loss = torch.sqrt(loss_fn(logits, labels.view(-1)))
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
def make_model(model_name, num_labels=1):
tokenizer = RobertaTokenizer.from_pretrained(model_name)
config = RobertaConfig.from_pretrained(model_name)
config.update({"num_labels": num_labels})
model = CommonLitModel(model_name, config=config)
return model, tokenizer
def make_loader(
data,
tokenizer,
max_len,
batch_size,
):
test_dataset = DatasetRetriever(data, tokenizer, max_len, is_test=True)
test_sampler = SequentialSampler(test_dataset)
test_loader = DataLoader(
test_dataset,
batch_size=batch_size // 2,
sampler=test_sampler,
pin_memory=False,
drop_last=False,
num_workers=0,
)
return test_loader
class Evaluator:
def __init__(self, model, scalar=None):
self.model = model
self.scalar = scalar
def evaluate(self, data_loader, tokenizer):
preds = []
self.model.eval()
total_loss = 0
with torch.no_grad():
for batch_idx, batch_data in enumerate(data_loader):
input_ids, attention_mask, token_type_ids = (
batch_data["input_ids"],
batch_data["attention_mask"],
batch_data["token_type_ids"],
)
input_ids, attention_mask, token_type_ids = (
input_ids.cuda(),
attention_mask.cuda(),
token_type_ids.cuda(),
)
if self.scalar is not None:
with torch.cuda.amp.autocast():
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
)
else:
outputs = self.model(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
)
logits = outputs[0].detach().cpu().numpy().squeeze().tolist()
preds += logits
return preds
def config(fold, model_name, load_model_path, test_flag=True):
torch.manual_seed(2021)
torch.cuda.manual_seed(2021)
torch.cuda.manual_seed_all(2021)
max_len = 250
batch_size = 8
model, tokenizer = make_model(model_name=model_name, num_labels=1)
model.load_state_dict(torch.load(f"{load_model_path}/model{fold}.bin"))
if test_flag:
test_loader = make_loader(
test, tokenizer, max_len=max_len, batch_size=batch_size
)
else:
test_loader = make_loader(
train, tokenizer, max_len=max_len, batch_size=batch_size
)
if torch.cuda.device_count() >= 1:
print(
"Model pushed to {} GPU(s), type {}.".format(
torch.cuda.device_count(), torch.cuda.get_device_name(0)
)
)
model = model.cuda()
else:
raise ValueError("CPU training is not supported")
# scaler = torch.cuda.amp.GradScaler()
scaler = None
return (model, tokenizer, test_loader, scaler)
# # Inference
import time
def run(fold=0, model_name=None, load_model_path=None, test_flag=True):
model, tokenizer, test_loader, scaler = config(
fold, model_name, load_model_path, test_flag
)
evaluator = Evaluator(model, scaler)
test_time_list = []
torch.cuda.synchronize()
tic1 = time.time()
preds = evaluator.evaluate(test_loader, tokenizer)
torch.cuda.synchronize()
tic2 = time.time()
test_time_list.append(tic2 - tic1)
del model, tokenizer, test_loader, scaler
gc.collect()
torch.cuda.empty_cache()
return preds
pred_df1 = pd.DataFrame()
pred_df2 = pd.DataFrame()
pred_df3 = pd.DataFrame()
for fold in tqdm(range(5)):
pred_df1[f"fold{fold}"] = run(
fold % 5,
"../input/roberta-transformers-pytorch/roberta-base/",
"../input/commonlit-roberta-base-i/",
)
pred_df2[f"fold{fold+5}"] = run(
fold % 5,
"../input/roberta-transformers-pytorch/roberta-large",
"../input/roberta-large-itptfit/",
)
pred_df3[f"fold{fold+10}"] = run(
fold % 5,
"../input/roberta-transformers-pytorch/roberta-large",
"../input/commonlit-roberta-large-ii/",
)
pred_df1 = np.array(pred_df1)
pred_df2 = np.array(pred_df2)
pred_df3 = np.array(pred_df3)
model2_predictions = (
(pred_df2.mean(axis=1) * 0.5)
+ (pred_df1.mean(axis=1) * 0.3)
+ (pred_df3.mean(axis=1) * 0.2)
)
#### train ###
train_pred_df1 = pd.DataFrame()
train_pred_df2 = pd.DataFrame()
train_pred_df3 = pd.DataFrame()
for fold in tqdm(range(5)):
train_pred_df1[f"fold{fold}"] = run(
fold % 5,
"../input/roberta-transformers-pytorch/roberta-base/",
"../input/commonlit-roberta-base-i/",
test_flag=False,
)
train_pred_df2[f"fold{fold+5}"] = run(
fold % 5,
"../input/roberta-transformers-pytorch/roberta-large",
"../input/roberta-large-itptfit/",
test_flag=False,
)
train_pred_df3[f"fold{fold+10}"] = run(
fold % 5,
"../input/roberta-transformers-pytorch/roberta-large",
"../input/commonlit-roberta-large-ii/",
test_flag=False,
)
train_pred_df1 = np.array(train_pred_df1)
train_pred_df2 = np.array(train_pred_df2)
train_pred_df3 = np.array(train_pred_df3)
train_model2_predictions = (
(train_pred_df2.mean(axis=1) * 0.5)
+ (train_pred_df1.mean(axis=1) * 0.3)
+ (train_pred_df3.mean(axis=1) * 0.2)
)
# # Model 3
# Inspired from: https://www.kaggle.com/jcesquiveld/best-transformer-representations
import os
import numpy as np
import pandas as pd
import random
from transformers import (
AutoConfig,
AutoModel,
AutoTokenizer,
AdamW,
get_linear_schedule_with_warmup,
logging,
)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import (
Dataset,
TensorDataset,
SequentialSampler,
RandomSampler,
DataLoader,
)
from tqdm.notebook import tqdm
import gc
gc.enable()
from IPython.display import clear_output
from sklearn.model_selection import StratifiedKFold
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("whitegrid")
logging.set_verbosity_error()
INPUT_DIR = "../input/commonlitreadabilityprize"
MODEL_DIR = "../input/roberta-transformers-pytorch/roberta-large"
CHECKPOINT_DIR = "../input/clrp-mean-pooling/"
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
MAX_LENGTH = 248
TEST_BATCH_SIZE = 1
HIDDEN_SIZE = 1024
NUM_FOLDS = 5
SEEDS = [113]
test = pd.read_csv(os.path.join(INPUT_DIR, "test.csv"))
train = pd.read_csv(os.path.join(INPUT_DIR, "train.csv"))
class MeanPoolingModel(nn.Module):
def __init__(self, model_name):
super().__init__()
config = AutoConfig.from_pretrained(model_name)
self.model = AutoModel.from_pretrained(model_name, config=config)
self.linear = nn.Linear(HIDDEN_SIZE, 1)
self.loss = nn.MSELoss()
def forward(self, input_ids, attention_mask, labels=None):
outputs = self.model(input_ids, attention_mask)
last_hidden_state = outputs[0]
input_mask_expanded = (
attention_mask.unsqueeze(-1).expand(last_hidden_state.size()).float()
)
sum_embeddings = torch.sum(last_hidden_state * input_mask_expanded, 1)
sum_mask = input_mask_expanded.sum(1)
sum_mask = torch.clamp(sum_mask, min=1e-9)
mean_embeddings = sum_embeddings / sum_mask
logits = self.linear(mean_embeddings)
preds = logits.squeeze(-1).squeeze(-1)
if labels is not None:
loss = self.loss(preds.view(-1).float(), labels.view(-1).float())
return loss
else:
return preds
def get_test_loader(data):
x_test = data.excerpt.tolist()
tokenizer = AutoTokenizer.from_pretrained(MODEL_DIR)
encoded_test = tokenizer.batch_encode_plus(
x_test,
add_special_tokens=True,
return_attention_mask=True,
padding="max_length",
truncation=True,
max_length=MAX_LENGTH,
return_tensors="pt",
)
dataset_test = TensorDataset(
encoded_test["input_ids"], encoded_test["attention_mask"]
)
dataloader_test = DataLoader(
dataset_test,
sampler=SequentialSampler(dataset_test),
batch_size=TEST_BATCH_SIZE,
)
return dataloader_test
test_dataloader = get_test_loader(test)
train_dataloader = get_test_loader(train)
all_predictions = []
for seed in SEEDS:
fold_predictions = []
for fold in tqdm(range(NUM_FOLDS)):
model_path = f"model_{seed + 1}_{fold + 1}.pth"
print(f"\nUsing {model_path}")
model_path = CHECKPOINT_DIR + f"model_{seed + 1}_{fold + 1}.pth"
model = MeanPoolingModel(MODEL_DIR)
model.load_state_dict(torch.load(model_path))
model.to(DEVICE)
model.eval()
predictions = []
for batch in test_dataloader:
batch = tuple(b.to(DEVICE) for b in batch)
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"labels": None,
}
preds = model(**inputs).item()
predictions.append(preds)
del model
gc.collect()
fold_predictions.append(predictions)
all_predictions.append(np.mean(fold_predictions, axis=0).tolist())
model3_predictions = np.mean(all_predictions, axis=0)
train_all_predictions = []
for seed in SEEDS:
fold_predictions = []
for fold in tqdm(range(NUM_FOLDS)):
model_path = f"model_{seed + 1}_{fold + 1}.pth"
print(f"\nUsing {model_path}")
model_path = CHECKPOINT_DIR + f"model_{seed + 1}_{fold + 1}.pth"
model = MeanPoolingModel(MODEL_DIR)
model.load_state_dict(torch.load(model_path))
model.to(DEVICE)
model.eval()
predictions = []
for batch in train_dataloader:
batch = tuple(b.to(DEVICE) for b in batch)
inputs = {
"input_ids": batch[0],
"attention_mask": batch[1],
"labels": None,
}
preds = model(**inputs).item()
predictions.append(preds)
del model
gc.collect()
fold_predictions.append(predictions)
train_all_predictions.append(np.mean(fold_predictions, axis=0).tolist())
train_model3_predictions = np.mean(train_all_predictions, axis=0)
# # predictions = model1_predictions * 0.5 + model2_predictions * 0.3 + model3_predictions * 0.2 # 0.461
# # predictions = model1_predictions * 0.45 + model2_predictions * 0.35 + model3_predictions * 0.2 # 0.461
# predictions = model1_predictions * 0.40 + model2_predictions * 0.25 + model3_predictions * 0.35 #
# predictions
# train_predictions = train_model1_predictions * 0.40 + train_model2_predictions * 0.25 + train_model3_predictions * 0.35 #
# train_predictions
train_results = pd.DataFrame(
np.vstack(
(train_model1_predictions, train_model2_predictions, train_model3_predictions)
).transpose(),
columns=["model1", "model2", "model3"],
)
train_results["target"] = train_df["target"]
train_results.head()
test_results = pd.DataFrame(
np.vstack((model1_predictions, model2_predictions, model3_predictions)).transpose(),
columns=["model1", "model2", "model3"],
)
from sklearn.metrics import mean_squared_error
from sklearn.linear_model import LinearRegression
# Create linear regression object
regr = LinearRegression()
# Train the model using the training sets
regr.fit(train_results.drop("target", axis=1), train_results.target)
# Make predictions using the testing set
y_pred = regr.predict(test_results)
# The coefficients
print("Coefficients: \n", regr.coef_)
print("Coefficients Sum: \n", sum(regr.coef_))
print("Coefficients: \n", regr.coef_)
M1, M2, M3 = regr.coef_
print(M1, M2, M3)
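# For reference, a minimal sketch (not in the original notebook) showing that the stacked
# prediction is just a weighted blend of the three model outputs plus the intercept;
# it matches regr.predict(test_results) up to floating-point error.
manual_blend = (
    test_results["model1"] * M1
    + test_results["model2"] * M2
    + test_results["model3"] * M3
    + regr.intercept_
)
print(np.allclose(manual_blend, y_pred))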
# # Stacking with LAMA
# TIMEOUT = 15_000 # Time in seconds for automl run
# TARGET_NAME = 'target' # Target column name
# def rmse(x, y): return np.sqrt(mean_squared_error(x, y))
# task = Task('reg', metric=rmse)
# roles = {
# 'target': TARGET_NAME,
# }
# automl = TabularAutoML(task=task,
# timeout=TIMEOUT,
# general_params={'nested_cv': False, 'use_algos': [['linear_l2']]},
# reader_params={'cv': 5},
# selection_params={'mode': 1},
# )
# oof_pred = automl.fit_predict(train_results, roles=roles)
# print('')
# print(rmse(train_results[TARGET_NAME], oof_pred.data[:, 0]))
# predictions = automl.predict(test_results).data[:, 0]
submission_df.target = y_pred # predictions
submission_df
submission_df.to_csv("submission.csv", index=False)
| false | 3 | 7,173 | 0 | 7,202 | 7,173 |
||
69543362
|
# # Tabular Playground AUG 2021 with GPU
# 
# Imports
from sklearn.preprocessing import LabelEncoder
from functools import partial
from sklearn import ensemble
from sklearn import model_selection
from hyperopt import hp, fmin, tpe, Trials
from hyperopt.pyll.base import scope
import os
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as ptl
from xgboost import XGBRegressor
from sklearn.metrics import mean_squared_error
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
#
# Files and cross validation setup
train = pd.read_csv("../input/tabular-playground-series-aug-2021/train.csv")
test = pd.read_csv("../input/tabular-playground-series-aug-2021/test.csv")
train.head()
train.shape
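# The continuous target "loss" is binned into 19 buckets below so that StratifiedKFold can
# keep the loss distribution roughly balanced across the 10 folds; the helper "bins" column
# is dropped again once the fold indices have been assigned.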
train["kfold"] = -1
train = train.sample(frac=1).reset_index(drop=True)
train.loc[:, "bins"] = pd.cut(train["loss"], bins=19, labels=False)
kf = model_selection.StratifiedKFold(n_splits=10)
for f, (t_, v_) in enumerate(kf.split(X=train, y=train.bins.values)):
train.loc[v_, "kfold"] = f
train = train.drop("bins", axis=1)
X = train.iloc[:, 1:].copy()
X.drop("loss", axis=1, inplace=True)
target = ["loss", "kfold"]
X = X
y = train[target]
#
# HyperParameter Optimization
# Ideally this is a classification problem, but since the evaluation metric is RMSE, a regression model is used.
def optimize(params, x, y):
model = XGBRegressor(n_jobs=2, nthread=4, random_state=0, **params)
RMSE = []
for i in range(0, 10):
X_t = x[x.kfold != i]
X_train = X_t.iloc[:, :-1]
y_t = y[y.kfold != i]
y_train = y_t.iloc[:, 0].values
X_te = x[x.kfold == i]
X_test = X_te.iloc[:, :-1]
y_te = y[y.kfold == i]
y_test = y_te.iloc[:, 0].values
model.fit(X_train, y_train)
preds = model.predict(X_test)
fold_ras = mean_squared_error(y_test, preds, squared=False)
RMSE.append(fold_ras)
print(RMSE)
return np.mean(RMSE)
param_space = {
"learning_rate": hp.uniform("learning_rate", 0.005, 0.2),
"max_depth": scope.int(hp.quniform("max_depth", 7, 14, 1)),
"n_estimators": scope.int(hp.quniform("n_estimators", 800, 1100, 1)),
"tree_method": hp.choice("tree_method", ["gpu_hist"]),
"booster": hp.choice("booster", ["gbtree"]),
# "objective": hp.choice("objective", ["binary:logistic"]),
"eval_metric": hp.choice("eval_metric", ["rmse"]),
"predictor": hp.choice("predictor", ["gpu_predictor"]),
"gpu_id": hp.choice("gpu_id", [0]),
# uniform chooses a value between two values
"subsample": hp.uniform("subsample", 0.75, 0.98),
"min_child_weight": hp.uniform("min_child_weight", 100, 400),
"colsample_bytree": hp.uniform("colsample_bytree", 0.75, 0.96),
"reg_alpha": hp.uniform("reg_alpha", 0.03, 6),
"reg_lambda": hp.uniform("reg_lambda", 0.03, 7),
"gamma": hp.uniform("gamma", 0.01, 6),
"max_delta_step": hp.uniform("max_delta_step", 0.1, 11),
"colsample_bylevel": hp.uniform("colsample_bylevel", 0.7, 0.96),
"colsample_bynode": hp.uniform("colsample_bynode", 0.6, 0.96),
}
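# Illustration only (not part of the original notebook): a single random draw from the
# search space can be previewed with hyperopt's pyll sampler, which is handy for
# sanity-checking the ranges above before launching the full optimization.
from hyperopt.pyll import stochastic
print(stochastic.sample(param_space))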
optimization_function = partial(
optimize,
# param_names=param_names,
x=X,
y=y,
)
trials = Trials()
hopt = fmin(
fn=optimization_function,
space=param_space,
algo=tpe.suggest,
max_evals=50,
trials=trials,
)
print(hopt)
#
# Training and submission
para = {
"booster": 0,
"colsample_bylevel": 0.9377629603546923,
"colsample_bynode": 0.6203129196844466,
"colsample_bytree": 0.8662523590957234,
"eval_metric": 0,
"gamma": 2.56784868937463,
"gpu_id": 0,
"learning_rate": 0.020372546225192883,
"max_delta_step": 8.174023744945664,
"max_depth": 10.0,
"min_child_weight": 342.4606309007503,
"n_estimators": 909.0,
"predictor": 0,
"reg_alpha": 4.337902688500327,
"reg_lambda": 6.925018305951919,
"subsample": 0.855931381854741,
"tree_method": 0,
}
model = XGBRegressor(
n_jobs=4,
nthread=4,
random_state=0,
booster="gbtree",
colsample_bylevel=para["colsample_bylevel"],
colsample_bynode=para["colsample_bynode"],
colsample_bytree=para["colsample_bytree"],
gamma=para["gamma"],
eval_metric="rmse",
gpu_id=0,
learning_rate=para["learning_rate"],
max_delta_step=para["max_delta_step"],
max_depth=10,
min_child_weight=para["min_child_weight"],
n_estimators=909,
predictor="gpu_predictor",
reg_alpha=para["reg_alpha"],
reg_lambda=para["reg_lambda"],
subsample=para["subsample"],
tree_method="gpu_hist",
)
X_t = X[X.kfold != 5]
X_train = X_t.iloc[:, :-1]
y_t = y[y.kfold != 5]
y_train = y_t.iloc[:, 0].values
X_te = X[X.kfold == 5]
X_test = X_te.iloc[:, :-1]
y_te = y[y.kfold == 5]
y_test = y_te.iloc[:, 0].values
model.fit(X_train, y_train)
pre = model.predict(X_test)
mean_squared_error(y_test, pre, squared=False)
test1 = test.iloc[:, 1:]
preds = model.predict(test1)
submi = pd.DataFrame({"id": test["id"], "loss": preds})
submi.to_csv("submission1.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/543/69543362.ipynb
| null | null |
[{"Id": 69543362, "ScriptId": 18984854, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3711290, "CreationDate": "08/01/2021 10:21:57", "VersionNumber": 3.0, "Title": "Baseline_GPU Based XGB Hyperopt optimize", "EvaluationDate": "08/01/2021", "IsChange": true, "TotalLines": 135.0, "LinesInsertedFromPrevious": 5.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 130.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
| null | null | null | null |
# # Tabular Playground AUG 2021 with GPU
# 
# Imports
from sklearn.preprocessing import LabelEncoder
from functools import partial
from sklearn import ensemble
from sklearn import model_selection
from hyperopt import hp, fmin, tpe, Trials
from hyperopt.pyll.base import scope
import os
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as ptl
from xgboost import XGBRegressor
from sklearn.metrics import mean_squared_error
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
#
# Files and cross validation setup
train = pd.read_csv("../input/tabular-playground-series-aug-2021/train.csv")
test = pd.read_csv("../input/tabular-playground-series-aug-2021/test.csv")
train.head()
train.shape
train["kfold"] = -1
train = train.sample(frac=1).reset_index(drop=True)
train.loc[:, "bins"] = pd.cut(train["loss"], bins=19, labels=False)
kf = model_selection.StratifiedKFold(n_splits=10)
for f, (t_, v_) in enumerate(kf.split(X=train, y=train.bins.values)):
train.loc[v_, "kfold"] = f
train = train.drop("bins", axis=1)
X = train.iloc[:, 1:].copy()
X.drop("loss", axis=1, inplace=True)
target = ["loss", "kfold"]
X = X
y = train[target]
#
# HyperParameter Optimization
# Ideally this is a classification problem, but since the evaluation metric is RMSE, a regression model is used.
def optimize(params, x, y):
model = XGBRegressor(n_jobs=2, nthread=4, random_state=0, **params)
RMSE = []
for i in range(0, 10):
X_t = x[x.kfold != i]
X_train = X_t.iloc[:, :-1]
y_t = y[y.kfold != i]
y_train = y_t.iloc[:, 0].values
X_te = x[x.kfold == i]
X_test = X_te.iloc[:, :-1]
y_te = y[y.kfold == i]
y_test = y_te.iloc[:, 0].values
model.fit(X_train, y_train)
preds = model.predict(X_test)
fold_ras = mean_squared_error(y_test, preds, squared=False)
RMSE.append(fold_ras)
print(RMSE)
return np.mean(RMSE)
param_space = {
"learning_rate": hp.uniform("learning_rate", 0.005, 0.2),
"max_depth": scope.int(hp.quniform("max_depth", 7, 14, 1)),
"n_estimators": scope.int(hp.quniform("n_estimators", 800, 1100, 1)),
"tree_method": hp.choice("tree_method", ["gpu_hist"]),
"booster": hp.choice("booster", ["gbtree"]),
# "objective": hp.choice("objective", ["binary:logistic"]),
"eval_metric": hp.choice("eval_metric", ["rmse"]),
"predictor": hp.choice("predictor", ["gpu_predictor"]),
"gpu_id": hp.choice("gpu_id", [0]),
# uniform chooses a value between two values
"subsample": hp.uniform("subsample", 0.75, 0.98),
"min_child_weight": hp.uniform("min_child_weight", 100, 400),
"colsample_bytree": hp.uniform("colsample_bytree", 0.75, 0.96),
"reg_alpha": hp.uniform("reg_alpha", 0.03, 6),
"reg_lambda": hp.uniform("reg_lambda", 0.03, 7),
"gamma": hp.uniform("gamma", 0.01, 6),
"max_delta_step": hp.uniform("max_delta_step", 0.1, 11),
"colsample_bylevel": hp.uniform("colsample_bylevel", 0.7, 0.96),
"colsample_bynode": hp.uniform("colsample_bynode", 0.6, 0.96),
}
optimization_function = partial(
optimize,
# param_names=param_names,
x=X,
y=y,
)
trials = Trials()
hopt = fmin(
fn=optimization_function,
space=param_space,
algo=tpe.suggest,
max_evals=50,
trials=trials,
)
print(hopt)
#
# Training and submission
para = {
"booster": 0,
"colsample_bylevel": 0.9377629603546923,
"colsample_bynode": 0.6203129196844466,
"colsample_bytree": 0.8662523590957234,
"eval_metric": 0,
"gamma": 2.56784868937463,
"gpu_id": 0,
"learning_rate": 0.020372546225192883,
"max_delta_step": 8.174023744945664,
"max_depth": 10.0,
"min_child_weight": 342.4606309007503,
"n_estimators": 909.0,
"predictor": 0,
"reg_alpha": 4.337902688500327,
"reg_lambda": 6.925018305951919,
"subsample": 0.855931381854741,
"tree_method": 0,
}
model = XGBRegressor(
n_jobs=4,
nthread=4,
random_state=0,
booster="gbtree",
colsample_bylevel=para["colsample_bylevel"],
colsample_bynode=para["colsample_bynode"],
colsample_bytree=para["colsample_bytree"],
gamma=para["gamma"],
eval_metric="rmse",
gpu_id=0,
learning_rate=para["learning_rate"],
max_delta_step=para["max_delta_step"],
max_depth=10,
min_child_weight=para["min_child_weight"],
n_estimators=909,
predictor="gpu_predictor",
reg_alpha=para["reg_alpha"],
reg_lambda=para["reg_lambda"],
subsample=para["subsample"],
tree_method="gpu_hist",
)
X_t = X[X.kfold != 5]
X_train = X_t.iloc[:, :-1]
y_t = y[y.kfold != 5]
y_train = y_t.iloc[:, 0].values
X_te = X[X.kfold == 5]
X_test = X_te.iloc[:, :-1]
y_te = y[y.kfold == 5]
y_test = y_te.iloc[:, 0].values
model.fit(X_train, y_train)
pre = model.predict(X_test)
mean_squared_error(y_test, pre, squared=False)
test1 = test.iloc[:, 1:]
preds = model.predict(test1)
submi = pd.DataFrame({"id": test["id"], "loss": preds})
submi.to_csv("submission1.csv", index=False)
| false | 0 | 2,041 | 2 | 2,041 | 2,041 |
||
69543829
|
<jupyter_start><jupyter_text>Bangladesh Weather Dataset
### Content
This dataset contains the monthly average value of Bangladesh temperature and rain from 1901 to 2015
Kaggle dataset identifier: bangladesh-weather-dataset
<jupyter_code>import pandas as pd
df = pd.read_csv('bangladesh-weather-dataset/Temp_and_rain.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 1380 entries, 0 to 1379
Data columns (total 4 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 tem 1380 non-null float64
1 Month 1380 non-null int64
2 Year 1380 non-null int64
3 rain 1380 non-null float64
dtypes: float64(2), int64(2)
memory usage: 43.2 KB
<jupyter_text>Examples:
{
"tem": 16.976,
"Month": 1.0,
"Year": 1901.0,
"rain": 18.5356
}
{
"tem": 19.9026,
"Month": 2.0,
"Year": 1901.0,
"rain": 16.2548
}
{
"tem": 24.3158,
"Month": 3.0,
"Year": 1901.0,
"rain": 70.7981
}
{
"tem": 28.1834,
"Month": 4.0,
"Year": 1901.0,
"rain": 66.1616
}
<jupyter_script>import numpy as np
import pandas as pd
import os
import pandas_profiling
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.io as pio
df = pd.read_csv("../input/bangladesh-weather-dataset/Temp_and_rain.csv")
df.head(5)
df.loc[df["Month"] == 1, "month"] = "January"
df.loc[df["Month"] == 2, "month"] = "February"
df.loc[df["Month"] == 3, "month"] = "March"
df.loc[df["Month"] == 4, "month"] = "April"
df.loc[df["Month"] == 5, "month"] = "May"
df.loc[df["Month"] == 6, "month"] = "June"
df.loc[df["Month"] == 7, "month"] = "July"
df.loc[df["Month"] == 8, "month"] = "August"
df.loc[df["Month"] == 9, "month"] = "September"
df.loc[df["Month"] == 10, "month"] = "October"
df.loc[df["Month"] == 11, "month"] = "November"
df.loc[df["Month"] == 12, "month"] = "December"
df.info()
df.describe()
print(
"The dataset started from: ",
df["Year"].min(),
"\nThe dataset Ends at: ",
df["Year"].max(),
)
# ### Judging from the data, I think I can answer a few questions, like:
# - Temperature of Bangladesh
# - Lowest temperature
# - How temperature changes from month to month
# # Let's analyze the temperature
print("Highest Temperature: ", df["tem"].max())
print("Lowest Temperature: ", df["tem"].min())
# **The max temperature was recorded in May 1979**
# **The min temperature was recorded in January 1978**
print(df[df["tem"] == df["tem"].max()])
print(df[df["tem"] == df["tem"].min()])
print(
    "Total entries when temperature is above average: ",
    len(df[df["tem"] >= df["tem"].mean()]),
)
print(
    "Total entries when temperature is below average: ",
    len(df) - len(df[df["tem"] >= df["tem"].mean()]),
)
# #### 'January', 'February', 'November', 'December' are the cold months
# #### And 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October' are the warm months
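# A quick numeric check (added for illustration): the monthly mean temperature makes the
# cold/warm grouping above easy to verify.
print(df.groupby("month")["tem"].mean().sort_values())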
pio.templates.default = "plotly_dark"
fig = px.scatter(
    df, x="month", y="tem", color="tem", title="Measurement of Temperature per Month"
)
fig.show()
# ## We can conclude that Bangladesh is a warm country
pio.templates.default = "plotly_dark"
fig = px.bar(
df[0:300],
x="Year",
y="tem",
color="tem",
    title="Measurement of Temperature throughout the years",
)
fig.show()
# #### Max and Min Temperature per months
for i in range(1, 13):
print("Max value for month ", i, " is: ", df[df["Month"] == i]["tem"].max())
print("Min value for month ", i, " is: ", df[df["Month"] == i]["tem"].min())
print("____________________________________")
# #### Max and Min Temperature per year
for i in range(1901, 2016):
print("Max value for Year ", i, " is: ", df[df["Year"] == i]["tem"].max())
print("Min value for Year ", i, " is: ", df[df["Year"] == i]["tem"].min())
print("____________________________________")
df["Year"].unique()
# # Let's analyze the Rainfall data
# **Highest rainfall: 1012 was recorded in August 2011**
# **Lowest rainfall: 0 was recorded in December 1927**
print(
df[df["rain"] == df["rain"].max()][
[
"rain",
"tem",
"month",
"Year",
]
]
)
print(
df[df["rain"] == df["rain"].min()][
[
"rain",
"tem",
"month",
"Year",
]
]
)
print(
"Total entries when rainfall is above average: ",
len(df[df["rain"] >= df["rain"].mean()]),
)
print(
"Total entries when rainfall is less than average: ",
len(df) - len(df[df["rain"] >= df["rain"].mean()]),
)
# ### 'May', 'June', 'July','August', 'September' are the months when heavy rainfall takes place
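# A quick numeric check (added for illustration): mean rainfall per month confirms that the
# monsoon months listed above dominate.
print(df.groupby("month")["rain"].mean().sort_values(ascending=False))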
pio.templates.default = "plotly_dark"
fig = px.scatter(df, x="month", y="rain", color="rain", title="Measurement of Rain")
fig.show()
fig = px.pie(df, values="rain", names="month", title="Rainfall according to Month")
fig.update_traces(textposition="inside", textinfo="percent+label")
fig.show()
# ## Rainfall occurs throughout the year
pio.templates.default = "plotly_dark"
fig = px.bar(df[0:300], x="Year", y="rain", color="rain")
fig.show()
# #### Max and Min Rainfall per months
for i in range(1, 13):
print("Max value for month ", i, " is: ", df[df["Month"] == i]["rain"].max())
print("Min value for month ", i, " is: ", df[df["Month"] == i]["rain"].min())
print("____________________________________")
# #### Max and Min Rainfall per Year
for i in range(1901, 2016):
print("Max value for Year ", i, " is: ", df[df["Year"] == i]["rain"].max())
print("Min value for Year ", i, " is: ", df[df["Year"] == i]["rain"].min())
print("____________________________________")
# # Correlation between rain and temperature
corr = df.corr(method="pearson")
fig = px.imshow(corr)
fig.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/543/69543829.ipynb
|
bangladesh-weather-dataset
|
yakinrubaiat
|
[{"Id": 69543829, "ScriptId": 18988014, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4378523, "CreationDate": "08/01/2021 10:29:29", "VersionNumber": 1.0, "Title": "EDA of Bangladeshi Weather Dataset", "EvaluationDate": "08/01/2021", "IsChange": true, "TotalLines": 133.0, "LinesInsertedFromPrevious": 133.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 92851308, "KernelVersionId": 69543829, "SourceDatasetVersionId": 386335}]
|
[{"Id": 386335, "DatasetId": 170193, "DatasourceVersionId": 401185, "CreatorUserId": 1755663, "LicenseName": "CC0: Public Domain", "CreationDate": "04/20/2019 06:06:34", "VersionNumber": 1.0, "Title": "Bangladesh Weather Dataset", "Slug": "bangladesh-weather-dataset", "Subtitle": "Weather Data from 1901 to 2015", "Description": "### Content\n\nThis dataset contains the monthly average value of Bangladesh temperature and rain from 1901 to 2015\n\n### Acknowledgements\n\nWe wouldn't be here without the help of others. If you owe any attributions or thanks, include them here along with any citations of past research.", "VersionNotes": "Initial release", "TotalCompressedBytes": 33189.0, "TotalUncompressedBytes": 33189.0}]
|
[{"Id": 170193, "CreatorUserId": 1755663, "OwnerUserId": 1755663.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 386335.0, "CurrentDatasourceVersionId": 401185.0, "ForumId": 180888, "Type": 2, "CreationDate": "04/20/2019 06:06:34", "LastActivityDate": "04/20/2019", "TotalViews": 18396, "TotalDownloads": 2521, "TotalVotes": 47, "TotalKernels": 11}]
|
[{"Id": 1755663, "UserName": "yakinrubaiat", "DisplayName": "Yakin", "RegisterDate": "03/25/2018", "PerformanceTier": 1}]
|
import numpy as np
import pandas as pd
import os
import pandas_profiling
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import plotly.io as pio
df = pd.read_csv("../input/bangladesh-weather-dataset/Temp_and_rain.csv")
df.head(5)
df.loc[df["Month"] == 1, "month"] = "January"
df.loc[df["Month"] == 2, "month"] = "February"
df.loc[df["Month"] == 3, "month"] = "March"
df.loc[df["Month"] == 4, "month"] = "April"
df.loc[df["Month"] == 5, "month"] = "May"
df.loc[df["Month"] == 6, "month"] = "June"
df.loc[df["Month"] == 7, "month"] = "July"
df.loc[df["Month"] == 8, "month"] = "August"
df.loc[df["Month"] == 9, "month"] = "September"
df.loc[df["Month"] == 10, "month"] = "October"
df.loc[df["Month"] == 11, "month"] = "November"
df.loc[df["Month"] == 12, "month"] = "December"
df.info()
df.describe()
print(
"The dataset started from: ",
df["Year"].min(),
"\nThe dataset Ends at: ",
df["Year"].max(),
)
# ### Judging from the data, I think I can answer a few questions, like:
# - Temperature of Bangladesh
# - Lowest temperature
# - How temperature changes from month to month
# # Let's analyze the temperature
print("Highest Temperature: ", df["tem"].max())
print("Lowest Temperature: ", df["tem"].min())
# **The max temperature was recorded in May 1979**
# **The min temperature was recorded in January 1978**
print(df[df["tem"] == df["tem"].max()])
print(df[df["tem"] == df["tem"].min()])
print(
    "Total entries when temperature is above average: ",
    len(df[df["tem"] >= df["tem"].mean()]),
)
print(
    "Total entries when temperature is below average: ",
    len(df) - len(df[df["tem"] >= df["tem"].mean()]),
)
# #### 'January', 'February', 'November', 'December' are the cold months
# #### And 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October' are the warm months
pio.templates.default = "plotly_dark"
fig = px.scatter(
    df, x="month", y="tem", color="tem", title="Measurement of Temperature per Month"
)
fig.show()
# ## We can conclude that Bangladesh is a warm country
pio.templates.default = "plotly_dark"
fig = px.bar(
df[0:300],
x="Year",
y="tem",
color="tem",
    title="Measurement of Temperature throughout the years",
)
fig.show()
# #### Max and Min Temperature per months
for i in range(1, 13):
print("Max value for month ", i, " is: ", df[df["Month"] == i]["tem"].max())
print("Min value for month ", i, " is: ", df[df["Month"] == i]["tem"].min())
print("____________________________________")
# #### Max and Min Temperature per year
for i in range(1901, 2016):
print("Max value for Year ", i, " is: ", df[df["Year"] == i]["tem"].max())
print("Min value for Year ", i, " is: ", df[df["Year"] == i]["tem"].min())
print("____________________________________")
df["Year"].unique()
# # Let's analyze the Rainfall data
# **Highest rainfall: 1012 was recorded in August 2011**
# **Lowest rainfall: 0 was recorded in December 1927**
print(
df[df["rain"] == df["rain"].max()][
[
"rain",
"tem",
"month",
"Year",
]
]
)
print(
df[df["rain"] == df["rain"].min()][
[
"rain",
"tem",
"month",
"Year",
]
]
)
print(
"Total entries when rainfall is above average: ",
len(df[df["rain"] >= df["rain"].mean()]),
)
print(
"Total entries when rainfall is less than average: ",
len(df) - len(df[df["rain"] >= df["rain"].mean()]),
)
# ### 'May', 'June', 'July','August', 'September' are the months when heavy rainfall takes place
pio.templates.default = "plotly_dark"
fig = px.scatter(df, x="month", y="rain", color="rain", title="Measurement of Rain")
fig.show()
fig = px.pie(df, values="rain", names="month", title="Rainfall according to Month")
fig.update_traces(textposition="inside", textinfo="percent+label")
fig.show()
# ## Rainfall occurs throughout the year
pio.templates.default = "plotly_dark"
fig = px.bar(df[0:300], x="Year", y="rain", color="rain")
fig.show()
# #### Max and Min Rainfall per months
for i in range(1, 13):
print("Max value for month ", i, " is: ", df[df["Month"] == i]["rain"].max())
print("Min value for month ", i, " is: ", df[df["Month"] == i]["rain"].min())
print("____________________________________")
# #### Max and Min Rainfall per Year
for i in range(1901, 2016):
print("Max value for Year ", i, " is: ", df[df["Year"] == i]["rain"].max())
print("Min value for Year ", i, " is: ", df[df["Year"] == i]["rain"].min())
print("____________________________________")
# # Correlation between rain and temperature
corr = df.corr(method="pearson")
fig = px.imshow(corr)
fig.show()
|
[{"bangladesh-weather-dataset/Temp_and_rain.csv": {"column_names": "[\"tem\", \"Month\", \"Year\", \"rain\"]", "column_data_types": "{\"tem\": \"float64\", \"Month\": \"int64\", \"Year\": \"int64\", \"rain\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1380 entries, 0 to 1379\nData columns (total 4 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 tem 1380 non-null float64\n 1 Month 1380 non-null int64 \n 2 Year 1380 non-null int64 \n 3 rain 1380 non-null float64\ndtypes: float64(2), int64(2)\nmemory usage: 43.2 KB\n", "summary": "{\"tem\": {\"count\": 1380.0, \"mean\": 25.066212608695654, \"std\": 3.6824113257122137, \"min\": 16.8006, \"25%\": 22.114125, \"50%\": 27.01295, \"75%\": 28.00785, \"max\": 29.526}, \"Month\": {\"count\": 1380.0, \"mean\": 6.5, \"std\": 3.453303953367518, \"min\": 1.0, \"25%\": 3.75, \"50%\": 6.5, \"75%\": 9.25, \"max\": 12.0}, \"Year\": {\"count\": 1380.0, \"mean\": 1958.0, \"std\": 33.20841956197563, \"min\": 1901.0, \"25%\": 1929.0, \"50%\": 1958.0, \"75%\": 1987.0, \"max\": 2015.0}, \"rain\": {\"count\": 1380.0, \"mean\": 203.2754314057971, \"std\": 202.73089795533363, \"min\": 0.0, \"25%\": 18.498849999999997, \"50%\": 145.086, \"75%\": 347.6395, \"max\": 1012.02}}", "examples": "{\"tem\":{\"0\":16.976,\"1\":19.9026,\"2\":24.3158,\"3\":28.1834},\"Month\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"Year\":{\"0\":1901,\"1\":1901,\"2\":1901,\"3\":1901},\"rain\":{\"0\":18.5356,\"1\":16.2548,\"2\":70.7981,\"3\":66.1616}}"}}]
| true | 1 |
<start_data_description><data_path>bangladesh-weather-dataset/Temp_and_rain.csv:
<column_names>
['tem', 'Month', 'Year', 'rain']
<column_types>
{'tem': 'float64', 'Month': 'int64', 'Year': 'int64', 'rain': 'float64'}
<dataframe_Summary>
{'tem': {'count': 1380.0, 'mean': 25.066212608695654, 'std': 3.6824113257122137, 'min': 16.8006, '25%': 22.114125, '50%': 27.01295, '75%': 28.00785, 'max': 29.526}, 'Month': {'count': 1380.0, 'mean': 6.5, 'std': 3.453303953367518, 'min': 1.0, '25%': 3.75, '50%': 6.5, '75%': 9.25, 'max': 12.0}, 'Year': {'count': 1380.0, 'mean': 1958.0, 'std': 33.20841956197563, 'min': 1901.0, '25%': 1929.0, '50%': 1958.0, '75%': 1987.0, 'max': 2015.0}, 'rain': {'count': 1380.0, 'mean': 203.2754314057971, 'std': 202.73089795533363, 'min': 0.0, '25%': 18.498849999999997, '50%': 145.086, '75%': 347.6395, 'max': 1012.02}}
<dataframe_info>
RangeIndex: 1380 entries, 0 to 1379
Data columns (total 4 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 tem 1380 non-null float64
1 Month 1380 non-null int64
2 Year 1380 non-null int64
3 rain 1380 non-null float64
dtypes: float64(2), int64(2)
memory usage: 43.2 KB
<some_examples>
{'tem': {'0': 16.976, '1': 19.9026, '2': 24.3158, '3': 28.1834}, 'Month': {'0': 1, '1': 2, '2': 3, '3': 4}, 'Year': {'0': 1901, '1': 1901, '2': 1901, '3': 1901}, 'rain': {'0': 18.5356, '1': 16.2548, '2': 70.7981, '3': 66.1616}}
<end_description>
| 1,581 | 0 | 2,035 | 1,581 |
69543364
|
import pandas as pd
import numpy as np
import keras
from keras.applications.resnet50 import ResNet50
from keras.applications.inception_resnet_v2 import InceptionResNetV2
from keras.preprocessing.image import ImageDataGenerator, array_to_img
from keras.models import Sequential
from keras.layers import (
Dense,
Dropout,
InputLayer,
Conv2D,
MaxPool2D,
Activation,
Flatten,
BatchNormalization,
)
from keras.optimizers import RMSprop, Adam, SGD
from keras.utils import np_utils
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
# ## Check the training images
# Display one of the training images
from PIL import Image
import matplotlib.pyplot as plt
im = np.array(
Image.open("../input/plant-seedlings-classification/train/Maize/006196e1c.png")
)
plt.imshow(im)
# ## Check the number of training images per species
# Create a list of plant species names
species_list = [
"Black-grass",
"Charlock",
"Cleavers",
"Common Chickweed",
"Common wheat",
"Fat Hen",
"Loose Silky-bent",
"Maize",
"Scentless Mayweed",
"Shepherds Purse",
"Small-flowered Cranesbill",
"Sugar beet",
]
import glob
file_num = []
# Count the number of images in each directory and collect the counts in a list
for i in range(12):
imfile = glob.glob(
"../input/plant-seedlings-classification/train/" + species_list[i] + "/*.png"
)
file_num += [len(imfile)]
file_num
# Plot the counts as a bar chart
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(8, 5))
ax = fig.add_subplot(111)
ax.bar(species_list, file_num)
plt.xticks(rotation=90)
plt.show()
# ## Preprocess the training data with Keras's ImageDataGenerator
# Apply data augmentation to the training images with the following settings.
train_generator = ImageDataGenerator(
    # range of random rotations
    rotation_range=80,
    # range of random zoom
    zoom_range=0.2,
    # ranges of random horizontal / vertical shifts
    width_shift_range=0.1,
    height_shift_range=0.1,
    # counter-clockwise shear intensity
    shear_range=0.2,
    # randomly flip images horizontally and vertically
    vertical_flip=True,
    horizontal_flip=True,
)
train_generator = train_generator.flow_from_directory(
directory="/kaggle/input/plant-seedlings-classification/train",
target_size=(299, 299),
batch_size=32,
color_mode="rgb",
    # Represent labels as one-hot vectors
class_mode="categorical",
subset="training",
)
test_generator = ImageDataGenerator()
test_generator = test_generator.flow_from_directory(
directory="/kaggle/input/plant-seedlings-classification/",
classes=["test"],
target_size=(299, 299),
batch_size=1,
color_mode="rgb",
shuffle=False,
class_mode="categorical",
)
# Check the training images after preprocessing
plt.figure(figsize=(32, 32))
for i in range(6):
batches = next(train_generator)
    # To display as an image, keep the 3-D array and cast it from float to uint8 (0-255).
gen_img = batches[0][i].astype(np.uint8)
# gen_img = batches[0][i]
# gen_img = array_to_img(gen_img,scale=True)
plt.subplot(2, 3, i + 1)
plt.imshow(gen_img)
img = train_generator[0][0][1].astype(np.uint8)
# img = array_to_img(img,scale=True)
plt.imshow(img)
# ## Define the neural network model
# InceptionResNetV2 is used as the backbone.
ResNetV2 = InceptionResNetV2(
include_top=False, weights="imagenet", pooling="avg", input_shape=(299, 299, 3)
)
ResNetV2.output_shape
model_conv = Sequential(
[
ResNetV2,
Dense(128, activation="relu"),
# BatchNormalization(),
Dense(12, activation="softmax"),
]
)
# adam = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
sgd = SGD(lr=0.0002, momentum=0.9, decay=0.0, nesterov=False)
# Compile the model (use the SGD optimizer defined above so that lr=0.0002 and momentum=0.9 take effect)
model_conv.compile(
    loss="categorical_crossentropy", optimizer=sgd, metrics=["accuracy"]
)
model_conv.summary()
# ## Train the neural network
epochs = 30
batch_size = 32
history = model_conv.fit(
train_generator, batch_size=batch_size, epochs=epochs, verbose=1
)
train_generator.class_indices
# ## Create the submission file
# The network output is a 12-dimensional vector. Because the output layer uses the softmax
# activation, each output value lies between 0.0 and 1.0 and the 12 outputs sum to 1.
# In other words, the output is a probability distribution over the 12 plant species.
#
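# Toy illustration (added; not part of the original notebook): a softmax output sums to 1
# and argmax picks the most probable class.
_toy_probs = np.array([0.01, 0.02, 0.05, 0.70, 0.02, 0.03, 0.04, 0.05, 0.02, 0.02, 0.02, 0.02])
print(_toy_probs.sum(), species_list[_toy_probs.argmax()])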
# Check the network output
predict = model_conv.predict(test_generator, steps=test_generator.samples)
predict[0]
predict = model_conv.predict(test_generator, steps=test_generator.samples)
class_list = []
for i in range(0, predict.shape[0]):
    # Append the species predicted with the highest probability to class_list
y_class = predict[i, :].argmax(axis=-1)
class_list += [species_list[y_class]]
submission = pd.DataFrame()
submission["file"] = test_generator.filenames
submission["file"] = submission["file"].str.replace(r"test/", "")
submission["species"] = class_list
submission.to_csv("submission.csv", index=False)
print("Submission file generated. All done.")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/543/69543364.ipynb
| null | null |
[{"Id": 69543364, "ScriptId": 18990704, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7717882, "CreationDate": "08/01/2021 10:21:57", "VersionNumber": 1.0, "Title": "Plant Seedlings Classification_test", "EvaluationDate": "08/01/2021", "IsChange": false, "TotalLines": 162.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 162.0, "LinesInsertedFromFork": 0.0, "LinesDeletedFromFork": 0.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 162.0, "TotalVotes": 0}]
| null | null | null | null |
import pandas as pd
import numpy as np
import keras
from keras.applications.resnet50 import ResNet50
from keras.applications.inception_resnet_v2 import InceptionResNetV2
from keras.preprocessing.image import ImageDataGenerator, array_to_img
from keras.models import Sequential
from keras.layers import (
Dense,
Dropout,
InputLayer,
Conv2D,
MaxPool2D,
Activation,
Flatten,
BatchNormalization,
)
from keras.optimizers import RMSprop, Adam, SGD
from keras.utils import np_utils
from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
# ## Check the training images
# Display one of the training images
from PIL import Image
import matplotlib.pyplot as plt
im = np.array(
Image.open("../input/plant-seedlings-classification/train/Maize/006196e1c.png")
)
plt.imshow(im)
# ## Check the number of training images per species
# Create a list of plant species names
species_list = [
"Black-grass",
"Charlock",
"Cleavers",
"Common Chickweed",
"Common wheat",
"Fat Hen",
"Loose Silky-bent",
"Maize",
"Scentless Mayweed",
"Shepherds Purse",
"Small-flowered Cranesbill",
"Sugar beet",
]
import glob
file_num = []
# Count the number of images in each directory and collect the counts in a list
for i in range(12):
imfile = glob.glob(
"../input/plant-seedlings-classification/train/" + species_list[i] + "/*.png"
)
file_num += [len(imfile)]
file_num
# Plot the counts as a bar chart
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(8, 5))
ax = fig.add_subplot(111)
ax.bar(species_list, file_num)
plt.xticks(rotation=90)
plt.show()
# ## Preprocess the training data with Keras's ImageDataGenerator
# Apply data augmentation to the training images with the following settings.
train_generator = ImageDataGenerator(
    # range of random rotations
    rotation_range=80,
    # range of random zoom
    zoom_range=0.2,
    # ranges of random horizontal / vertical shifts
    width_shift_range=0.1,
    height_shift_range=0.1,
    # counter-clockwise shear intensity
    shear_range=0.2,
    # randomly flip images horizontally and vertically
    vertical_flip=True,
    horizontal_flip=True,
)
train_generator = train_generator.flow_from_directory(
directory="/kaggle/input/plant-seedlings-classification/train",
target_size=(299, 299),
batch_size=32,
color_mode="rgb",
    # Represent labels as one-hot vectors
class_mode="categorical",
subset="training",
)
test_generator = ImageDataGenerator()
test_generator = test_generator.flow_from_directory(
directory="/kaggle/input/plant-seedlings-classification/",
classes=["test"],
target_size=(299, 299),
batch_size=1,
color_mode="rgb",
shuffle=False,
class_mode="categorical",
)
# Check the training images after preprocessing
plt.figure(figsize=(32, 32))
for i in range(6):
batches = next(train_generator)
    # To display as an image, keep the 3-D array and cast it from float to uint8 (0-255).
gen_img = batches[0][i].astype(np.uint8)
# gen_img = batches[0][i]
# gen_img = array_to_img(gen_img,scale=True)
plt.subplot(2, 3, i + 1)
plt.imshow(gen_img)
img = train_generator[0][0][1].astype(np.uint8)
# img = array_to_img(img,scale=True)
plt.imshow(img)
# ## Define the neural network model
# InceptionResNetV2 is used as the backbone.
ResNetV2 = InceptionResNetV2(
include_top=False, weights="imagenet", pooling="avg", input_shape=(299, 299, 3)
)
ResNetV2.output_shape
model_conv = Sequential(
[
ResNetV2,
Dense(128, activation="relu"),
# BatchNormalization(),
Dense(12, activation="softmax"),
]
)
# adam = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
sgd = SGD(lr=0.0002, momentum=0.9, decay=0.0, nesterov=False)
# Compile the model (use the SGD optimizer defined above so that lr=0.0002 and momentum=0.9 take effect)
model_conv.compile(
    loss="categorical_crossentropy", optimizer=sgd, metrics=["accuracy"]
)
model_conv.summary()
# ## Train the neural network
epochs = 30
batch_size = 32
history = model_conv.fit(
train_generator, batch_size=batch_size, epochs=epochs, verbose=1
)
train_generator.class_indices
# ## Create the submission file
# The network output is a 12-dimensional vector. Because the output layer uses the softmax
# activation, each output value lies between 0.0 and 1.0 and the 12 outputs sum to 1.
# In other words, the output is a probability distribution over the 12 plant species.
#
# Check the network output
predict = model_conv.predict(test_generator, steps=test_generator.samples)
predict[0]
predict = model_conv.predict(test_generator, steps=test_generator.samples)
class_list = []
for i in range(0, predict.shape[0]):
    # Append the species predicted with the highest probability to class_list
y_class = predict[i, :].argmax(axis=-1)
class_list += [species_list[y_class]]
submission = pd.DataFrame()
submission["file"] = test_generator.filenames
submission["file"] = submission["file"].str.replace(r"test/", "")
submission["species"] = class_list
submission.to_csv("submission.csv", index=False)
print("Submission file generated. All done.")
| false | 0 | 1,762 | 0 | 1,762 | 1,762 |
||
69543962
|
import nltk
# NOTE: `text` was never defined in the original notebook; a small sample string is assumed
# here so that the cells below can run.
text = (
    "Natural language processing with NLTK makes tokenization, stemming and tagging "
    "straightforward. Sundar Pichai is the CEO of Google."
)
text = text.lower()
text
from nltk import sent_tokenize
# Tokenize sentences
sent_text = sent_tokenize(text)
sent_text
from nltk import word_tokenize
# Do the word-tokenization from sentence-tokenization
for sent in sent_text:
word_text = word_tokenize(sent)
print(word_text)
# ## Stop Word Removal
from nltk.corpus import stopwords
stopwords_en = stopwords.words("english")
stopwords_en # pre-defined set of stop words in the english language
from string import punctuation
print("From string punctuation:", type(punctuation), punctuation)
stopwords_en = set(stopwords_en)
stopwords_en_with_punct = stopwords_en.union(set(punctuation))
print(stopwords_en_with_punct)
word_text2 = word_tokenize(text)
word_text2
# Filter out stopwords and punctuation, keeping only the remaining words
word_text2_filtered = [
word for word in word_text2 if word not in stopwords_en_with_punct
]
# ## Stemming & Lemmatization
# 
from nltk.stem import PorterStemmer
stemmer = PorterStemmer()
for word in word_text2_filtered:
print(stemmer.stem(word))
nltk.download("wordnet")
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
for word in word_text2_filtered:
print(lemmatizer.lemmatize(word))
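# Side note (added for illustration): WordNetLemmatizer treats every word as a noun unless a
# part-of-speech tag is supplied, so passing pos="v" often gives better results for verbs.
print(lemmatizer.lemmatize("running"), lemmatizer.lemmatize("running", pos="v"))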
# ## POS Tagging
nltk.download("averaged_perceptron_tagger")
tagged_text = nltk.pos_tag(word_text2_filtered)
tagged_text
# ## POS taggers list
# * CC coordinating conjunction
# * CD cardinal digit
# * DT determiner
# * EX existential there (like: “there is” … think of it like “there exists”)
# * FW foreign word
# * IN preposition/subordinating conjunction
# * JJ adjective ‘big’
# * JJR adjective, comparative ‘bigger’
# * JJS adjective, superlative ‘biggest’
# * LS list marker 1)
# * MD modal could, will
# * NN noun, singular ‘desk’
# * NNS noun plural ‘desks’
# * NNP proper noun, singular ‘Harrison’
# * NNPS proper noun, plural ‘Americans’
# * PDT predeterminer ‘all the kids’
# * POS possessive ending parent’s
# * PRP personal pronoun I, he, she
# * PRP$ possessive pronoun my, his, hers
# * RB adverb very, silently,
# * RBR adverb, comparative better
# * RBS adverb, superlative best
# * RP particle give up
# * TO to go ‘to’ the store
# * UH interjection, errrrrrrrm
# * VB verb, base form take
# * VBD verb, past tense took
# * VBG verb, gerund/present participle taking
# * VBN verb, past participle taken
# * VBP verb, sing. present, non-3d take
# * VBZ verb, 3rd person sing. present takes
# * WDT wh-determiner which
# * WP wh-pronoun who, what
# * WP$ possessive wh-pronoun whose
# * WRB wh-adverb where, when
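# The same tag descriptions can also be printed programmatically (illustration only; this
# may require nltk.download("tagsets") in a fresh environment).
nltk.help.upenn_tagset("NN")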
# ## Named Entity Recognition
# 
ne_chunked = nltk.ne_chunk(tagged_text)
print(ne_chunked)
named_entities = []
for tagged_tree in ne_chunked:
if hasattr(tagged_tree, "label"):
entity_name = " ".join(c[0] for c in tagged_tree.leaves())
entity_type = tagged_tree.label()
named_entities.append((entity_name, entity_type))
print(named_entities)
text3 = "Sundar is the CEO of Google, which is an American company."
tokenised = nltk.word_tokenize(text3)
tagged_text3 = nltk.pos_tag(tokenised)
ne_chunked = nltk.ne_chunk(tagged_text3)
named_entities = []
for tagged_tree in ne_chunked:
if hasattr(tagged_tree, "label"):
entity_name = " ".join(c[0] for c in tagged_tree.leaves())
entity_type = tagged_tree.label()
named_entities.append((entity_name, entity_type))
print(named_entities)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/543/69543962.ipynb
| null | null |
[{"Id": 69543962, "ScriptId": 18990223, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5513605, "CreationDate": "08/01/2021 10:31:29", "VersionNumber": 2.0, "Title": "NLTK Beginners", "EvaluationDate": "08/01/2021", "IsChange": true, "TotalLines": 175.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 175.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
| null | null | null | null |
import nltk
# NOTE: `text` was never defined in the original notebook; a small sample string is assumed
# here so that the cells below can run.
text = (
    "Natural language processing with NLTK makes tokenization, stemming and tagging "
    "straightforward. Sundar Pichai is the CEO of Google."
)
text = text.lower()
text
from nltk import sent_tokenize
# Tokenize sentences
sent_text = sent_tokenize(text)
sent_text
from nltk import word_tokenize
# Do the word-tokenization from sentence-tokenization
for sent in sent_text:
word_text = word_tokenize(sent)
print(word_text)
# ## Stop Word Removal
from nltk.corpus import stopwords
stopwords_en = stopwords.words("english")
stopwords_en # pre-defined set of stop words in the english language
from string import punctuation
print("From string punctuation:", type(punctuation), punctuation)
stopwords_en = set(stopwords_en)
stopwords_en_with_punct = stopwords_en.union(set(punctuation))
print(stopwords_en_with_punct)
word_text2 = word_tokenize(text)
word_text2
# Filter out stopwords and punctuation, keeping only the remaining words
word_text2_filtered = [
word for word in word_text2 if word not in stopwords_en_with_punct
]
# ## Stemming & Lemmatization
# 
from nltk.stem import PorterStemmer
stemmer = PorterStemmer()
for word in word_text2_filtered:
print(stemmer.stem(word))
nltk.download("wordnet")
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
for word in word_text2_filtered:
print(lemmatizer.lemmatize(word))
# ## POS Tagging
nltk.download("averaged_perceptron_tagger")
tagged_text = nltk.pos_tag(word_text2_filtered)
tagged_text
# ## POS taggers list
# * CC coordinating conjunction
# * CD cardinal digit
# * DT determiner
# * EX existential there (like: “there is” … think of it like “there exists”)
# * FW foreign word
# * IN preposition/subordinating conjunction
# * JJ adjective ‘big’
# * JJR adjective, comparative ‘bigger’
# * JJS adjective, superlative ‘biggest’
# * LS list marker 1)
# * MD modal could, will
# * NN noun, singular ‘desk’
# * NNS noun plural ‘desks’
# * NNP proper noun, singular ‘Harrison’
# * NNPS proper noun, plural ‘Americans’
# * PDT predeterminer ‘all the kids’
# * POS possessive ending parent’s
# * PRP personal pronoun I, he, she
# * PRP$ possessive pronoun my, his, hers
# * RB adverb very, silently,
# * RBR adverb, comparative better
# * RBS adverb, superlative best
# * RP particle give up
# * TO to go ‘to’ the store
# * UH interjection, errrrrrrrm
# * VB verb, base form take
# * VBD verb, past tense took
# * VBG verb, gerund/present participle taking
# * VBN verb, past participle taken
# * VBP verb, sing. present, non-3d take
# * VBZ verb, 3rd person sing. present takes
# * WDT wh-determiner which
# * WP wh-pronoun who, what
# * WP$ possessive wh-pronoun whose
# * WRB wh-adverb where, when
# ## Named Entity Recognition
# 
ne_chunked = nltk.ne_chunk(tagged_text)
print(ne_chunked)
named_entities = []
for tagged_tree in ne_chunked:
if hasattr(tagged_tree, "label"):
entity_name = " ".join(c[0] for c in tagged_tree.leaves())
entity_type = tagged_tree.label()
named_entities.append((entity_name, entity_type))
print(named_entities)
text3 = "Sundar is the CEO of Google, which is an American company."
tokenised = nltk.word_tokenize(text3)
tagged_text3 = nltk.pos_tag(tokenised)
ne_chunked = nltk.ne_chunk(tagged_text3)
named_entities = []
for tagged_tree in ne_chunked:
if hasattr(tagged_tree, "label"):
entity_name = " ".join(c[0] for c in tagged_tree.leaves())
entity_type = tagged_tree.label()
named_entities.append((entity_name, entity_type))
print(named_entities)
| false | 0 | 1,138 | 1 | 1,138 | 1,138 |
||
69543514
|
<jupyter_start><jupyter_text>NIH Chest X-rays
# NIH Chest X-ray Dataset
---
### National Institutes of Health Chest X-Ray Dataset
Chest X-ray exams are one of the most frequent and cost-effective medical imaging examinations available. However, clinical diagnosis of a chest X-ray can be challenging and sometimes more difficult than diagnosis via chest CT imaging. The lack of large publicly available datasets with annotations means it is still very difficult, if not impossible, to achieve clinically relevant computer-aided detection and diagnosis (CAD) in real world medical sites with chest X-rays. One major hurdle in creating large X-ray image datasets is the lack of resources for labeling so many images. Prior to the release of this dataset, [Openi][1] was the largest publicly available source of chest X-ray images with 4,143 images available.
This NIH Chest X-ray Dataset is comprised of 112,120 X-ray images with disease labels from 30,805 unique patients. To create these labels, the authors used Natural Language Processing to text-mine disease classifications from the associated radiological reports. The labels are expected to be >90% accurate and suitable for weakly-supervised learning. The original radiology reports are not publicly available but you can find more details on the labeling process in this Open Access paper: "ChestX-ray8: Hospital-scale Chest X-ray Database and Benchmarks on Weakly-Supervised Classification and Localization of Common Thorax Diseases." (*Wang et al.*)
[Link to paper][30]
[1]: https://openi.nlm.nih.gov/
<br>
### Data limitations:
1. The image labels are NLP extracted so there could be some erroneous labels but the NLP labeling accuracy is estimated to be >90%.
2. Very limited numbers of disease region bounding boxes (See BBox_list_2017.csv)
3. Chest x-ray radiology reports are not anticipated to be publicly shared. Parties who use this public dataset are encouraged to share their “updated” image labels and/or new bounding boxes in their own studied later, maybe through manual annotation
<br>
### File contents
- **Image format**: 112,120 total images with size 1024 x 1024
- **images_001.zip**: Contains 4999 images
- **images_002.zip**: Contains 10,000 images
- **images_003.zip**: Contains 10,000 images
- **images_004.zip**: Contains 10,000 images
- **images_005.zip**: Contains 10,000 images
- **images_006.zip**: Contains 10,000 images
- **images_007.zip**: Contains 10,000 images
- **images_008.zip**: Contains 10,000 images
- **images_009.zip**: Contains 10,000 images
- **images_010.zip**: Contains 10,000 images
- **images_011.zip**: Contains 10,000 images
- **images_012.zip**: Contains 7,121 images
- **README_ChestXray.pdf**: Original README file
- **BBox_list_2017.csv**: Bounding box coordinates. *Note: Start at x,y, extend horizontally w pixels, and vertically h pixels*
- Image Index: File name
- Finding Label: Disease type (Class label)
- Bbox x
- Bbox y
- Bbox w
- Bbox h
- **Data_entry_2017.csv**: Class labels and patient data for the entire dataset
- Image Index: File name
- Finding Labels: Disease type (Class label)
- Follow-up #
- Patient ID
- Patient Age
- Patient Gender
- View Position: X-ray orientation
- OriginalImageWidth
- OriginalImageHeight
- OriginalImagePixelSpacing_x
- OriginalImagePixelSpacing_y
<br>
### Class descriptions
There are 15 classes (14 diseases, and one for "No findings"). Images can be classified as "No findings" or one or more disease classes:
- Atelectasis
- Consolidation
- Infiltration
- Pneumothorax
- Edema
- Emphysema
- Fibrosis
- Effusion
- Pneumonia
- Pleural_thickening
- Cardiomegaly
- Nodule
- Mass
- Hernia
<br>
### Full Dataset Content
There are 12 zip files in total and range from ~2 gb to 4 gb in size. Additionally, we randomly sampled 5% of these images and created a smaller dataset for use in Kernels. The random sample contains 5606 X-ray images and class labels.
- [Sample][9]: sample.zip
[9]: https://www.kaggle.com/nih-chest-xrays/sample
<br>
### Modifications to original data
- Original TAR archives were converted to ZIP archives to be compatible with the Kaggle platform
- CSV headers slightly modified to be more explicit in comma separation and also to allow fields to be self-explanatory
<br>
### Citations
- Wang X, Peng Y, Lu L, Lu Z, Bagheri M, Summers RM. ChestX-ray8: Hospital-scale Chest X-ray Database and Benchmarks on Weakly-Supervised Classification and Localization of Common Thorax Diseases. IEEE CVPR 2017, [ChestX-ray8_Hospital-Scale_Chest_CVPR_2017_paper.pdf][30]
- NIH News release: [NIH Clinical Center provides one of the largest publicly available chest x-ray datasets to scientific community][30]
- Original source files and documents: [https://nihcc.app.box.com/v/ChestXray-NIHCC/folder/36938765345][31]
<br>
Kaggle dataset identifier: data
<jupyter_script># !pip install git+https://github.com/keras-team/keras-applications.git -q
# import keras_applications as ka
# def set_to_tf(ka):
# from tensorflow.keras import backend, layers, models, utils
# ka._KERAS_BACKEND = backend
# ka._KERAS_LAYERS = layers
# ka._KERAS_MODELS = models
# ka._KERAS_UTILS = utils
# set_to_tf(ka)
# !pip install /kaggle/input/keras-pretrained-imagenet-weights/image_classifiers-1.0.0-py3-none-any.whl
# from classification_models.tfkeras import Classifiers
# Classifiers.models_names()
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
# ML tools
import tensorflow as tf
from kaggle_datasets import KaggleDatasets
from keras.models import Sequential
from keras import layers
from keras.optimizers import Adam
from tensorflow.keras import Model
# import tensorflow.keras.applications.efficientnet as efn
from tensorflow.keras.applications import *
import os
from keras import optimizers
from sklearn.model_selection import train_test_split
from tensorflow.keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping
df = pd.read_csv("../input/nih-dataframe/NIH_Dataframe.csv")
df.img_ind = df.img_ind.apply(lambda x: x.split(".")[0])
display(df.head(4))
print(df.shape)
target_cols = df.drop(["img_ind"], axis=1).columns.to_list()
n_classes = len(target_cols)
img_size = 256
n_epochs = 35
lr = 0.0001
seed = 11
val_split = 0.2
seed = 33
batch_size = 12
n_classes
def auto_select_accelerator():
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
print("Running on TPU:", tpu.master())
except ValueError:
strategy = tf.distribute.get_strategy()
print(f"Running on {strategy.num_replicas_in_sync} replicas")
return strategy
"""
Reference
https://www.kaggle.com/xhlulu/ranzcr-efficientnet-tpu-training
"""
def build_decoder(with_labels=True, target_size=(img_size, img_size), ext="jpg"):
def decode(path):
file_bytes = tf.io.read_file(
path
) # Reads and outputs the entire contents of the input filename.
if ext == "png":
img = tf.image.decode_png(
file_bytes, channels=3
) # Decode a PNG-encoded image to a uint8 or uint16 tensor
elif ext in ["jpg", "jpeg"]:
img = tf.image.decode_jpeg(
file_bytes, channels=3
) # Decode a JPEG-encoded image to a uint8 tensor
else:
raise ValueError("Image extension not supported")
img = tf.cast(
img, tf.float32
) # Casts a tensor to the type float32 and divides by 255.
img = tf.image.resize(img, target_size) # Resizing to target size
return img
def decode_with_labels(path, label):
return decode(path), label
return decode_with_labels if with_labels else decode
def build_augmenter(with_labels=True):
def augment(img):
img = tf.image.random_flip_left_right(img)
img = tf.image.random_flip_up_down(img)
img = tf.image.random_saturation(img, 0.8, 1.2)
img = tf.image.random_brightness(img, 0.1)
img = tf.image.random_contrast(img, 0.8, 1.2)
return img
def augment_with_labels(img, label):
return augment(img), label
return augment_with_labels if with_labels else augment
def build_dataset(
paths,
labels=None,
bsize=32,
cache=True,
decode_fn=None,
augment_fn=None,
augment=True,
repeat=True,
shuffle=1024,
cache_dir="",
):
if cache_dir != "" and cache is True:
os.makedirs(cache_dir, exist_ok=True)
if decode_fn is None:
decode_fn = build_decoder(labels is not None)
if augment_fn is None:
augment_fn = build_augmenter(labels is not None)
AUTO = tf.data.experimental.AUTOTUNE
slices = paths if labels is None else (paths, labels)
dset = tf.data.Dataset.from_tensor_slices(slices)
dset = dset.map(decode_fn, num_parallel_calls=AUTO)
dset = dset.cache(cache_dir) if cache else dset
dset = dset.map(augment_fn, num_parallel_calls=AUTO) if augment else dset
dset = dset.repeat() if repeat else dset
dset = dset.shuffle(shuffle) if shuffle else dset
dset = dset.batch(bsize).prefetch(
AUTO
) # overlaps data preprocessing and model execution while training
return dset
DATASET_NAME = "nih-image-600x600-data"
strategy = auto_select_accelerator()
batch_size = strategy.num_replicas_in_sync * batch_size
print("batch size", batch_size)
GCS_DS_PATH = KaggleDatasets().get_gcs_path(DATASET_NAME)
GCS_DS_PATH
paths = GCS_DS_PATH + "/NIH_Images/" + df["img_ind"] + ".jpg"
# #Get the multi-labels
# label_cols = df.columns[:-1]
# labels = df[label_cols].values
def get_label(df, label):
labels = df[label].values
return labels
# Train test split
def t_t_s(df, paths, label):
labels = get_label(df, label)
(train_paths, valid_paths, train_labels, valid_labels) = train_test_split(
paths, labels, test_size=val_split, random_state=11
)
return train_paths, valid_paths, train_labels, valid_labels
# print(train_paths.shape, valid_paths.shape)
# train_labels.sum(axis=0), valid_labels.sum(axis=0)
# ## for Cardiomegaly
tr_paths, val_paths, tr_labels, val_labels = t_t_s(df, paths, "Cardiomegaly")
# Build the tensorflow datasets
decoder = build_decoder(with_labels=True, target_size=(img_size, img_size))
# Build the tensorflow datasets
dtrain = build_dataset(tr_paths, tr_labels, bsize=batch_size, decode_fn=decoder)
dvalid = build_dataset(
val_paths,
val_labels,
bsize=batch_size,
repeat=False,
shuffle=False,
augment=False,
decode_fn=decoder,
)
data, _ = dtrain.take(2)
images = data[0].numpy()
fig, axes = plt.subplots(3, 4, figsize=(20, 10))
axes = axes.flatten()
for img, ax in zip(images, axes):
img = img / 255.0
ax.imshow(img)
ax.axis("off")
plt.tight_layout()
plt.show()
def build_model():
# seresnet152, _ = Classifiers.get('seresnet152')
# base = seresnet152(input_shape=(img_size, img_size, 3), include_top=False, weights='imagenet')
base = efficientnet.EfficientNetB0(include_top=False, weights="imagenet")
pre = efficientnet.preprocess_input
inp = layers.Input(shape=(img_size, img_size, 3))
x = pre(inp)
x = base(x)
x = layers.GlobalAveragePooling2D()(layers.Dropout(0.16)(x))
x = layers.Dropout(0.3)(x)
x = layers.Dense(1, "sigmoid")(x)
return Model(inp, x)
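# Note (added for clarity): the head ends in a single sigmoid unit because each run of this
# notebook trains a binary classifier for a single finding (Cardiomegaly here) rather than a
# multi-label model over all of the target columns.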
with strategy.scope():
model = build_model()
loss = tf.keras.losses.BinaryCrossentropy(label_smoothing=0.0)
model.compile(
optimizers.Adam(lr=lr),
loss=loss,
metrics=[tf.keras.metrics.AUC(multi_label=True)],
)
model.summary()
name = "NIH_Cardiomegaly_model.h5"
rlr = ReduceLROnPlateau(
monitor="val_loss",
factor=0.1,
patience=2,
verbose=1,
min_delta=1e-4,
min_lr=1e-6,
mode="min",
cooldown=1,
)
ckp = ModelCheckpoint(
name, monitor="val_loss", verbose=1, save_best_only=True, mode="min"
)
es = EarlyStopping(
monitor="val_loss",
min_delta=1e-4,
patience=5,
mode="min",
restore_best_weights=True,
verbose=1,
)
steps_per_epoch = tr_paths.shape[0] // batch_size
steps_per_epoch
history = model.fit(
dtrain,
validation_data=dvalid,
epochs=n_epochs,
callbacks=[rlr, es, ckp],
steps_per_epoch=steps_per_epoch,
verbose=1,
)
plt.figure(figsize=(12, 6))
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.plot(history.history["loss"], label="Training Loss", marker="o")
plt.plot(history.history["val_loss"], label="Validation Loss", marker="+")
plt.grid(True)
plt.legend()
plt.show()
plt.figure(figsize=(12, 6))
plt.xlabel("Epochs")
plt.ylabel("AUC")
plt.plot(history.history["auc"], label="Training AUC", marker="o")
plt.plot(history.history["val_auc"], label="Validation AUC", marker="+")
plt.grid(True)
plt.legend()
plt.show()
# tf.keras.backend.clear_session()
# from sklearn.metrics import roc_auc_score
# model= tf.keras.models.load_model(name)
# pred= model.predict(dvalid, verbose=1)
# print('AUC CKECK-UP per CLASS')
# classes= df.columns[:-1]
# for i, n in enumerate(classes):
# print(classes[i])
# print(i, roc_auc_score(valid_labels[:, i], pred[:, i]))
# print('---------')
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/543/69543514.ipynb
|
data
| null |
[{"Id": 69543514, "ScriptId": 18990770, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4519204, "CreationDate": "08/01/2021 10:24:40", "VersionNumber": 1.0, "Title": "Fork of NIH X-ray Training Cardiomegaly model", "EvaluationDate": "08/01/2021", "IsChange": true, "TotalLines": 273.0, "LinesInsertedFromPrevious": 2.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 271.0, "LinesInsertedFromFork": 2.0, "LinesDeletedFromFork": 2.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 271.0, "TotalVotes": 0}]
|
[{"Id": 92850459, "KernelVersionId": 69543514, "SourceDatasetVersionId": 18613}, {"Id": 92850462, "KernelVersionId": 69543514, "SourceDatasetVersionId": 1920987}, {"Id": 92850460, "KernelVersionId": 69543514, "SourceDatasetVersionId": 1108231}, {"Id": 92850461, "KernelVersionId": 69543514, "SourceDatasetVersionId": 1920904}]
|
[{"Id": 18613, "DatasetId": 5839, "DatasourceVersionId": 18613, "CreatorUserId": 998023, "LicenseName": "CC0: Public Domain", "CreationDate": "02/21/2018 20:52:23", "VersionNumber": 3.0, "Title": "NIH Chest X-rays", "Slug": "data", "Subtitle": "Over 112,000 Chest X-ray images from more than 30,000 unique patients", "Description": "# NIH Chest X-ray Dataset \n\n---\n\n### National Institutes of Health Chest X-Ray Dataset\n\nChest X-ray exams are one of the most frequent and cost-effective medical imaging examinations available. However, clinical diagnosis of a chest X-ray can be challenging and sometimes more difficult than diagnosis via chest CT imaging. The lack of large publicly available datasets with annotations means it is still very difficult, if not impossible, to achieve clinically relevant computer-aided detection and diagnosis (CAD) in real world medical sites with chest X-rays. One major hurdle in creating large X-ray image datasets is the lack resources for labeling so many images. Prior to the release of this dataset, [Openi][1] was the largest publicly available source of chest X-ray images with 4,143 images available.\n\nThis NIH Chest X-ray Dataset is comprised of 112,120 X-ray images with disease labels from 30,805 unique patients. To create these labels, the authors used Natural Language Processing to text-mine disease classifications from the associated radiological reports. The labels are expected to be >90% accurate and suitable for weakly-supervised learning. The original radiology reports are not publicly available but you can find more details on the labeling process in this Open Access paper: \"ChestX-ray8: Hospital-scale Chest X-ray Database and Benchmarks on Weakly-Supervised Classification and Localization of Common Thorax Diseases.\" (*Wang et al.*)\n\n[Link to paper][30]\n\n[1]: https://openi.nlm.nih.gov/\n\n<br>\n### Data limitations: \n\n1. The image labels are NLP extracted so there could be some erroneous labels but the NLP labeling accuracy is estimated to be >90%. \n2. Very limited numbers of disease region bounding boxes (See BBox_list_2017.csv)\n3. Chest x-ray radiology reports are not anticipated to be publicly shared. Parties who use this public dataset are encouraged to share their \u201cupdated\u201d image labels and/or new bounding boxes in their own studied later, maybe through manual annotation\n\n\n<br>\n### File contents\n\n- **Image format**: 112,120 total images with size 1024 x 1024\n\n- **images_001.zip**: Contains 4999 images\n\n- **images_002.zip**: Contains 10,000 images\n\n- **images_003.zip**: Contains 10,000 images\n\n- **images_004.zip**: Contains 10,000 images\n\n- **images_005.zip**: Contains 10,000 images\n\n- **images_006.zip**: Contains 10,000 images\n\n- **images_007.zip**: Contains 10,000 images\n\n- **images_008.zip**: Contains 10,000 images\n\n- **images_009.zip**: Contains 10,000 images\n\n- **images_010.zip**: Contains 10,000 images\n\n- **images_011.zip**: Contains 10,000 images\n\n- **images_012.zip**: Contains 7,121 images\n\n- **README_ChestXray.pdf**: Original README file\n\n- **BBox_list_2017.csv**: Bounding box coordinates. 
*Note: Start at x,y, extend horizontally w pixels, and vertically h pixels*\n - Image Index: File name\n - Finding Label: Disease type (Class label)\n - Bbox x \n - Bbox y\n - Bbox w\n - Bbox h\n\n\n- **Data_entry_2017.csv**: Class labels and patient data for the entire dataset\n - Image Index: File name\n - Finding Labels: Disease type (Class label)\n - Follow-up # \n - Patient ID\n - Patient Age\n - Patient Gender\n - View Position: X-ray orientation\n - OriginalImageWidth\n - OriginalImageHeight\n - OriginalImagePixelSpacing_x\n - OriginalImagePixelSpacing_y\n\n\n<br>\n### Class descriptions\n\nThere are 15 classes (14 diseases, and one for \"No findings\"). Images can be classified as \"No findings\" or one or more disease classes:\n\n- Atelectasis\n- Consolidation\n- Infiltration\n- Pneumothorax\n- Edema\n- Emphysema\n- Fibrosis\n- Effusion\n- Pneumonia\n- Pleural_thickening\n- Cardiomegaly\n- Nodule Mass\n- Hernia\n\n\n<br>\n### Full Dataset Content\n\nThere are 12 zip files in total and range from ~2 gb to 4 gb in size. Additionally, we randomly sampled 5% of these images and created a smaller dataset for use in Kernels. The random sample contains 5606 X-ray images and class labels. \n\n- [Sample][9]: sample.zip\n\n[9]: https://www.kaggle.com/nih-chest-xrays/sample\n\n\n\n<br>\n### Modifications to original data\n\n- Original TAR archives were converted to ZIP archives to be compatible with the Kaggle platform\n\n- CSV headers slightly modified to be more explicit in comma separation and also to allow fields to be self-explanatory\n\n\n<br>\n### Citations\n\n- Wang X, Peng Y, Lu L, Lu Z, Bagheri M, Summers RM. ChestX-ray8: Hospital-scale Chest X-ray Database and Benchmarks on Weakly-Supervised Classification and Localization of Common Thorax Diseases. IEEE CVPR 2017, [ChestX-ray8_Hospital-Scale_Chest_CVPR_2017_paper.pdf][30]\n\n- NIH News release: [NIH Clinical Center provides one of the largest publicly available chest x-ray datasets to scientific community][30]\n\n- Original source files and documents: [https://nihcc.app.box.com/v/ChestXray-NIHCC/folder/36938765345][31]\n\n<br>\n### Acknowledgements\n\nThis work was supported by the Intramural Research Program of the NClinical Center (clinicalcenter.nih.gov) and National Library of Medicine (www.nlm.nih.gov). \n\n\n [30]: https://www.nih.gov/news-events/news-releases/nih-clinical-center-provides-one-largest-publicly-available-chest-x-ray-datasets-scientific-community\n\n [31]: https://nihcc.app.box.com/v/ChestXray-NIHCC/folder/36938765345", "VersionNotes": "Add updated material", "TotalCompressedBytes": 45087244360.0, "TotalUncompressedBytes": 45087244360.0}]
|
[{"Id": 5839, "CreatorUserId": 998023, "OwnerUserId": NaN, "OwnerOrganizationId": 1146.0, "CurrentDatasetVersionId": 18613.0, "CurrentDatasourceVersionId": 18613.0, "ForumId": 12132, "Type": 2, "CreationDate": "12/01/2017 19:19:36", "LastActivityDate": "02/06/2018", "TotalViews": 527492, "TotalDownloads": 73912, "TotalVotes": 1069, "TotalKernels": 393}]
| null |
# !pip install git+https://github.com/keras-team/keras-applications.git -q
# import keras_applications as ka
# def set_to_tf(ka):
# from tensorflow.keras import backend, layers, models, utils
# ka._KERAS_BACKEND = backend
# ka._KERAS_LAYERS = layers
# ka._KERAS_MODELS = models
# ka._KERAS_UTILS = utils
# set_to_tf(ka)
# !pip install /kaggle/input/keras-pretrained-imagenet-weights/image_classifiers-1.0.0-py3-none-any.whl
# from classification_models.tfkeras import Classifiers
# Classifiers.models_names()
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import warnings
warnings.filterwarnings("ignore")
# ML tools
import tensorflow as tf
from kaggle_datasets import KaggleDatasets
from keras.models import Sequential
from keras import layers
from keras.optimizers import Adam
from tensorflow.keras import Model
# import tensorflow.keras.applications.efficientnet as efn
from tensorflow.keras.applications import *
import os
from keras import optimizers
from sklearn.model_selection import train_test_split
from tensorflow.keras.callbacks import ReduceLROnPlateau, ModelCheckpoint, EarlyStopping
df = pd.read_csv("../input/nih-dataframe/NIH_Dataframe.csv")
df.img_ind = df.img_ind.apply(lambda x: x.split(".")[0])
display(df.head(4))
print(df.shape)
target_cols = df.drop(["img_ind"], axis=1).columns.to_list()
n_classes = len(target_cols)
img_size = 256
n_epochs = 35
lr = 0.0001
seed = 11
val_split = 0.2
seed = 33
batch_size = 12
n_classes
def auto_select_accelerator():
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
print("Running on TPU:", tpu.master())
except ValueError:
strategy = tf.distribute.get_strategy()
print(f"Running on {strategy.num_replicas_in_sync} replicas")
return strategy
"""
Reference
https://www.kaggle.com/xhlulu/ranzcr-efficientnet-tpu-training
"""
def build_decoder(with_labels=True, target_size=(img_size, img_size), ext="jpg"):
def decode(path):
file_bytes = tf.io.read_file(
path
) # Reads and outputs the entire contents of the input filename.
if ext == "png":
img = tf.image.decode_png(
file_bytes, channels=3
) # Decode a PNG-encoded image to a uint8 or uint16 tensor
elif ext in ["jpg", "jpeg"]:
img = tf.image.decode_jpeg(
file_bytes, channels=3
) # Decode a JPEG-encoded image to a uint8 tensor
else:
raise ValueError("Image extension not supported")
img = tf.cast(
img, tf.float32
) # Casts a tensor to the type float32 and divides by 255.
img = tf.image.resize(img, target_size) # Resizing to target size
return img
def decode_with_labels(path, label):
return decode(path), label
return decode_with_labels if with_labels else decode
def build_augmenter(with_labels=True):
def augment(img):
img = tf.image.random_flip_left_right(img)
img = tf.image.random_flip_up_down(img)
img = tf.image.random_saturation(img, 0.8, 1.2)
img = tf.image.random_brightness(img, 0.1)
img = tf.image.random_contrast(img, 0.8, 1.2)
return img
def augment_with_labels(img, label):
return augment(img), label
return augment_with_labels if with_labels else augment
def build_dataset(
paths,
labels=None,
bsize=32,
cache=True,
decode_fn=None,
augment_fn=None,
augment=True,
repeat=True,
shuffle=1024,
cache_dir="",
):
if cache_dir != "" and cache is True:
os.makedirs(cache_dir, exist_ok=True)
if decode_fn is None:
decode_fn = build_decoder(labels is not None)
if augment_fn is None:
augment_fn = build_augmenter(labels is not None)
AUTO = tf.data.experimental.AUTOTUNE
slices = paths if labels is None else (paths, labels)
dset = tf.data.Dataset.from_tensor_slices(slices)
dset = dset.map(decode_fn, num_parallel_calls=AUTO)
dset = dset.cache(cache_dir) if cache else dset
dset = dset.map(augment_fn, num_parallel_calls=AUTO) if augment else dset
dset = dset.repeat() if repeat else dset
dset = dset.shuffle(shuffle) if shuffle else dset
dset = dset.batch(bsize).prefetch(
AUTO
) # overlaps data preprocessing and model execution while training
return dset
DATASET_NAME = "nih-image-600x600-data"
strategy = auto_select_accelerator()
batch_size = strategy.num_replicas_in_sync * batch_size
print("batch size", batch_size)
GCS_DS_PATH = KaggleDatasets().get_gcs_path(DATASET_NAME)
GCS_DS_PATH
paths = GCS_DS_PATH + "/NIH_Images/" + df["img_ind"] + ".jpg"
# #Get the multi-labels
# label_cols = df.columns[:-1]
# labels = df[label_cols].values
def get_label(df, label):
labels = df[label].values
return labels
# Train test split
def t_t_s(df, paths, label):
labels = get_label(df, label)
(train_paths, valid_paths, train_labels, valid_labels) = train_test_split(
paths, labels, test_size=val_split, random_state=11
)
return train_paths, valid_paths, train_labels, valid_labels
# print(train_paths.shape, valid_paths.shape)
# train_labels.sum(axis=0), valid_labels.sum(axis=0)
# ## for Pneumonia
tr_paths, val_paths, tr_labels, val_labels = t_t_s(df, paths, "Cardiomegaly")
# Build the tensorflow datasets
decoder = build_decoder(with_labels=True, target_size=(img_size, img_size))
# Build the tensorflow datasets
dtrain = build_dataset(tr_paths, tr_labels, bsize=batch_size, decode_fn=decoder)
dvalid = build_dataset(
val_paths,
val_labels,
bsize=batch_size,
repeat=False,
shuffle=False,
augment=False,
decode_fn=decoder,
)
data, _ = dtrain.take(2)
images = data[0].numpy()
fig, axes = plt.subplots(3, 4, figsize=(20, 10))
axes = axes.flatten()
for img, ax in zip(images, axes):
img = img / 255.0
ax.imshow(img)
ax.axis("off")
plt.tight_layout()
plt.show()
def build_model():
# seresnet152, _ = Classifiers.get('seresnet152')
# base = seresnet152(input_shape=(img_size, img_size, 3), include_top=False, weights='imagenet')
base = efficientnet.EfficientNetB0(include_top=False, weights="imagenet")
pre = efficientnet.preprocess_input
inp = layers.Input(shape=(img_size, img_size, 3))
x = pre(inp)
x = base(x)
x = layers.GlobalAveragePooling2D()(layers.Dropout(0.16)(x))
x = layers.Dropout(0.3)(x)
x = layers.Dense(1, "sigmoid")(x)
return Model(inp, x)
with strategy.scope():
model = build_model()
loss = tf.keras.losses.BinaryCrossentropy(label_smoothing=0.0)
model.compile(
optimizers.Adam(lr=lr),
loss=loss,
metrics=[tf.keras.metrics.AUC(multi_label=True)],
)
model.summary()
name = "NIH_Cardiomegaly_moodel.h5"
rlr = ReduceLROnPlateau(
monitor="val_loss",
factor=0.1,
patience=2,
verbose=1,
min_delta=1e-4,
min_lr=1e-6,
mode="min",
cooldown=1,
)
ckp = ModelCheckpoint(
name, monitor="val_loss", verbose=1, save_best_only=True, mode="min"
)
es = EarlyStopping(
monitor="val_loss",
min_delta=1e-4,
patience=5,
mode="min",
restore_best_weights=True,
verbose=1,
)
steps_per_epoch = tr_paths.shape[0] // batch_size
steps_per_epoch
history = model.fit(
dtrain,
validation_data=dvalid,
epochs=n_epochs,
callbacks=[rlr, es, ckp],
steps_per_epoch=steps_per_epoch,
verbose=1,
)
plt.figure(figsize=(12, 6))
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.plot(history.history["loss"], label="Training Loss", marker="o")
plt.plot(history.history["val_loss"], label="Validation Loss", marker="+")
plt.grid(True)
plt.legend()
plt.show()
plt.figure(figsize=(12, 6))
plt.xlabel("Epochs")
plt.ylabel("AUC")
plt.plot(history.history["auc"], label="Training AUC", marker="o")
plt.plot(history.history["val_auc"], label="Validation AUC", marker="+")
plt.grid(True)
plt.legend()
plt.show()
# tf.keras.backend.clear_session()
# from sklearn.metrics import roc_auc_score
# model= tf.keras.models.load_model(name)
# pred= model.predict(dvalid, verbose=1)
# print('AUC CKECK-UP per CLASS')
# classes= df.columns[:-1]
# for i, n in enumerate(classes):
# print(classes[i])
# print(i, roc_auc_score(valid_labels[:, i], pred[:, i]))
# print('---------')
| false | 0 | 2,749 | 0 | 4,251 | 2,749 |
||
69543207
|
<jupyter_start><jupyter_text>English Word Frequency
### Context:
How frequently a word occurs in a language is an important piece of information for natural language processing and linguists. In natural language processing, very frequent words tend to be less informative than less frequent one and are often removed during preprocessing. Human language users are also sensitive to word frequency. How often a word is used affects language processing in humans. For example, [very frequent words are read and understood more quickly](http://econtent.hogrefe.com/doi/abs/10.1027/1618-3169/a000123?journalCode=zea) and can be [understood more easily in background noise](http://asa.scitation.org/doi/abs/10.1121/1.1918432).
### Content:
This dataset contains the counts of the 333,333 most commonly-used single words on the English language web, as derived from the Google Web Trillion Word Corpus.
Kaggle dataset identifier: english-word-frequency
<jupyter_code>import pandas as pd
df = pd.read_csv('english-word-frequency/unigram_freq.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 333333 entries, 0 to 333332
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 word 333331 non-null object
1 count 333333 non-null int64
dtypes: int64(1), object(1)
memory usage: 5.1+ MB
<jupyter_text>Examples:
{
"word": "the",
"count": 23135851162
}
{
"word": "of",
"count": 13151942776
}
{
"word": "and",
"count": 12997637966
}
{
"word": "to",
"count": 12136980858
}
<jupyter_script>import pandas as pd
import re
import seaborn as sns
from matplotlib import pyplot as plt
import numpy as np
df = pd.read_csv("../input/commonlitreadabilityprize/train.csv")
df = df.fillna("Missing")
# df.target = df.target.apply(lambda x: np.floor(x))
sns.boxplot(x=df.target)
plt.show()
# df.target = df.target.apply(lambda x: -3 if (x==-4) else x)
# df.target = df.target.apply(lambda x: 0 if (x==1) else x)
# sns.boxplot(x=df.target)
# plt.show()
df["ex_len"] = df.excerpt.apply(lambda x: len(x))
sns.histplot(x=df.ex_len)
plt.show()
df
import nltk
from nltk.corpus import stopwords
nltk.download("wordnet")
nltk.download("punkt")
nltk.download("stopwords")
def cleaner(excerpt):
clean = nltk.word_tokenize(re.sub("[^a-zA-Z]", " ", excerpt).lower())
clean = [word for word in clean if not word in set(stopwords.words("english"))]
lem = nltk.WordNetLemmatizer()
clean = [lem.lemmatize(word) for word in clean]
return " ".join(clean)
df.excerpt = df.excerpt.apply(cleaner)
wdf = pd.read_csv("../input/english-word-frequency/unigram_freq.csv")
wdf["ncol"] = wdf.word.apply(
lambda x: True if (x not in set(stopwords.words("english"))) else False
)
nwdf = wdf[wdf.ncol == True]
lem1 = nltk.WordNetLemmatizer()
nwdf["lword"] = nwdf.word.apply(lambda x: lem1.lemmatize(str(x)))
nwdf = nwdf.sort_values("count")
def change_scale_word_count(old_value):
return ((old_value - 12711) / (1551258643 - 12711)) * (1 - 0) + 0
nwdf["scaled_count"] = nwdf["count"] # .apply(change_scale_word_count)
word_freq = dict(zip(nwdf.word, nwdf.scaled_count))
def get_score(excerpt):
score = 0
for i in excerpt.split(" "):
try:
score += word_freq[i]
except KeyError:
pass
return score
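# Quick sanity check of the scoring idea (the two sentences below are made up purely
# for illustration): the score is just the sum of corpus counts of the words that
# survive cleaning, so text built from common vocabulary should score far higher than
# text built from rare vocabulary.
common_text = cleaner("The people said the house was near the water")
rare_text = cleaner("The ichthyologist scrutinised the bioluminescent cephalopod")
print(get_score(common_text), get_score(rare_text))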
nwdf
df["excerpt_score"] = df.excerpt.apply(get_score)
arc_df = df.copy()
df = arc_df.copy()
df
from sklearn.preprocessing import MinMaxScaler
mms1 = MinMaxScaler()
mms2 = MinMaxScaler()
mms3 = MinMaxScaler()
df.target = mms1.fit_transform(np.reshape(list(df.target), (-1, 1)))
df.excerpt_score = mms2.fit_transform(np.reshape(list(df.excerpt_score), (-1, 1)))
df.ex_len = mms3.fit_transform(np.reshape(list(df.ex_len), (-1, 1)))
y = df["target"]
df.drop(
["id", "url_legal", "license", "target", "standard_error"], axis=1, inplace=True
)
df
from sklearn.feature_extraction.text import TfidfVectorizer
name_vectorizer = TfidfVectorizer()
names_encoded = name_vectorizer.fit_transform(df.excerpt)
print(names_encoded.shape)
names_df = pd.DataFrame(
data=names_encoded.toarray(), columns=name_vectorizer.get_feature_names()
)
df = pd.concat([df, names_df], axis=1)
df.drop(["excerpt"], axis=1, inplace=True)
X = df
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, random_state=42
)
from sklearn.metrics import mean_squared_error as mse
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
mse(y_test, y_pred)
# from sklearn.svm import SVR
# model = SVR()
# model.fit(X_train, y_train)
# y_pred = model.predict(X_test)
# mse(y_test, y_pred)
# # 0.0689411833459731
from sklearn.ensemble import RandomForestRegressor
model = RandomForestRegressor()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
mse(y_test, y_pred)
# 0.05142008547008548
# from sklearn.ensemble import GradientBoostingRegressor
# model = GradientBoostingRegressor()
# model.fit(X_train, y_train)
# y_pred = model.predict(X_test)
# mse(y_test, y_pred)
# # 0.051231989788161326
# from xgboost import XGBRegressor
# model = XGBRegressor()
# model.fit(X_train, y_train)
# y_pred = model.predict(X_test)
# mse(y_test, y_pred)
# # 0.05074928177769481
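# The single hold-out comparison above can be noisy; a minimal sketch of re-checking the
# chosen model with k-fold cross-validation instead (3 folds only to keep it cheap,
# purely illustrative and not tuned):
from sklearn.model_selection import cross_val_score
cv_mse = -cross_val_score(
    RandomForestRegressor(), X, y, cv=3, scoring="neg_mean_squared_error"
)
print(cv_mse.mean())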
# ### Test Data
tdf = pd.read_csv("../input/commonlitreadabilityprize/test.csv")
tdf = tdf.fillna("Missing")
tdf["ex_len"] = tdf.excerpt.apply(lambda x: len(x))
tdf.excerpt = tdf.excerpt.apply(cleaner)
tdf["excerpt_score"] = tdf.excerpt.apply(get_score)
tdf.drop(["id", "url_legal", "license"], axis=1, inplace=True)
tdf
names_encoded = name_vectorizer.transform(tdf.excerpt)
print(names_encoded.shape)
names_df = pd.DataFrame(
data=names_encoded.toarray(), columns=name_vectorizer.get_feature_names()
)
tdf = pd.concat([tdf, names_df], axis=1)
tdf.drop(["excerpt"], axis=1, inplace=True)
tdf.excerpt_score = mms2.transform(np.reshape(list(tdf.excerpt_score), (-1, 1)))
tdf.ex_len = mms3.transform(np.reshape(list(tdf.ex_len), (-1, 1)))
ypred = model.predict(tdf)
ypred
nypred = mms1.inverse_transform(np.reshape(list(ypred), (-1, 1)))[:, 0]
nypred
submission = pd.DataFrame(
{
"id": pd.read_csv("../input/commonlitreadabilityprize/test.csv")["id"],
"target": list(nypred),
}
)
submission.to_csv("submission.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/543/69543207.ipynb
|
english-word-frequency
|
rtatman
|
[{"Id": 69543207, "ScriptId": 18987782, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3216886, "CreationDate": "08/01/2021 10:19:27", "VersionNumber": 6.0, "Title": "Commonlit 4", "EvaluationDate": "08/01/2021", "IsChange": true, "TotalLines": 188.0, "LinesInsertedFromPrevious": 11.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 177.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 92849866, "KernelVersionId": 69543207, "SourceDatasetVersionId": 3976}]
|
[{"Id": 3976, "DatasetId": 2367, "DatasourceVersionId": 3976, "CreatorUserId": 1162990, "LicenseName": "Other (specified in description)", "CreationDate": "09/06/2017 18:21:27", "VersionNumber": 1.0, "Title": "English Word Frequency", "Slug": "english-word-frequency", "Subtitle": "\u2153 Million Most Frequent English Words on the Web", "Description": "### Context: \n\nHow frequently a word occurs in a language is an important piece of information for natural language processing and linguists. In natural language processing, very frequent words tend to be less informative than less frequent one and are often removed during preprocessing. Human language users are also sensitive to word frequency. How often a word is used affects language processing in humans. For example, [very frequent words are read and understood more quickly](http://econtent.hogrefe.com/doi/abs/10.1027/1618-3169/a000123?journalCode=zea) and can be [understood more easily in background noise](http://asa.scitation.org/doi/abs/10.1121/1.1918432).\n\n### Content: \n\nThis dataset contains the counts of the 333,333 most commonly-used single words on the English language web, as derived from the Google Web Trillion Word Corpus.\n\n### Acknowledgements: \n\nData files were derived from the Google Web Trillion Word Corpus (as [described](https://research.googleblog.com/2006/08/all-our-n-gram-are-belong-to-you.html) by Thorsten Brants and Alex Franz, and [distributed](https://catalog.ldc.upenn.edu/LDC2006T13) by the Linguistic Data Consortium) by Peter Norvig. You can find more information on these files and the code used to generate them [here](http://norvig.com/ngrams/).\n\nThe code used to generate this dataset is distributed under the [MIT License](https://en.wikipedia.org/wiki/MIT_License). \n\n### Inspiration:\n\n* Can you tag the part of speech of these words? Which parts of speech are most frequent? Is this similar to other languages, like [Japanese](https://www.kaggle.com/rtatman/japanese-lemma-frequency)?\n* What differences are there between the very frequent words in this dataset, and the the frequent words in other corpora, such as the [Brown Corpus](https://www.kaggle.com/nltkdata/brown-corpus) or the [TIMIT corpus](https://www.kaggle.com/nltkdata/timitcorpus)? What might these differences tell us about how language is used?", "VersionNotes": "Initial release", "TotalCompressedBytes": 4956252.0, "TotalUncompressedBytes": 4956252.0}]
|
[{"Id": 2367, "CreatorUserId": 1162990, "OwnerUserId": 1162990.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 3976.0, "CurrentDatasourceVersionId": 3976.0, "ForumId": 6340, "Type": 2, "CreationDate": "09/06/2017 18:21:27", "LastActivityDate": "02/04/2018", "TotalViews": 167501, "TotalDownloads": 18219, "TotalVotes": 350, "TotalKernels": 44}]
|
[{"Id": 1162990, "UserName": "rtatman", "DisplayName": "Rachael Tatman", "RegisterDate": "07/10/2017", "PerformanceTier": 4}]
|
import pandas as pd
import re
import seaborn as sns
from matplotlib import pyplot as plt
import numpy as np
df = pd.read_csv("../input/commonlitreadabilityprize/train.csv")
df = df.fillna("Missing")
# df.target = df.target.apply(lambda x: np.floor(x))
sns.boxplot(x=df.target)
plt.show()
# df.target = df.target.apply(lambda x: -3 if (x==-4) else x)
# df.target = df.target.apply(lambda x: 0 if (x==1) else x)
# sns.boxplot(x=df.target)
# plt.show()
df["ex_len"] = df.excerpt.apply(lambda x: len(x))
sns.histplot(x=df.ex_len)
plt.show()
df
import nltk
from nltk.corpus import stopwords
nltk.download("wordnet")
nltk.download("punkt")
nltk.download("stopwords")
def cleaner(excerpt):
clean = nltk.word_tokenize(re.sub("[^a-zA-Z]", " ", excerpt).lower())
clean = [word for word in clean if not word in set(stopwords.words("english"))]
lem = nltk.WordNetLemmatizer()
clean = [lem.lemmatize(word) for word in clean]
return " ".join(clean)
df.excerpt = df.excerpt.apply(cleaner)
wdf = pd.read_csv("../input/english-word-frequency/unigram_freq.csv")
wdf["ncol"] = wdf.word.apply(
lambda x: True if (x not in set(stopwords.words("english"))) else False
)
nwdf = wdf[wdf.ncol == True]
lem1 = nltk.WordNetLemmatizer()
nwdf["lword"] = nwdf.word.apply(lambda x: lem1.lemmatize(str(x)))
nwdf = nwdf.sort_values("count")
def change_scale_word_count(old_value):
return ((old_value - 12711) / (1551258643 - 12711)) * (1 - 0) + 0
nwdf["scaled_count"] = nwdf["count"] # .apply(change_scale_word_count)
word_freq = dict(zip(nwdf.word, nwdf.scaled_count))
def get_score(excerpt):
score = 0
for i in excerpt.split(" "):
try:
score += word_freq[i]
except KeyError:
pass
return score
nwdf
df["excerpt_score"] = df.excerpt.apply(get_score)
arc_df = df.copy()
df = arc_df.copy()
df
from sklearn.preprocessing import MinMaxScaler
mms1 = MinMaxScaler()
mms2 = MinMaxScaler()
mms3 = MinMaxScaler()
df.target = mms1.fit_transform(np.reshape(list(df.target), (-1, 1)))
df.excerpt_score = mms2.fit_transform(np.reshape(list(df.excerpt_score), (-1, 1)))
df.ex_len = mms3.fit_transform(np.reshape(list(df.ex_len), (-1, 1)))
y = df["target"]
df.drop(
["id", "url_legal", "license", "target", "standard_error"], axis=1, inplace=True
)
df
from sklearn.feature_extraction.text import TfidfVectorizer
name_vectorizer = TfidfVectorizer()
names_encoded = name_vectorizer.fit_transform(df.excerpt)
print(names_encoded.shape)
names_df = pd.DataFrame(
data=names_encoded.toarray(), columns=name_vectorizer.get_feature_names()
)
df = pd.concat([df, names_df], axis=1)
df.drop(["excerpt"], axis=1, inplace=True)
X = df
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.33, random_state=42
)
from sklearn.metrics import mean_squared_error as mse
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
mse(y_test, y_pred)
# from sklearn.svm import SVR
# model = SVR()
# model.fit(X_train, y_train)
# y_pred = model.predict(X_test)
# mse(y_test, y_pred)
# # 0.0689411833459731
from sklearn.ensemble import RandomForestRegressor
model = RandomForestRegressor()
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
mse(y_test, y_pred)
# 0.05142008547008548
# from sklearn.ensemble import GradientBoostingRegressor
# model = GradientBoostingRegressor()
# model.fit(X_train, y_train)
# y_pred = model.predict(X_test)
# mse(y_test, y_pred)
# # 0.051231989788161326
# from xgboost import XGBRegressor
# model = XGBRegressor()
# model.fit(X_train, y_train)
# y_pred = model.predict(X_test)
# mse(y_test, y_pred)
# # 0.05074928177769481
# ### Test Data
tdf = pd.read_csv("../input/commonlitreadabilityprize/test.csv")
tdf = tdf.fillna("Missing")
tdf["ex_len"] = tdf.excerpt.apply(lambda x: len(x))
tdf.excerpt = tdf.excerpt.apply(cleaner)
tdf["excerpt_score"] = tdf.excerpt.apply(get_score)
tdf.drop(["id", "url_legal", "license"], axis=1, inplace=True)
tdf
names_encoded = name_vectorizer.transform(tdf.excerpt)
print(names_encoded.shape)
names_df = pd.DataFrame(
data=names_encoded.toarray(), columns=name_vectorizer.get_feature_names()
)
tdf = pd.concat([tdf, names_df], axis=1)
tdf.drop(["excerpt"], axis=1, inplace=True)
tdf.excerpt_score = mms2.transform(np.reshape(list(tdf.excerpt_score), (-1, 1)))
tdf.ex_len = mms3.transform(np.reshape(list(tdf.ex_len), (-1, 1)))
ypred = model.predict(tdf)
ypred
nypred = mms1.inverse_transform(np.reshape(list(ypred), (-1, 1)))[:, 0]
nypred
submission = pd.DataFrame(
{
"id": pd.read_csv("../input/commonlitreadabilityprize/test.csv")["id"],
"target": list(nypred),
}
)
submission.to_csv("submission.csv", index=False)
|
[{"english-word-frequency/unigram_freq.csv": {"column_names": "[\"word\", \"count\"]", "column_data_types": "{\"word\": \"object\", \"count\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 333333 entries, 0 to 333332\nData columns (total 2 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 word 333331 non-null object\n 1 count 333333 non-null int64 \ndtypes: int64(1), object(1)\nmemory usage: 5.1+ MB\n", "summary": "{\"count\": {\"count\": 333333.0, \"mean\": 1764374.424935425, \"std\": 66299870.42773988, \"min\": 12711.0, \"25%\": 21224.0, \"50%\": 41519.0, \"75%\": 136576.0, \"max\": 23135851162.0}}", "examples": "{\"word\":{\"0\":\"the\",\"1\":\"of\",\"2\":\"and\",\"3\":\"to\"},\"count\":{\"0\":23135851162,\"1\":13151942776,\"2\":12997637966,\"3\":12136980858}}"}}]
| true | 3 |
<start_data_description><data_path>english-word-frequency/unigram_freq.csv:
<column_names>
['word', 'count']
<column_types>
{'word': 'object', 'count': 'int64'}
<dataframe_Summary>
{'count': {'count': 333333.0, 'mean': 1764374.424935425, 'std': 66299870.42773988, 'min': 12711.0, '25%': 21224.0, '50%': 41519.0, '75%': 136576.0, 'max': 23135851162.0}}
<dataframe_info>
RangeIndex: 333333 entries, 0 to 333332
Data columns (total 2 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 word 333331 non-null object
1 count 333333 non-null int64
dtypes: int64(1), object(1)
memory usage: 5.1+ MB
<some_examples>
{'word': {'0': 'the', '1': 'of', '2': 'and', '3': 'to'}, 'count': {'0': 23135851162, '1': 13151942776, '2': 12997637966, '3': 12136980858}}
<end_description>
| 1,830 | 0 | 2,350 | 1,830 |
69534393
|
<jupyter_start><jupyter_text>US Accidents (2016 - 2023)
### Description
This is a countrywide car accident dataset, which covers __49 states of the USA__. The accident data are collected from __February 2016 to Dec 2020__, using multiple APIs that provide streaming traffic incident (or event) data. These APIs broadcast traffic data captured by a variety of entities, such as the US and state departments of transportation, law enforcement agencies, traffic cameras, and traffic sensors within the road-networks. Currently, there are about __3 million__ accident records in this dataset. Check [here](https://smoosavi.org/datasets/us_accidents) to learn more about this dataset.
Kaggle dataset identifier: us-accidents
<jupyter_script>import pandas as pd
import numpy as np
df = pd.read_csv("../input/us-accidents/US_Accidents_Dec20_Updated.csv")
df.head()
df.shape
df.columns
df.info()
df.describe()
# # Questions
# 1. Are there more accidents in warmer or colder areas?
# 2. Which states have a higher number of accidents / accidents per capita? (sketched below)
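# A minimal sketch for question 2 (the "State" column name is taken from the dataset
# documentation and should be verified against df.columns; true per-capita rates would
# additionally require state population figures from an external source):
accidents_per_state = df["State"].value_counts()
print(accidents_per_state.head(10))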
# #
# Check how many numeric columns
numerics = ["int16", "int32", "int64", "float16", "float32", "float64"]
numeric_df = df.select_dtypes(include=numerics)
len(numeric_df.columns)
# # Percentage of missing values per column
# Missing values
missing_percentages = df.isna().sum().sort_values(ascending=False) / len(df)
missing_percentages
# Keep only the columns that actually have missing values (column names form the index)
missing_percentages[missing_percentages != 0]
missing_percentages[missing_percentages != 0].plot(kind="barh")
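# A possible follow-up (the 40% threshold is an arbitrary illustration): columns with a
# large share of missing values are candidates to drop before any modelling.
high_missing_cols = missing_percentages[missing_percentages > 0.4].index.tolist()
print(high_missing_cols)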
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/534/69534393.ipynb
|
us-accidents
|
sobhanmoosavi
|
[{"Id": 69534393, "ScriptId": 18987624, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4389319, "CreationDate": "08/01/2021 08:09:10", "VersionNumber": 1.0, "Title": "US accidents", "EvaluationDate": "08/01/2021", "IsChange": true, "TotalLines": 41.0, "LinesInsertedFromPrevious": 41.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 92830572, "KernelVersionId": 69534393, "SourceDatasetVersionId": 2185555}]
|
[{"Id": 2185555, "DatasetId": 199387, "DatasourceVersionId": 2226970, "CreatorUserId": 348067, "LicenseName": "CC BY-NC-SA 4.0", "CreationDate": "05/02/2021 21:25:23", "VersionNumber": 9.0, "Title": "US Accidents (2016 - 2023)", "Slug": "us-accidents", "Subtitle": "A Countrywide Traffic Accident Dataset (2016 - 2023)", "Description": "### Description\nThis is a countrywide car accident dataset, which covers __49 states of the USA__. The accident data are collected from __February 2016 to Dec 2020__, using multiple APIs that provide streaming traffic incident (or event) data. These APIs broadcast traffic data captured by a variety of entities, such as the US and state departments of transportation, law enforcement agencies, traffic cameras, and traffic sensors within the road-networks. Currently, there are about __3 million__ accident records in this dataset. Check [here](https://smoosavi.org/datasets/us_accidents) to learn more about this dataset. \n\n### Acknowledgements\nPlease cite the following papers if you use this dataset: \n\n- Moosavi, Sobhan, Mohammad Hossein Samavatian, Srinivasan Parthasarathy, and Rajiv Ramnath. \u201c[A Countrywide Traffic Accident Dataset](https://arxiv.org/abs/1906.05409).\u201d, 2019.\n\n- Moosavi, Sobhan, Mohammad Hossein Samavatian, Srinivasan Parthasarathy, Radu Teodorescu, and Rajiv Ramnath. [\"Accident Risk Prediction based on Heterogeneous Sparse Data: New Dataset and Insights.\"](https://arxiv.org/abs/1909.09638) In proceedings of the 27th ACM SIGSPATIAL International Conference on Advances in Geographic Information Systems, ACM, 2019. \n\n### Content\nThis dataset has been collected in real-time, using multiple Traffic APIs. Currently, it contains accident data that are collected from February 2016 to Dec 2020 for the Contiguous United States. Check [here](https://smoosavi.org/datasets/us_accidents) to learn more about this dataset. \n\n### Inspiration\nUS-Accidents can be used for numerous applications such as real-time car accident prediction, studying car accidents hotspot locations, casualty analysis and extracting cause and effect rules to predict car accidents, and studying the impact of precipitation or other environmental stimuli on accident occurrence. The most recent release of the dataset can also be useful to study the impact of COVID-19 on traffic behavior and accidents. \n\n### Usage Policy and Legal Disclaimer\nThis dataset is being distributed only for __Research__ purposes, under Creative Commons Attribution-Noncommercial-ShareAlike license (CC BY-NC-SA 4.0). By clicking on download button(s) below, you are agreeing to use this data only for non-commercial, research, or academic applications. You may need to cite the above papers if you use this dataset.\n\n### Data Removal (Updated Dataset)\nPlease note that we removed a portion of the data due to a request from one of the main traffic data providers.", "VersionNotes": "Update, data from 2016 to 2020 (removed data from one of the sources)", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 199387, "CreatorUserId": 348067, "OwnerUserId": 348067.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5793796.0, "CurrentDatasourceVersionId": 5870478.0, "ForumId": 210356, "Type": 2, "CreationDate": "05/20/2019 23:26:06", "LastActivityDate": "05/20/2019", "TotalViews": 697710, "TotalDownloads": 94299, "TotalVotes": 1910, "TotalKernels": 330}]
|
[{"Id": 348067, "UserName": "sobhanmoosavi", "DisplayName": "Sobhan Moosavi", "RegisterDate": "05/06/2015", "PerformanceTier": 2}]
|
import pandas as pd
import numpy as np
df = pd.read_csv("../input/us-accidents/US_Accidents_Dec20_Updated.csv")
df.head()
df.shape
df.columns
df.info()
df.describe()
# # Questions
# 1. Are there more accidents in warmer or colder areas?
# 2. Which states have a higher number of accidents / accidents per capita?
# #
# Check how many numeric columns
numerics = ["int16", "int32", "int64", "float16", "float32", "float64"]
numeric_df = df.select_dtypes(include=numerics)
len(numeric_df.columns)
# # Percentage of missing values per column
# Missing values
missing_percentages = df.isna().sum().sort_values(ascending=False) / len(df)
missing_percentages
# Keep only the columns that actually have missing values (column names form the index)
missing_percentages[missing_percentages != 0]
missing_percentages[missing_percentages != 0].plot(kind="barh")
| false | 1 | 272 | 0 | 447 | 272 |
||
69534110
|
<jupyter_start><jupyter_text>COVID-19 World Vaccination Progress
### Context
Data is collected daily from [**Our World in Data**](https://ourworldindata.org/) GitHub repository for [covid-19](https://github.com/owid/covid-19-data), merged and uploaded. Country level vaccination data is gathered and assembled in one single file. Then, this data file is merged with locations data file to include vaccination sources information. A second file, with manufacturers information, is included.
### Content
The data (country vaccinations) contains the following information:
* **Country**- this is the country for which the vaccination information is provided;
* **Country ISO Code** - ISO code for the country;
* **Date** - date for the data entry; for some of the dates we have only the daily vaccinations, for others, only the (cumulative) total;
* **Total number of vaccinations** - this is the absolute number of total immunizations in the country;
* **Total number of people vaccinated** - a person, depending on the immunization scheme, will receive one or more (typically 2) vaccines; at a certain moment, the number of vaccination might be larger than the number of people;
* **Total number of people fully vaccinated** - this is the number of people that received the entire set of immunization according to the immunization scheme (typically 2); at a certain moment in time, there might be a certain number of people that received one vaccine and another number (smaller) of people that received all vaccines in the scheme;
* **Daily vaccinations (raw)** - for a certain data entry, the number of vaccination for that date/country;
* **Daily vaccinations** - for a certain data entry, the number of vaccination for that date/country;
* **Total vaccinations per hundred** - ratio (in percent) between vaccination number and total population up to the date in the country;
* **Total number of people vaccinated per hundred** - ratio (in percent) between population immunized and total population up to the date in the country;
* **Total number of people fully vaccinated per hundred** - ratio (in percent) between population fully immunized and total population up to the date in the country;
* **Number of vaccinations per day** - number of daily vaccination for that day and country;
* **Daily vaccinations per million** - ratio (in ppm) between vaccination number and total population for the current date in the country;
* **Vaccines used in the country** - total number of vaccines used in the country (up to date);
* **Source name** - source of the information (national authority, international organization, local organization etc.);
* **Source website** - website of the source of information;
There is a second file added recently (country vaccinations by manufacturer), with the following columns:
* **Location** - country;
* **Date** - date;
* **Vaccine** - vaccine type;
* **Total number of vaccinations** - total number of vaccinations / current time and vaccine type.
Kaggle dataset identifier: covid-world-vaccination-progress
<jupyter_code>import pandas as pd
df = pd.read_csv('covid-world-vaccination-progress/country_vaccinations.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 86512 entries, 0 to 86511
Data columns (total 15 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 country 86512 non-null object
1 iso_code 86512 non-null object
2 date 86512 non-null object
3 total_vaccinations 43607 non-null float64
4 people_vaccinated 41294 non-null float64
5 people_fully_vaccinated 38802 non-null float64
6 daily_vaccinations_raw 35362 non-null float64
7 daily_vaccinations 86213 non-null float64
8 total_vaccinations_per_hundred 43607 non-null float64
9 people_vaccinated_per_hundred 41294 non-null float64
10 people_fully_vaccinated_per_hundred 38802 non-null float64
11 daily_vaccinations_per_million 86213 non-null float64
12 vaccines 86512 non-null object
13 source_name 86512 non-null object
14 source_website 86512 non-null object
dtypes: float64(9), object(6)
memory usage: 9.9+ MB
<jupyter_text>Examples:
{
"country": "Afghanistan",
"iso_code": "AFG",
"date": "2021-02-22 00:00:00",
"total_vaccinations": 0.0,
"people_vaccinated": 0.0,
"people_fully_vaccinated": NaN,
"daily_vaccinations_raw": NaN,
"daily_vaccinations": NaN,
"total_vaccinations_per_hundred": 0.0,
"people_vaccinated_per_hundred": 0.0,
"people_fully_vaccinated_per_hundred": NaN,
"daily_vaccinations_per_million": NaN,
"vaccines": "Johnson&Johnson, Oxford/AstraZeneca, Pfizer/BioNTech, Sinopharm/Beijing",
"source_name": "World Health Organization",
"source_website": "https://covid19.who.int/"
}
{
"country": "Afghanistan",
"iso_code": "AFG",
"date": "2021-02-23 00:00:00",
"total_vaccinations": NaN,
"people_vaccinated": NaN,
"people_fully_vaccinated": NaN,
"daily_vaccinations_raw": NaN,
"daily_vaccinations": 1367.0,
"total_vaccinations_per_hundred": NaN,
"people_vaccinated_per_hundred": NaN,
"people_fully_vaccinated_per_hundred": NaN,
"daily_vaccinations_per_million": 34.0,
"vaccines": "Johnson&Johnson, Oxford/AstraZeneca, Pfizer/BioNTech, Sinopharm/Beijing",
"source_name": "World Health Organization",
"source_website": "https://covid19.who.int/"
}
{
"country": "Afghanistan",
"iso_code": "AFG",
"date": "2021-02-24 00:00:00",
"total_vaccinations": NaN,
"people_vaccinated": NaN,
"people_fully_vaccinated": NaN,
"daily_vaccinations_raw": NaN,
"daily_vaccinations": 1367.0,
"total_vaccinations_per_hundred": NaN,
"people_vaccinated_per_hundred": NaN,
"people_fully_vaccinated_per_hundred": NaN,
"daily_vaccinations_per_million": 34.0,
"vaccines": "Johnson&Johnson, Oxford/AstraZeneca, Pfizer/BioNTech, Sinopharm/Beijing",
"source_name": "World Health Organization",
"source_website": "https://covid19.who.int/"
}
{
"country": "Afghanistan",
"iso_code": "AFG",
"date": "2021-02-25 00:00:00",
"total_vaccinations": NaN,
"people_vaccinated": NaN,
"people_fully_vaccinated": NaN,
"daily_vaccinations_raw": NaN,
"daily_vaccinations": 1367.0,
"total_vaccinations_per_hundred": NaN,
"people_vaccinated_per_hundred": NaN,
"people_fully_vaccinated_per_hundred": NaN,
"daily_vaccinations_per_million": 34.0,
"vaccines": "Johnson&Johnson, Oxford/AstraZeneca, Pfizer/BioNTech, Sinopharm/Beijing",
"source_name": "World Health Organization",
"source_website": "https://covid19.who.int/"
}
<jupyter_code>import pandas as pd
df = pd.read_csv('covid-world-vaccination-progress/country_vaccinations_by_manufacturer.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 35623 entries, 0 to 35622
Data columns (total 4 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 location 35623 non-null object
1 date 35623 non-null object
2 vaccine 35623 non-null object
3 total_vaccinations 35623 non-null int64
dtypes: int64(1), object(3)
memory usage: 1.1+ MB
<jupyter_text>Examples:
{
"location": "Argentina",
"date": "2020-12-29 00:00:00",
"vaccine": "Moderna",
"total_vaccinations": 2
}
{
"location": "Argentina",
"date": "2020-12-29 00:00:00",
"vaccine": "Oxford/AstraZeneca",
"total_vaccinations": 3
}
{
"location": "Argentina",
"date": "2020-12-29 00:00:00",
"vaccine": "Sinopharm/Beijing",
"total_vaccinations": 1
}
{
"location": "Argentina",
"date": "2020-12-29 00:00:00",
"vaccine": "Sputnik V",
"total_vaccinations": 20481
}
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns # Visualization
import matplotlib.pyplot as plt # Visualization
from colorama import Fore
from sklearn.metrics import mean_absolute_error, mean_squared_error
import math
import warnings  # Suppress warnings
warnings.filterwarnings("ignore")
np.random.seed(7)
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv("../input/covid-world-vaccination-progress/country_vaccinations.csv")
df.head()
from datetime import datetime, date
df["date"] = pd.to_datetime(df["date"], format="%Y/%m/%d")
df.head().style.set_properties(subset=["date"], **{"background-color": "dodgerblue"})
df = pd.read_csv(
"../input/covid-world-vaccination-progress/country_vaccinations_by_manufacturer.csv"
)
df.head()
from datetime import datetime, date
df["date"] = pd.to_datetime(df["date"], format="%Y/%m/%d")
df.head().style.set_properties(subset=["date"], **{"background-color": "dodgerblue"})
# To complete the data, as a naive method, we will use ffill (forward fill)
f, ax = plt.subplots(nrows=3, ncols=1, figsize=(15, 25))
for i, column in enumerate(df.drop("date", axis=1).columns):
sns.lineplot(
x=df["date"], y=df[column].fillna(method="ffill"), ax=ax[i], color="dodgerblue"
)
ax[i].set_title("Feature: {}".format(column), fontsize=14)
ax[i].set_ylabel(ylabel=column, fontsize=14)
# # Forecasting
from sklearn.model_selection import TimeSeriesSplit
N_SPLITS = 3
X = df["date"]
y = df["total_vaccinations"]
folds = TimeSeriesSplit(n_splits=N_SPLITS)
f, ax = plt.subplots(nrows=N_SPLITS, ncols=2, figsize=(16, 9))
for i, (train_index, valid_index) in enumerate(folds.split(X)):
X_train, X_valid = X[train_index], X[valid_index]
y_train, y_valid = y[train_index], y[valid_index]
sns.lineplot(x=X_train, y=y_train, ax=ax[i, 0], color="dodgerblue", label="train")
sns.lineplot(
x=X_train[
len(X_train) - len(X_valid) : (len(X_train) - len(X_valid) + len(X_valid))
],
y=y_train[
len(X_train) - len(X_valid) : (len(X_train) - len(X_valid) + len(X_valid))
],
ax=ax[i, 1],
color="dodgerblue",
label="train",
)
for j in range(2):
sns.lineplot(
x=X_valid, y=y_valid, ax=ax[i, j], color="darkorange", label="validation"
)
ax[i, 0].set_title(
f"Rolling Window with Adjusting Training Size (Split {i+1})", fontsize=16
)
ax[i, 1].set_title(
f"Rolling Window with Constant Training Size (Split {i+1})", fontsize=16
)
for i in range(N_SPLITS):
ax[i, 0].set_xlim([date(2021, 1, 1), date(2021, 7, 30)])
ax[i, 1].set_xlim([date(2021, 1, 1), date(2021, 7, 30)])
plt.tight_layout()
plt.show()
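# For reference, the same splitter on a toy series of 10 points: the training window
# grows fold by fold and the validation block always comes strictly after it in time.
for fold, (tr_idx, va_idx) in enumerate(TimeSeriesSplit(n_splits=3).split(np.arange(10))):
    print(f"fold {fold}: train={tr_idx} valid={va_idx}")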
train_size = int(0.85 * len(df))
test_size = len(df) - train_size
univariate_df = df[["date", "total_vaccinations"]].copy()
univariate_df.columns = ["ds", "y"]
train = univariate_df.iloc[:train_size, :]
x_train, y_train = pd.DataFrame(univariate_df.iloc[:train_size, 0]), pd.DataFrame(
univariate_df.iloc[:train_size, 1]
)
x_valid, y_valid = pd.DataFrame(univariate_df.iloc[train_size:, 0]), pd.DataFrame(
univariate_df.iloc[train_size:, 1]
)
print(len(train), len(x_valid))
# ## 1. Prophet
from sklearn.metrics import mean_absolute_error, mean_squared_error
import math
from fbprophet import Prophet
# Train the model
model = Prophet()
model.fit(train)
# x_valid = model.make_future_dataframe(periods=test_size, freq='w')
# Predict on valid set
y_pred = model.predict(x_valid)
# Calculate metrics
score_mae = mean_absolute_error(y_valid, y_pred.tail(test_size)["yhat"])
score_rmse = math.sqrt(mean_squared_error(y_valid, y_pred.tail(test_size)["yhat"]))
print(Fore.GREEN + "RMSE: {}".format(score_rmse))
# Plot the forecast
f, ax = plt.subplots(1)
f.set_figheight(6)
f.set_figwidth(15)
model.plot(y_pred, ax=ax)
sns.lineplot(
x=x_valid["ds"], y=y_valid["y"], ax=ax, color="orange", label="Ground truth"
) # navajowhite
ax.set_title(f"Prediction \n MAE: {score_mae:.2f}, RMSE: {score_rmse:.2f}", fontsize=14)
ax.set_xlabel(xlabel="Date", fontsize=14)
ax.set_ylabel(ylabel="Total Vaccinations", fontsize=14)
plt.show()
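# Prophet also ships a rolling-origin evaluation of its own; a minimal sketch (the
# initial/period/horizon windows below are illustrative placeholders, not tuned to this
# series, and assume the training history is long enough to support them):
from fbprophet.diagnostics import cross_validation, performance_metrics
df_cv = cross_validation(model, initial="120 days", period="30 days", horizon="30 days")
print(performance_metrics(df_cv)[["horizon", "mae", "rmse"]].head())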
# ## 2. ARIMA
from statsmodels.tsa.arima_model import ARIMA
# Fit model
model = ARIMA(y_train, order=(1, 1, 1))
model_fit = model.fit()
# Prediction with ARIMA
y_pred, se, conf = model_fit.forecast(steps=test_size)  # forecast over the same horizon as the hold-out set
# Calculate metrics
score_mae = mean_absolute_error(y_valid, y_pred)
score_rmse = math.sqrt(mean_squared_error(y_valid, y_pred))
print(Fore.GREEN + "RMSE: {}".format(score_rmse))
f, ax = plt.subplots(1)
f.set_figheight(5)
f.set_figwidth(15)
model_fit.plot_predict(1, 599, ax=ax)
sns.lineplot(
x=x_valid.index, y=y_valid["y"], ax=ax, color="orange", label="Ground truth"
) # navajowhite
ax.set_title(f"Prediction \n MAE: {score_mae:.2f}, RMSE: {score_rmse:.2f}", fontsize=14)
ax.set_xlabel(xlabel="Date", fontsize=14)
ax.set_ylabel(ylabel="Total Vaccinations", fontsize=14)
# ax.set_ylim(-35, -18)  # likely left over from a template with log-scaled data; it would hide the raw counts here
plt.show()
f, ax = plt.subplots(1)
f.set_figheight(6)
f.set_figwidth(15)
model_fit.plot_predict(1, 599, ax=ax)
sns.lineplot(
x=x_valid.index, y=y_valid["y"], ax=ax, color="orange", label="Ground truth"
) # navajowhite
ax.set_title(f"Prediction \n MAE: {score_mae:.2f}, RMSE: {score_rmse:.2f}", fontsize=14)
ax.set_xlabel(xlabel="Date", fontsize=14)
ax.set_ylabel(ylabel="total Vaccinations", fontsize=14)
# ax.set_ylim(-35, -18)  # likely left over from a template with log-scaled data; it would hide the raw counts here
plt.show()
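# Before delegating order selection to auto_arima below, ACF/PACF plots of the differenced
# training series are a common manual sanity check on plausible (p, q) ranges; a short
# illustrative sketch:
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
fig, axes = plt.subplots(1, 2, figsize=(14, 4))
plot_acf(y_train["y"].diff().dropna(), lags=40, ax=axes[0])
plot_pacf(y_train["y"].diff().dropna(), lags=40, ax=axes[1])
plt.show()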
# ## 3. Auto-ARIMA
from statsmodels.tsa.arima_model import ARIMA
import pmdarima as pm
model = pm.auto_arima(
y_train,
start_p=1,
start_q=1,
test="adf", # use adftest to find optimal 'd'
max_p=3,
max_q=3, # maximum p and q
m=1, # frequency of series
d=None, # let model determine 'd'
seasonal=False, # No Seasonality
start_P=0,
D=0,
trace=True,
error_action="ignore",
suppress_warnings=True,
stepwise=True,
)
print(model.summary())
model.plot_diagnostics(figsize=(16, 8))
plt.show()
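# auto_arima returns a fitted model that can forecast directly; a minimal sketch scoring
# it on the same hold-out horizon used for the manual ARIMA above:
auto_pred = model.predict(n_periods=test_size)
score_rmse_auto = math.sqrt(mean_squared_error(y_valid, auto_pred))
print(Fore.GREEN + "Auto-ARIMA RMSE: {}".format(score_rmse_auto))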
# ## 4. LSTM
from sklearn.preprocessing import MinMaxScaler
data = univariate_df.filter(["y"])
# Convert the dataframe to a numpy array
dataset = data.values
scaler = MinMaxScaler(feature_range=(-1, 0))
scaled_data = scaler.fit_transform(dataset)
scaled_data[:10]
# Defines the rolling window
look_back = 52
# Split into train and test sets
train, test = (
scaled_data[: train_size - look_back, :],
scaled_data[train_size - look_back :, :],
)
def create_dataset(dataset, look_back=1):
X, Y = [], []
for i in range(look_back, len(dataset)):
a = dataset[i - look_back : i, 0]
X.append(a)
Y.append(dataset[i, 0])
return np.array(X), np.array(Y)
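# Quick illustration of the windowing above (toy numbers only): with look_back=3 the
# sequence 0..5 yields inputs of three consecutive values and the next value as target.
demo_X, demo_Y = create_dataset(np.arange(6, dtype=float).reshape(-1, 1), look_back=3)
print(demo_X)  # [[0. 1. 2.] [1. 2. 3.] [2. 3. 4.]]
print(demo_Y)  # [3. 4. 5.]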
x_train, y_train = create_dataset(train, look_back)
x_test, y_test = create_dataset(test, look_back)
# reshape input to be [samples, time steps, features]
x_train = np.reshape(x_train, (x_train.shape[0], 1, x_train.shape[1]))
x_test = np.reshape(x_test, (x_test.shape[0], 1, x_test.shape[1]))
print(len(x_train), len(x_test))
from keras.models import Sequential
from keras.layers import Dense, LSTM
# Build the LSTM model
model = Sequential()
model.add(
LSTM(128, return_sequences=True, input_shape=(x_train.shape[1], x_train.shape[2]))
)
model.add(LSTM(64, return_sequences=False))
model.add(Dense(25))
model.add(Dense(1))
# Compile the model
model.compile(optimizer="adam", loss="mean_squared_error")
# Train the model
model.fit(x_train, y_train, batch_size=1, epochs=5, validation_data=(x_test, y_test))
model.summary()
# Let's predict with the model
train_predict = model.predict(x_train)
test_predict = model.predict(x_test)
# invert predictions
train_predict = scaler.inverse_transform(train_predict)
y_train = scaler.inverse_transform([y_train])
test_predict = scaler.inverse_transform(test_predict)
y_test = scaler.inverse_transform([y_test])
# Get the root mean squared error (RMSE) and MAE
score_rmse = np.sqrt(mean_squared_error(y_test[0], test_predict[:, 0]))
score_mae = mean_absolute_error(y_test[0], test_predict[:, 0])
print(Fore.GREEN + "RMSE: {}".format(score_rmse))
x_train_ticks = univariate_df.head(train_size)["ds"]
y_train = univariate_df.head(train_size)["y"]
x_test_ticks = univariate_df.tail(test_size)["ds"]
# Plot the forecast
f, ax = plt.subplots(1)
f.set_figheight(6)
f.set_figwidth(15)
sns.lineplot(x=x_train_ticks, y=y_train, ax=ax, label="Train Set") # navajowhite
sns.lineplot(
x=x_test_ticks, y=test_predict[:, 0], ax=ax, color="green", label="Prediction"
) # navajowhite
sns.lineplot(
x=x_test_ticks, y=y_test[0], ax=ax, color="orange", label="Ground truth"
) # navajowhite
ax.set_title(f"Prediction \n MAE: {score_mae:.2f}, RMSE: {score_rmse:.2f}", fontsize=14)
ax.set_xlabel(xlabel="Date", fontsize=14)
ax.set_ylabel(ylabel="Total Vaccinations", fontsize=14)
plt.show()
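# The evaluation above is one-step-ahead on windows built from known values; to forecast
# beyond the last date, predictions have to be fed back in recursively. A minimal sketch
# (14 steps is an arbitrary illustration; errors compound quickly this way):
last_window = scaled_data[-look_back:, 0].tolist()
future_scaled = []
for _ in range(14):
    x_in = np.array(last_window[-look_back:]).reshape(1, 1, look_back)
    next_val = float(model.predict(x_in)[0, 0])
    future_scaled.append(next_val)
    last_window.append(next_val)
future_values = scaler.inverse_transform(np.array(future_scaled).reshape(-1, 1)).ravel()
print(future_values)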
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/534/69534110.ipynb
|
covid-world-vaccination-progress
|
gpreda
|
[{"Id": 69534110, "ScriptId": 18985204, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6046260, "CreationDate": "08/01/2021 08:04:37", "VersionNumber": 2.0, "Title": "Timeseries Analysis of Vaccinations", "EvaluationDate": "08/01/2021", "IsChange": true, "TotalLines": 308.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 307.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
|
[{"Id": 92829968, "KernelVersionId": 69534110, "SourceDatasetVersionId": 2482828}]
|
[{"Id": 2482828, "DatasetId": 1093816, "DatasourceVersionId": 2525374, "CreatorUserId": 769452, "LicenseName": "CC0: Public Domain", "CreationDate": "07/31/2021 06:32:10", "VersionNumber": 164.0, "Title": "COVID-19 World Vaccination Progress", "Slug": "covid-world-vaccination-progress", "Subtitle": "Daily and Total Vaccination for COVID-19 in the World from Our World in Data", "Description": "### Context\n\nData is collected daily from [**Our World in Data**](https://ourworldindata.org/) GitHub repository for [covid-19](https://github.com/owid/covid-19-data), merged and uploaded. Country level vaccination data is gathered and assembled in one single file. Then, this data file is merged with locations data file to include vaccination sources information. A second file, with manufacturers information, is included.\n\n\n### Content\n\nThe data (country vaccinations) contains the following information:\n* **Country**- this is the country for which the vaccination information is provided; \n* **Country ISO Code** - ISO code for the country; \n* **Date** - date for the data entry; for some of the dates we have only the daily vaccinations, for others, only the (cumulative) total; \n* **Total number of vaccinations** - this is the absolute number of total immunizations in the country; \n* **Total number of people vaccinated** - a person, depending on the immunization scheme, will receive one or more (typically 2) vaccines; at a certain moment, the number of vaccination might be larger than the number of people; \n* **Total number of people fully vaccinated** - this is the number of people that received the entire set of immunization according to the immunization scheme (typically 2); at a certain moment in time, there might be a certain number of people that received one vaccine and another number (smaller) of people that received all vaccines in the scheme; \n* **Daily vaccinations (raw)** - for a certain data entry, the number of vaccination for that date/country; \n* **Daily vaccinations** - for a certain data entry, the number of vaccination for that date/country; \n* **Total vaccinations per hundred** - ratio (in percent) between vaccination number and total population up to the date in the country; \n* **Total number of people vaccinated per hundred** - ratio (in percent) between population immunized and total population up to the date in the country; \n* **Total number of people fully vaccinated per hundred** - ratio (in percent) between population fully immunized and total population up to the date in the country; \n* **Number of vaccinations per day** - number of daily vaccination for that day and country; \n* **Daily vaccinations per million** - ratio (in ppm) between vaccination number and total population for the current date in the country; \n* **Vaccines used in the country** - total number of vaccines used in the country (up to date); \n* **Source name** - source of the information (national authority, international organization, local organization etc.); \n* **Source website** - website of the source of information; \n\n\nThere is a second file added recently (country vaccinations by manufacturer), with the following columns:\n* **Location** - country; \n* **Date** - date; \n* **Vaccine** - vaccine type; \n* **Total number of vaccinations** - total number of vaccinations / current time and vaccine type.\n\n\n### Acknowledgements\n\nI would like to specify that I am only making available **Our World in Data** collected data about vaccinations to Kagglers. 
My contribution is very small, just daily collection, merge and upload of the updated version, as maintained by **Our World in Data** in their GitHub repository.\n\n### Inspiration\n\nTrack COVID-19 vaccination in the World, answer instantly to your questions: \n- Which country is using what vaccine? \n- In which country the vaccination programme is more advanced? \n- Where are vaccinated more people per day? But in terms of percent from entire population ?\n\nCombine this dataset with [COVID-19 World Testing Progress](https://www.kaggle.com/gpreda/covid19-world-testing-progress) and [COVID-19 Variants Worldwide Evolution](https://www.kaggle.com/gpreda/covid19-variants) to get more insights on the dynamics of the pandemics, as reflected in the interdependence of amount of testing performed, results of sequencing and vaccination campaigns.", "VersionNotes": "164", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1093816, "CreatorUserId": 769452, "OwnerUserId": 769452.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 3385976.0, "CurrentDatasourceVersionId": 3437501.0, "ForumId": 1110973, "Type": 2, "CreationDate": "01/12/2021 17:01:16", "LastActivityDate": "01/12/2021", "TotalViews": 542075, "TotalDownloads": 96188, "TotalVotes": 2164, "TotalKernels": 428}]
|
[{"Id": 769452, "UserName": "gpreda", "DisplayName": "Gabriel Preda", "RegisterDate": "10/27/2016", "PerformanceTier": 4}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns # Visualization
import matplotlib.pyplot as plt # Visualization
from colorama import Fore
from sklearn.metrics import mean_absolute_error, mean_squared_error
import math
import warnings # Supress warnings
warnings.filterwarnings("ignore")
np.random.seed(7)
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv("../input/covid-world-vaccination-progress/country_vaccinations.csv")
df.head()
from datetime import datetime, date
df["date"] = pd.to_datetime(df["date"], format="%Y/%m/%d")
df.head().style.set_properties(subset=["date"], **{"background-color": "dodgerblue"})
df = pd.read_csv(
"../input/covid-world-vaccination-progress/country_vaccinations_by_manufacturer.csv"
)
df.head()
from datetime import datetime, date
df["date"] = pd.to_datetime(df["date"], format="%Y/%m/%d")
df.head().style.set_properties(subset=["date"], **{"background-color": "dodgerblue"})
# To complete the data, as a naive method, we will use ffill (forward fill); a tiny standalone demo of ffill follows the plots below
f, ax = plt.subplots(nrows=3, ncols=1, figsize=(15, 25))
for i, column in enumerate(df.drop("date", axis=1).columns):
sns.lineplot(
x=df["date"], y=df[column].fillna(method="ffill"), ax=ax[i], color="dodgerblue"
)
ax[i].set_title("Feature: {}".format(column), fontsize=14)
ax[i].set_ylabel(ylabel=column, fontsize=14)
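# A tiny, hypothetical illustration (toy Series, not the vaccination data) of
# how forward fill propagates the last known observation into the gaps:
_ffill_demo = pd.Series([1.0, np.nan, np.nan, 4.0, np.nan])
print(_ffill_demo.fillna(method="ffill").tolist())  # [1.0, 1.0, 1.0, 4.0, 4.0]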
# # Forecasting
from sklearn.model_selection import TimeSeriesSplit
N_SPLITS = 3
X = df["date"]
y = df["total_vaccinations"]
folds = TimeSeriesSplit(n_splits=N_SPLITS)
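# A small, hypothetical sanity check of how TimeSeriesSplit orders the folds:
# with 10 samples and 3 splits each validation fold has 2 samples, and the
# training window always precedes it in time (no shuffling).
for _fold, (_tr, _va) in enumerate(TimeSeriesSplit(n_splits=3).split(np.arange(10))):
    print(_fold, _tr, _va)
# expected: train [0..3] / valid [4 5], then [0..5] / [6 7], then [0..7] / [8 9]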
f, ax = plt.subplots(nrows=N_SPLITS, ncols=2, figsize=(16, 9))
for i, (train_index, valid_index) in enumerate(folds.split(X)):
X_train, X_valid = X[train_index], X[valid_index]
y_train, y_valid = y[train_index], y[valid_index]
sns.lineplot(x=X_train, y=y_train, ax=ax[i, 0], color="dodgerblue", label="train")
    sns.lineplot(
        x=X_train[len(X_train) - len(X_valid) :],
        y=y_train[len(X_train) - len(X_valid) :],
        ax=ax[i, 1],
        color="dodgerblue",
        label="train",
    )
for j in range(2):
sns.lineplot(
x=X_valid, y=y_valid, ax=ax[i, j], color="darkorange", label="validation"
)
ax[i, 0].set_title(
f"Rolling Window with Adjusting Training Size (Split {i+1})", fontsize=16
)
ax[i, 1].set_title(
f"Rolling Window with Constant Training Size (Split {i+1})", fontsize=16
)
for i in range(N_SPLITS):
ax[i, 0].set_xlim([date(2021, 1, 1), date(2021, 7, 30)])
ax[i, 1].set_xlim([date(2021, 1, 1), date(2021, 7, 30)])
plt.tight_layout()
plt.show()
train_size = int(0.85 * len(df))
test_size = len(df) - train_size
univariate_df = df[["date", "total_vaccinations"]].copy()
univariate_df.columns = ["ds", "y"]
train = univariate_df.iloc[:train_size, :]
x_train, y_train = pd.DataFrame(univariate_df.iloc[:train_size, 0]), pd.DataFrame(
univariate_df.iloc[:train_size, 1]
)
x_valid, y_valid = pd.DataFrame(univariate_df.iloc[train_size:, 0]), pd.DataFrame(
univariate_df.iloc[train_size:, 1]
)
print(len(train), len(x_valid))
# ## 1.Prophet
from sklearn.metrics import mean_absolute_error, mean_squared_error
import math
from fbprophet import Prophet
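# Note: in newer releases the package was renamed from "fbprophet" to "prophet";
# if the import above fails, `from prophet import Prophet` is the likely
# equivalent (environment-dependent).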
# Train the model
model = Prophet()
model.fit(train)
# x_valid = model.make_future_dataframe(periods=test_size, freq='w')
# Predict on valid set
y_pred = model.predict(x_valid)
# Calculate metrics
score_mae = mean_absolute_error(y_valid, y_pred.tail(test_size)["yhat"])
score_rmse = math.sqrt(mean_squared_error(y_valid, y_pred.tail(test_size)["yhat"]))
print(Fore.GREEN + "RMSE: {}".format(score_rmse))
# Plot the forecast
f, ax = plt.subplots(1)
f.set_figheight(6)
f.set_figwidth(15)
model.plot(y_pred, ax=ax)
sns.lineplot(
x=x_valid["ds"], y=y_valid["y"], ax=ax, color="orange", label="Ground truth"
) # navajowhite
ax.set_title(f"Prediction \n MAE: {score_mae:.2f}, RMSE: {score_rmse:.2f}", fontsize=14)
ax.set_xlabel(xlabel="Date", fontsize=14)
ax.set_ylabel(ylabel="Total Vaccinations", fontsize=14)
plt.show()
# ## 2.ARIMA
from statsmodels.tsa.arima_model import ARIMA
# Fit model
model = ARIMA(y_train, order=(1, 1, 1))
model_fit = model.fit()
# Prediction with ARIMA
y_pred, se, conf = model_fit.forecast(test_size)  # forecast one step per validation point
# Calculate metrics
score_mae = mean_absolute_error(y_valid, y_pred)
score_rmse = math.sqrt(mean_squared_error(y_valid, y_pred))
print(Fore.GREEN + "RMSE: {}".format(score_rmse))
f, ax = plt.subplots(1)
f.set_figheight(5)
f.set_figwidth(15)
model_fit.plot_predict(1, 599, ax=ax)
sns.lineplot(
x=x_valid.index, y=y_valid["y"], ax=ax, color="orange", label="Ground truth"
) # navajowhite
ax.set_title(f"Prediction \n MAE: {score_mae:.2f}, RMSE: {score_rmse:.2f}", fontsize=14)
ax.set_xlabel(xlabel="Date", fontsize=14)
ax.set_ylabel(ylabel="Total Vaccinations", fontsize=14)
ax.set_ylim(-35, -18)
plt.show()
f, ax = plt.subplots(1)
f.set_figheight(6)
f.set_figwidth(15)
model_fit.plot_predict(1, 599, ax=ax)
sns.lineplot(
x=x_valid.index, y=y_valid["y"], ax=ax, color="orange", label="Ground truth"
) # navajowhite
ax.set_title(f"Prediction \n MAE: {score_mae:.2f}, RMSE: {score_rmse:.2f}", fontsize=14)
ax.set_xlabel(xlabel="Date", fontsize=14)
ax.set_ylabel(ylabel="total Vaccinations", fontsize=14)
ax.set_ylim(-35, -18)
plt.show()
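# The ARIMA class imported above (statsmodels.tsa.arima_model) was deprecated
# and later removed from statsmodels. A minimal sketch of the same fit/forecast
# step with the newer API follows; it is guarded so the notebook still runs in
# environments where only the legacy class exists, and it is an illustrative
# assumption rather than part of the original analysis.
try:
    from statsmodels.tsa.arima.model import ARIMA as ARIMA_v2

    res_v2 = ARIMA_v2(y_train, order=(1, 1, 1)).fit()
    # the newer .forecast() returns point forecasts only (no std-err/conf tuple)
    print(res_v2.forecast(steps=len(y_valid))[:5])
except ImportError:
    print("statsmodels.tsa.arima.model is not available in this environment")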
# ## 3. Auto-ARIMA
from statsmodels.tsa.arima_model import ARIMA
import pmdarima as pm
model = pm.auto_arima(
y_train,
start_p=1,
start_q=1,
test="adf", # use adftest to find optimal 'd'
max_p=3,
max_q=3, # maximum p and q
m=1, # frequency of series
d=None, # let model determine 'd'
seasonal=False, # No Seasonality
start_P=0,
D=0,
trace=True,
error_action="ignore",
suppress_warnings=True,
stepwise=True,
)
print(model.summary())
model.plot_diagnostics(figsize=(16, 8))
plt.show()
# ## 4. LSTM
from sklearn.preprocessing import MinMaxScaler
data = univariate_df.filter(["y"])
# Convert the dataframe to a numpy array
dataset = data.values
scaler = MinMaxScaler(feature_range=(-1, 0))
scaled_data = scaler.fit_transform(dataset)
scaled_data[:10]
# Defines the rolling window
look_back = 52
# Split into train and test sets
train, test = (
scaled_data[: train_size - look_back, :],
scaled_data[train_size - look_back :, :],
)
def create_dataset(dataset, look_back=1):
X, Y = [], []
for i in range(look_back, len(dataset)):
a = dataset[i - look_back : i, 0]
X.append(a)
Y.append(dataset[i, 0])
return np.array(X), np.array(Y)
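# Quick illustrative check on toy data (not the vaccination series): with
# look_back=2 each input row holds the previous 2 values and the target is the
# value that follows them.
_toy = np.arange(5, dtype=float).reshape(-1, 1)
_tx, _ty = create_dataset(_toy, look_back=2)
print(_tx.tolist(), _ty.tolist())  # [[0.0, 1.0], [1.0, 2.0], [2.0, 3.0]] [2.0, 3.0, 4.0]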
x_train, y_train = create_dataset(train, look_back)
x_test, y_test = create_dataset(test, look_back)
# reshape input to be [samples, time steps, features]
x_train = np.reshape(x_train, (x_train.shape[0], 1, x_train.shape[1]))
x_test = np.reshape(x_test, (x_test.shape[0], 1, x_test.shape[1]))
print(len(x_train), len(x_test))
from keras.models import Sequential
from keras.layers import Dense, LSTM
# Build the LSTM model
model = Sequential()
model.add(
LSTM(128, return_sequences=True, input_shape=(x_train.shape[1], x_train.shape[2]))
)
model.add(LSTM(64, return_sequences=False))
model.add(Dense(25))
model.add(Dense(1))
# Compile the model
model.compile(optimizer="adam", loss="mean_squared_error")
# Train the model
model.fit(x_train, y_train, batch_size=1, epochs=5, validation_data=(x_test, y_test))
model.summary()
# Let's predict with the model
train_predict = model.predict(x_train)
test_predict = model.predict(x_test)
# invert predictions
train_predict = scaler.inverse_transform(train_predict)
y_train = scaler.inverse_transform([y_train])
test_predict = scaler.inverse_transform(test_predict)
y_test = scaler.inverse_transform([y_test])
# Get the root mean squared error (RMSE) and MAE
score_rmse = np.sqrt(mean_squared_error(y_test[0], test_predict[:, 0]))
score_mae = mean_absolute_error(y_test[0], test_predict[:, 0])
print(Fore.GREEN + "RMSE: {}".format(score_rmse))
x_train_ticks = univariate_df.head(train_size)["ds"]
y_train = univariate_df.head(train_size)["y"]
x_test_ticks = univariate_df.tail(test_size)["ds"]
# Plot the forecast
f, ax = plt.subplots(1)
f.set_figheight(6)
f.set_figwidth(15)
sns.lineplot(x=x_train_ticks, y=y_train, ax=ax, label="Train Set") # navajowhite
sns.lineplot(
x=x_test_ticks, y=test_predict[:, 0], ax=ax, color="green", label="Prediction"
) # navajowhite
sns.lineplot(
x=x_test_ticks, y=y_test[0], ax=ax, color="orange", label="Ground truth"
) # navajowhite
ax.set_title(f"Prediction \n MAE: {score_mae:.2f}, RMSE: {score_rmse:.2f}", fontsize=14)
ax.set_xlabel(xlabel="Date", fontsize=14)
ax.set_ylabel(ylabel="Total Vaccinations", fontsize=14)
plt.show()
|
[{"covid-world-vaccination-progress/country_vaccinations.csv": {"column_names": "[\"country\", \"iso_code\", \"date\", \"total_vaccinations\", \"people_vaccinated\", \"people_fully_vaccinated\", \"daily_vaccinations_raw\", \"daily_vaccinations\", \"total_vaccinations_per_hundred\", \"people_vaccinated_per_hundred\", \"people_fully_vaccinated_per_hundred\", \"daily_vaccinations_per_million\", \"vaccines\", \"source_name\", \"source_website\"]", "column_data_types": "{\"country\": \"object\", \"iso_code\": \"object\", \"date\": \"object\", \"total_vaccinations\": \"float64\", \"people_vaccinated\": \"float64\", \"people_fully_vaccinated\": \"float64\", \"daily_vaccinations_raw\": \"float64\", \"daily_vaccinations\": \"float64\", \"total_vaccinations_per_hundred\": \"float64\", \"people_vaccinated_per_hundred\": \"float64\", \"people_fully_vaccinated_per_hundred\": \"float64\", \"daily_vaccinations_per_million\": \"float64\", \"vaccines\": \"object\", \"source_name\": \"object\", \"source_website\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 86512 entries, 0 to 86511\nData columns (total 15 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 country 86512 non-null object \n 1 iso_code 86512 non-null object \n 2 date 86512 non-null object \n 3 total_vaccinations 43607 non-null float64\n 4 people_vaccinated 41294 non-null float64\n 5 people_fully_vaccinated 38802 non-null float64\n 6 daily_vaccinations_raw 35362 non-null float64\n 7 daily_vaccinations 86213 non-null float64\n 8 total_vaccinations_per_hundred 43607 non-null float64\n 9 people_vaccinated_per_hundred 41294 non-null float64\n 10 people_fully_vaccinated_per_hundred 38802 non-null float64\n 11 daily_vaccinations_per_million 86213 non-null float64\n 12 vaccines 86512 non-null object \n 13 source_name 86512 non-null object \n 14 source_website 86512 non-null object \ndtypes: float64(9), object(6)\nmemory usage: 9.9+ MB\n", "summary": "{\"total_vaccinations\": {\"count\": 43607.0, \"mean\": 45929644.638727725, \"std\": 224600360.18166688, \"min\": 0.0, \"25%\": 526410.0, \"50%\": 3590096.0, \"75%\": 17012303.5, \"max\": 3263129000.0}, \"people_vaccinated\": {\"count\": 41294.0, \"mean\": 17705077.78979997, \"std\": 70787311.5004759, \"min\": 0.0, \"25%\": 349464.25, \"50%\": 2187310.5, \"75%\": 9152519.75, \"max\": 1275541000.0}, \"people_fully_vaccinated\": {\"count\": 38802.0, \"mean\": 14138299.848152157, \"std\": 57139201.71915868, \"min\": 1.0, \"25%\": 243962.25, \"50%\": 1722140.5, \"75%\": 7559869.5, \"max\": 1240777000.0}, \"daily_vaccinations_raw\": {\"count\": 35362.0, \"mean\": 270599.5782478367, \"std\": 1212426.60195391, \"min\": 0.0, \"25%\": 4668.0, \"50%\": 25309.0, \"75%\": 123492.5, \"max\": 24741000.0}, \"daily_vaccinations\": {\"count\": 86213.0, \"mean\": 131305.48607518588, \"std\": 768238.7732930565, \"min\": 0.0, \"25%\": 900.0, \"50%\": 7343.0, \"75%\": 44098.0, \"max\": 22424286.0}, \"total_vaccinations_per_hundred\": {\"count\": 43607.0, \"mean\": 80.18854312381039, \"std\": 67.91357674747688, \"min\": 0.0, \"25%\": 16.05, \"50%\": 67.52, \"75%\": 132.735, \"max\": 345.37}, \"people_vaccinated_per_hundred\": {\"count\": 41294.0, \"mean\": 40.92731728580423, \"std\": 29.29075864533803, \"min\": 0.0, \"25%\": 11.37, \"50%\": 41.435, \"75%\": 67.91, \"max\": 124.76}, \"people_fully_vaccinated_per_hundred\": {\"count\": 38802.0, \"mean\": 35.52324287407866, \"std\": 28.37625180924737, \"min\": 0.0, \"25%\": 7.02, \"50%\": 31.75, \"75%\": 62.08, \"max\": 
122.37}, \"daily_vaccinations_per_million\": {\"count\": 86213.0, \"mean\": 3257.049157319662, \"std\": 3934.3124401057307, \"min\": 0.0, \"25%\": 636.0, \"50%\": 2050.0, \"75%\": 4682.0, \"max\": 117497.0}}", "examples": "{\"country\":{\"0\":\"Afghanistan\",\"1\":\"Afghanistan\",\"2\":\"Afghanistan\",\"3\":\"Afghanistan\"},\"iso_code\":{\"0\":\"AFG\",\"1\":\"AFG\",\"2\":\"AFG\",\"3\":\"AFG\"},\"date\":{\"0\":\"2021-02-22\",\"1\":\"2021-02-23\",\"2\":\"2021-02-24\",\"3\":\"2021-02-25\"},\"total_vaccinations\":{\"0\":0.0,\"1\":null,\"2\":null,\"3\":null},\"people_vaccinated\":{\"0\":0.0,\"1\":null,\"2\":null,\"3\":null},\"people_fully_vaccinated\":{\"0\":null,\"1\":null,\"2\":null,\"3\":null},\"daily_vaccinations_raw\":{\"0\":null,\"1\":null,\"2\":null,\"3\":null},\"daily_vaccinations\":{\"0\":null,\"1\":1367.0,\"2\":1367.0,\"3\":1367.0},\"total_vaccinations_per_hundred\":{\"0\":0.0,\"1\":null,\"2\":null,\"3\":null},\"people_vaccinated_per_hundred\":{\"0\":0.0,\"1\":null,\"2\":null,\"3\":null},\"people_fully_vaccinated_per_hundred\":{\"0\":null,\"1\":null,\"2\":null,\"3\":null},\"daily_vaccinations_per_million\":{\"0\":null,\"1\":34.0,\"2\":34.0,\"3\":34.0},\"vaccines\":{\"0\":\"Johnson&Johnson, Oxford\\/AstraZeneca, Pfizer\\/BioNTech, Sinopharm\\/Beijing\",\"1\":\"Johnson&Johnson, Oxford\\/AstraZeneca, Pfizer\\/BioNTech, Sinopharm\\/Beijing\",\"2\":\"Johnson&Johnson, Oxford\\/AstraZeneca, Pfizer\\/BioNTech, Sinopharm\\/Beijing\",\"3\":\"Johnson&Johnson, Oxford\\/AstraZeneca, Pfizer\\/BioNTech, Sinopharm\\/Beijing\"},\"source_name\":{\"0\":\"World Health Organization\",\"1\":\"World Health Organization\",\"2\":\"World Health Organization\",\"3\":\"World Health Organization\"},\"source_website\":{\"0\":\"https:\\/\\/covid19.who.int\\/\",\"1\":\"https:\\/\\/covid19.who.int\\/\",\"2\":\"https:\\/\\/covid19.who.int\\/\",\"3\":\"https:\\/\\/covid19.who.int\\/\"}}"}}, {"covid-world-vaccination-progress/country_vaccinations_by_manufacturer.csv": {"column_names": "[\"location\", \"date\", \"vaccine\", \"total_vaccinations\"]", "column_data_types": "{\"location\": \"object\", \"date\": \"object\", \"vaccine\": \"object\", \"total_vaccinations\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 35623 entries, 0 to 35622\nData columns (total 4 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 location 35623 non-null object\n 1 date 35623 non-null object\n 2 vaccine 35623 non-null object\n 3 total_vaccinations 35623 non-null int64 \ndtypes: int64(1), object(3)\nmemory usage: 1.1+ MB\n", "summary": "{\"total_vaccinations\": {\"count\": 35623.0, \"mean\": 15083574.386969093, \"std\": 51817679.15312671, \"min\": 0.0, \"25%\": 97776.0, \"50%\": 1305506.0, \"75%\": 7932423.0, \"max\": 600519998.0}}", "examples": "{\"location\":{\"0\":\"Argentina\",\"1\":\"Argentina\",\"2\":\"Argentina\",\"3\":\"Argentina\"},\"date\":{\"0\":\"2020-12-29\",\"1\":\"2020-12-29\",\"2\":\"2020-12-29\",\"3\":\"2020-12-29\"},\"vaccine\":{\"0\":\"Moderna\",\"1\":\"Oxford\\/AstraZeneca\",\"2\":\"Sinopharm\\/Beijing\",\"3\":\"Sputnik V\"},\"total_vaccinations\":{\"0\":2,\"1\":3,\"2\":1,\"3\":20481}}"}}]
| true | 2 |
<start_data_description><data_path>covid-world-vaccination-progress/country_vaccinations.csv:
<column_names>
['country', 'iso_code', 'date', 'total_vaccinations', 'people_vaccinated', 'people_fully_vaccinated', 'daily_vaccinations_raw', 'daily_vaccinations', 'total_vaccinations_per_hundred', 'people_vaccinated_per_hundred', 'people_fully_vaccinated_per_hundred', 'daily_vaccinations_per_million', 'vaccines', 'source_name', 'source_website']
<column_types>
{'country': 'object', 'iso_code': 'object', 'date': 'object', 'total_vaccinations': 'float64', 'people_vaccinated': 'float64', 'people_fully_vaccinated': 'float64', 'daily_vaccinations_raw': 'float64', 'daily_vaccinations': 'float64', 'total_vaccinations_per_hundred': 'float64', 'people_vaccinated_per_hundred': 'float64', 'people_fully_vaccinated_per_hundred': 'float64', 'daily_vaccinations_per_million': 'float64', 'vaccines': 'object', 'source_name': 'object', 'source_website': 'object'}
<dataframe_Summary>
{'total_vaccinations': {'count': 43607.0, 'mean': 45929644.638727725, 'std': 224600360.18166688, 'min': 0.0, '25%': 526410.0, '50%': 3590096.0, '75%': 17012303.5, 'max': 3263129000.0}, 'people_vaccinated': {'count': 41294.0, 'mean': 17705077.78979997, 'std': 70787311.5004759, 'min': 0.0, '25%': 349464.25, '50%': 2187310.5, '75%': 9152519.75, 'max': 1275541000.0}, 'people_fully_vaccinated': {'count': 38802.0, 'mean': 14138299.848152157, 'std': 57139201.71915868, 'min': 1.0, '25%': 243962.25, '50%': 1722140.5, '75%': 7559869.5, 'max': 1240777000.0}, 'daily_vaccinations_raw': {'count': 35362.0, 'mean': 270599.5782478367, 'std': 1212426.60195391, 'min': 0.0, '25%': 4668.0, '50%': 25309.0, '75%': 123492.5, 'max': 24741000.0}, 'daily_vaccinations': {'count': 86213.0, 'mean': 131305.48607518588, 'std': 768238.7732930565, 'min': 0.0, '25%': 900.0, '50%': 7343.0, '75%': 44098.0, 'max': 22424286.0}, 'total_vaccinations_per_hundred': {'count': 43607.0, 'mean': 80.18854312381039, 'std': 67.91357674747688, 'min': 0.0, '25%': 16.05, '50%': 67.52, '75%': 132.735, 'max': 345.37}, 'people_vaccinated_per_hundred': {'count': 41294.0, 'mean': 40.92731728580423, 'std': 29.29075864533803, 'min': 0.0, '25%': 11.37, '50%': 41.435, '75%': 67.91, 'max': 124.76}, 'people_fully_vaccinated_per_hundred': {'count': 38802.0, 'mean': 35.52324287407866, 'std': 28.37625180924737, 'min': 0.0, '25%': 7.02, '50%': 31.75, '75%': 62.08, 'max': 122.37}, 'daily_vaccinations_per_million': {'count': 86213.0, 'mean': 3257.049157319662, 'std': 3934.3124401057307, 'min': 0.0, '25%': 636.0, '50%': 2050.0, '75%': 4682.0, 'max': 117497.0}}
<dataframe_info>
RangeIndex: 86512 entries, 0 to 86511
Data columns (total 15 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 country 86512 non-null object
1 iso_code 86512 non-null object
2 date 86512 non-null object
3 total_vaccinations 43607 non-null float64
4 people_vaccinated 41294 non-null float64
5 people_fully_vaccinated 38802 non-null float64
6 daily_vaccinations_raw 35362 non-null float64
7 daily_vaccinations 86213 non-null float64
8 total_vaccinations_per_hundred 43607 non-null float64
9 people_vaccinated_per_hundred 41294 non-null float64
10 people_fully_vaccinated_per_hundred 38802 non-null float64
11 daily_vaccinations_per_million 86213 non-null float64
12 vaccines 86512 non-null object
13 source_name 86512 non-null object
14 source_website 86512 non-null object
dtypes: float64(9), object(6)
memory usage: 9.9+ MB
<some_examples>
{'country': {'0': 'Afghanistan', '1': 'Afghanistan', '2': 'Afghanistan', '3': 'Afghanistan'}, 'iso_code': {'0': 'AFG', '1': 'AFG', '2': 'AFG', '3': 'AFG'}, 'date': {'0': '2021-02-22', '1': '2021-02-23', '2': '2021-02-24', '3': '2021-02-25'}, 'total_vaccinations': {'0': 0.0, '1': None, '2': None, '3': None}, 'people_vaccinated': {'0': 0.0, '1': None, '2': None, '3': None}, 'people_fully_vaccinated': {'0': None, '1': None, '2': None, '3': None}, 'daily_vaccinations_raw': {'0': None, '1': None, '2': None, '3': None}, 'daily_vaccinations': {'0': None, '1': 1367.0, '2': 1367.0, '3': 1367.0}, 'total_vaccinations_per_hundred': {'0': 0.0, '1': None, '2': None, '3': None}, 'people_vaccinated_per_hundred': {'0': 0.0, '1': None, '2': None, '3': None}, 'people_fully_vaccinated_per_hundred': {'0': None, '1': None, '2': None, '3': None}, 'daily_vaccinations_per_million': {'0': None, '1': 34.0, '2': 34.0, '3': 34.0}, 'vaccines': {'0': 'Johnson&Johnson, Oxford/AstraZeneca, Pfizer/BioNTech, Sinopharm/Beijing', '1': 'Johnson&Johnson, Oxford/AstraZeneca, Pfizer/BioNTech, Sinopharm/Beijing', '2': 'Johnson&Johnson, Oxford/AstraZeneca, Pfizer/BioNTech, Sinopharm/Beijing', '3': 'Johnson&Johnson, Oxford/AstraZeneca, Pfizer/BioNTech, Sinopharm/Beijing'}, 'source_name': {'0': 'World Health Organization', '1': 'World Health Organization', '2': 'World Health Organization', '3': 'World Health Organization'}, 'source_website': {'0': 'https://covid19.who.int/', '1': 'https://covid19.who.int/', '2': 'https://covid19.who.int/', '3': 'https://covid19.who.int/'}}
<end_description>
<start_data_description><data_path>covid-world-vaccination-progress/country_vaccinations_by_manufacturer.csv:
<column_names>
['location', 'date', 'vaccine', 'total_vaccinations']
<column_types>
{'location': 'object', 'date': 'object', 'vaccine': 'object', 'total_vaccinations': 'int64'}
<dataframe_Summary>
{'total_vaccinations': {'count': 35623.0, 'mean': 15083574.386969093, 'std': 51817679.15312671, 'min': 0.0, '25%': 97776.0, '50%': 1305506.0, '75%': 7932423.0, 'max': 600519998.0}}
<dataframe_info>
RangeIndex: 35623 entries, 0 to 35622
Data columns (total 4 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 location 35623 non-null object
1 date 35623 non-null object
2 vaccine 35623 non-null object
3 total_vaccinations 35623 non-null int64
dtypes: int64(1), object(3)
memory usage: 1.1+ MB
<some_examples>
{'location': {'0': 'Argentina', '1': 'Argentina', '2': 'Argentina', '3': 'Argentina'}, 'date': {'0': '2020-12-29', '1': '2020-12-29', '2': '2020-12-29', '3': '2020-12-29'}, 'vaccine': {'0': 'Moderna', '1': 'Oxford/AstraZeneca', '2': 'Sinopharm/Beijing', '3': 'Sputnik V'}, 'total_vaccinations': {'0': 2, '1': 3, '2': 1, '3': 20481}}
<end_description>
| 3,477 | 2 | 6,108 | 3,477 |
69534181
|
<jupyter_start><jupyter_text>Mushroom Classification
### Context
Although this dataset was originally contributed to the UCI Machine Learning repository nearly 30 years ago, mushroom hunting (otherwise known as "shrooming") is enjoying new peaks in popularity. Learn which features spell certain death and which are most palatable in this dataset of mushroom characteristics. And how certain can your model be?
### Content
This dataset includes descriptions of hypothetical samples corresponding to 23 species of gilled mushrooms in the Agaricus and Lepiota Family Mushroom drawn from The Audubon Society Field Guide to North American Mushrooms (1981). Each species is identified as definitely edible, definitely poisonous, or of unknown edibility and not recommended. This latter class was combined with the poisonous one. The Guide clearly states that there is no simple rule for determining the edibility of a mushroom; no rule like "leaflets three, let it be'' for Poisonous Oak and Ivy.
- **Time period**: Donated to UCI ML 27 April 1987
### Inspiration
- What types of machine learning models perform best on this dataset?
- Which features are most indicative of a poisonous mushroom?
Kaggle dataset identifier: mushroom-classification
<jupyter_script>import numpy as np
import pandas as pd
import keras
import sklearn
from keras.layers import Dense, Dropout
from keras.models import Sequential
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import shuffle
from keras.utils.np_utils import to_categorical
from keras.utils import np_utils
import matplotlib.pyplot as plt
df = pd.read_csv("../input/mushroom-classification/mushrooms.csv")
df.head()
df.describe()
# Let's confirm that all the data are strings/objects
df.dtypes
# We confirmed that all the data are objects so we need to do some label encoding and make them into actual categorical values. Before that, we should check for NA values that may harm our data.
df.isna().sum()
# No values are NA, so let's get to work on making this data usable
encoder = LabelEncoder()
for i in df.columns:
encoder.fit(df[i])
df[i] = encoder.transform(df[i])
df.head()
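# Note: LabelEncoder assigns arbitrary integer codes, which implicitly imposes an
# ordering on nominal features. A tiny, hypothetical illustration of the one-hot
# alternative (not what this notebook uses for its model):
_demo = pd.DataFrame({"cap-color": ["n", "y", "w"]})
print(pd.get_dummies(_demo))  # one 0/1 indicator column per category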
# Now that all of our columns are encoded, we can start separating our features and the target variable.
X = df.drop("class", axis=1)
y = df["class"]
print(X.head())
print(y.head())
# Our data is now in the shape it should be, so let's start the training process using Keras
x_train, x_test, y_train, y_test = train_test_split(
X, y, test_size=0.15, random_state=5
)
print(x_train.shape) # input shape
print(y_train.shape)
model = Sequential()
model.add(Dense(64, input_dim=x_train.shape[1], activation="relu"))
model.add(Dropout(0.2))
model.add(Dense(1, activation="sigmoid"))
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
model.fit(x_train, y_train, validation_split=0.1, batch_size=12, epochs=80)
scores = model.evaluate(x_test, y_test)
print("Accuracy: %.2f%%" % (scores[1] * 100))
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/534/69534181.ipynb
|
mushroom-classification
| null |
[{"Id": 69534181, "ScriptId": 18984817, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7138379, "CreationDate": "08/01/2021 08:05:57", "VersionNumber": 1.0, "Title": "Mushroom with Keras(100% accuracy)", "EvaluationDate": "08/01/2021", "IsChange": true, "TotalLines": 61.0, "LinesInsertedFromPrevious": 61.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
|
[{"Id": 92830109, "KernelVersionId": 69534181, "SourceDatasetVersionId": 974}]
|
[{"Id": 974, "DatasetId": 478, "DatasourceVersionId": 974, "CreatorUserId": 495305, "LicenseName": "CC0: Public Domain", "CreationDate": "12/01/2016 23:08:00", "VersionNumber": 1.0, "Title": "Mushroom Classification", "Slug": "mushroom-classification", "Subtitle": "Safe to eat or deadly poison?", "Description": "### Context\n\nAlthough this dataset was originally contributed to the UCI Machine Learning repository nearly 30 years ago, mushroom hunting (otherwise known as \"shrooming\") is enjoying new peaks in popularity. Learn which features spell certain death and which are most palatable in this dataset of mushroom characteristics. And how certain can your model be?\n\n### Content \n\nThis dataset includes descriptions of hypothetical samples corresponding to 23 species of gilled mushrooms in the Agaricus and Lepiota Family Mushroom drawn from The Audubon Society Field Guide to North American Mushrooms (1981). Each species is identified as definitely edible, definitely poisonous, or of unknown edibility and not recommended. This latter class was combined with the poisonous one. The Guide clearly states that there is no simple rule for determining the edibility of a mushroom; no rule like \"leaflets three, let it be'' for Poisonous Oak and Ivy.\n\n- **Time period**: Donated to UCI ML 27 April 1987\n\n### Inspiration\n\n- What types of machine learning models perform best on this dataset?\n\n- Which features are most indicative of a poisonous mushroom?\n\n### Acknowledgements\n\nThis dataset was originally donated to the UCI Machine Learning repository. You can learn more about past research using the data [here][1]. \n\n#[Start a new kernel][2]\n\n\n [1]: https://archive.ics.uci.edu/ml/datasets/Mushroom\n [2]: https://www.kaggle.com/uciml/mushroom-classification/kernels?modal=true", "VersionNotes": "Initial release", "TotalCompressedBytes": 374003.0, "TotalUncompressedBytes": 374003.0}]
|
[{"Id": 478, "CreatorUserId": 495305, "OwnerUserId": NaN, "OwnerOrganizationId": 7.0, "CurrentDatasetVersionId": 974.0, "CurrentDatasourceVersionId": 974.0, "ForumId": 2099, "Type": 2, "CreationDate": "12/01/2016 23:08:00", "LastActivityDate": "02/06/2018", "TotalViews": 873597, "TotalDownloads": 114985, "TotalVotes": 2206, "TotalKernels": 1371}]
| null |
import numpy as np
import pandas as pd
import keras
import sklearn
from keras.layers import Dense, Dropout
from keras.models import Sequential
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import shuffle
from keras.utils.np_utils import to_categorical
from keras.utils import np_utils
import matplotlib.pyplot as plt
df = pd.read_csv("../input/mushroom-classification/mushrooms.csv")
df.head()
df.describe()
# Let's confirm that all the data are strings/objects
df.dtypes
# We confirmed that all the data are objects so we need to do some label encoding and make them into actual categorical values. Before that, we should check for NA values that may harm our data.
df.isna().sum()
# No values are NA, so let's get to work on making this data usable
encoder = LabelEncoder()
for i in df.columns:
encoder.fit(df[i])
df[i] = encoder.transform(df[i])
df.head()
# Now that all of our columns are encoded, we can start separating our features and the target variable.
X = df.drop("class", axis=1)
y = df["class"]
print(X.head())
print(y.head())
# Our data is now in the shape it should be, so let's start the training process using Keras
x_train, x_test, y_train, y_test = train_test_split(
X, y, test_size=0.15, random_state=5
)
print(x_train.shape) # input shape
print(y_train.shape)
model = Sequential()
model.add(Dense(64, input_dim=x_train.shape[1], activation="relu"))
model.add(Dropout(0.2))
model.add(Dense(1, activation="sigmoid"))
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
model.fit(x_train, y_train, validation_split=0.1, batch_size=12, epochs=80)
scores = model.evaluate(x_test, y_test)
print("Accuracy: %.2f%%" % (scores[1] * 100))
| false | 0 | 545 | 2 | 848 | 545 |
||
69534852
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
from scipy import sparse
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import StandardScaler
ratings = pd.read_csv(
"../input/movie-ratings-by-users-for-recommender-system/ratings.csv"
)
movies = pd.read_csv(
"../input/movie-ratings-by-users-for-recommender-system/movies.csv"
)
ratings = pd.merge(movies, ratings)
ratings = ratings.drop(columns=["genres", "timestamp"])
ratings.head()
user_ratings = ratings.pivot_table(index=["userId"], columns=["title"], values="rating")
user_ratings.head()
user_ratings = user_ratings.dropna(thresh=10, axis=1)
user_ratings = user_ratings.fillna(0)
user_ratings.head()
scaler = StandardScaler()
scaler.fit_transform(user_ratings)
user_ratings.head()
# We could use cosine similarity or Pearson correlation to calculate similarity between movies (based on user ratings); Euclidean distance is a poor choice here because it is driven by raw rating magnitudes rather than by rating patterns. A cosine-similarity sketch is included after the correlation matrix below.
movie_similarity_df = user_ratings.corr(method="pearson")
movie_similarity_df.head()
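# As mentioned above, cosine similarity would also work; a minimal sketch of how
# it could be computed on the same movie columns (this notebook keeps using the
# Pearson correlations computed above):
cosine_similarity_df = pd.DataFrame(
    cosine_similarity(user_ratings.T),
    index=user_ratings.columns,
    columns=user_ratings.columns,
)
cosine_similarity_df.head()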
# **Why (rating-2.5)?**
# Because 2.5 is the threshold I choose to distinguish whether a user likes or dislikes a movie. We could choose a different threshold. But in this case I choose 2.5 because it's the average of min and max score.
# Score > 2.5 -> likes, score < 2.5 -> dislikes
# For example:
# A user dislikes action movies and likes romantic movies. He gives John Wick 1/5.
# John Wick is an action movie. The similarity score between John Wick and John Wick is 1.
# Before Sunrise is a romantic movie. The similarity between John Wick and Before Sunrise is 0.03.
# If we don't subtract 2.5 from the rating, then when we multiply similarity by rating we'll get 1 for John Wick and 0.03 for Before Sunrise, which is unreasonable because the user likes romantic movies more than action movies.
def get_similar_score(movie, rating):
similar_score = movie_similarity_df[movie] * (
rating - 2.5
) # 2.5 is the threshold that decides whether user likes the movie or not
return similar_score
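# Tiny numeric check of the example above (the similarity values 1.0 and 0.03
# are the hypothetical ones from the text): subtracting the 2.5 threshold turns
# the disliked action movie into a strong negative signal instead of the top score.
print(1 * 1.0, 1 * 0.03)  # without the threshold: 1.0 vs 0.03
print((1 - 2.5) * 1.0, (1 - 2.5) * 0.03)  # with the threshold: -1.5 vs -0.045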
test_user = [
("(500) Days of Summer (2009)", 5),
("10 Things I Hate About You (1999)", 4),
("12 Angry Men (1957)", 1),
]
similar_movies = pd.DataFrame()
for movie, rating in test_user:
similar_movies = similar_movies.append(get_similar_score(movie, rating))
similar_movies
# We calculate the sum of values in each column. The higher a movie's summed score, the more likely the user would like it.
# The more ratings we have from a user, the better we can recommend movies to them.
similar_movies.sum().sort_values(ascending=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/534/69534852.ipynb
| null | null |
[{"Id": 69534852, "ScriptId": 17392544, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1712667, "CreationDate": "08/01/2021 08:16:06", "VersionNumber": 9.0, "Title": "Recommender based on user ratings", "EvaluationDate": "08/01/2021", "IsChange": true, "TotalLines": 82.0, "LinesInsertedFromPrevious": 5.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 77.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
from scipy import sparse
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.preprocessing import StandardScaler
ratings = pd.read_csv(
"../input/movie-ratings-by-users-for-recommender-system/ratings.csv"
)
movies = pd.read_csv(
"../input/movie-ratings-by-users-for-recommender-system/movies.csv"
)
ratings = pd.merge(movies, ratings)
ratings = ratings.drop(columns=["genres", "timestamp"])
ratings.head()
user_ratings = ratings.pivot_table(index=["userId"], columns=["title"], values="rating")
user_ratings.head()
user_ratings = user_ratings.dropna(thresh=10, axis=1)
user_ratings = user_ratings.fillna(0)
user_ratings.head()
scaler = StandardScaler()
scaler.fit_transform(user_ratings)
user_ratings.head()
# We could use cosine similarity or Pearson correlation to calculate similarity between movies (based on user ratings); Euclidean distance is a poor choice here because it is driven by raw rating magnitudes rather than by rating patterns.
movie_similarity_df = user_ratings.corr(method="pearson")
movie_similarity_df.head()
# **Why (rating-2.5)?**
# Because 2.5 is the threshold I choose to distinguish whether a user likes or dislikes a movie. We could choose a different threshold. But in this case I choose 2.5 because it's the average of min and max score.
# Score > 2.5 -> likes, score < 2.5 -> dislikes
# For example:
# A user dislikes action movies and likes romantic movies. He gives John Wick 1/5.
# John Wick is an action movie. The similarity score between John Wick and John Wick is 1.
# Before Sunrise is a romantic movie. The similarity between John Wick and Before Sunrise is 0.03.
# If we don't subtract 2.5 from the rating, then when we multiply similarity by rating we'll get 1 for John Wick and 0.03 for Before Sunrise, which is unreasonable because the user likes romantic movies more than action movies.
def get_similar_score(movie, rating):
similar_score = movie_similarity_df[movie] * (
rating - 2.5
) # 2.5 is the threshold that decides whether user likes the movie or not
return similar_score
test_user = [
("(500) Days of Summer (2009)", 5),
("10 Things I Hate About You (1999)", 4),
("12 Angry Men (1957)", 1),
]
similar_movies = pd.DataFrame()
for movie, rating in test_user:
similar_movies = similar_movies.append(get_similar_score(movie, rating))
similar_movies
# We calculate the sum of values in each column. The higher a movie's summed score, the more likely the user would like it.
# The more ratings we have from a user, the better we can recommend movies to them.
similar_movies.sum().sort_values(ascending=False)
| false | 0 | 916 | 0 | 916 | 916 |
||
69534242
|
<jupyter_start><jupyter_text>Dineout Restaurants in India
### Context
With smaller staff, capital and costing requirements, the restaurant business is on a consistent rise. Identifying a successful restaurant demands more than customer ratings. The dataset is a record of ratings, cost and location of all Dineout restaurants in India.
### Content
**Name:** Restaurant name
**Location:** Complete location of the restaurant. It consists of locality and city.
**Locality:** Locality of the restaurant within the city.
**City:** City of the restaurant.
**Cuisine:** Types of cuisines offered by the restaurant.
**Rating:** Rating of the restaurant given by a number of users.
**Votes:** Number of customers rating the restaurant.
**Cost:** Approximate cost for two people.
### Inspiration
How are customers attracted?
Every restaurant has a different story. The success of a restaurant depends on its location, cuisines, cost and ratings. The number of votes indicates the number of customer visits, and the rating signifies how well the restaurant is liked. An analysis of the dataset can reveal customer behaviour.
Kaggle dataset identifier: dineout-restaurants-in-india
<jupyter_code>import pandas as pd
df = pd.read_csv('dineout-restaurants-in-india/dineout_restaurants.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 6595 entries, 0 to 6594
Data columns (total 8 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Name 6595 non-null object
1 Location 6595 non-null object
2 Locality 6595 non-null object
3 City 6595 non-null object
4 Cuisine 6595 non-null object
5 Rating 6595 non-null float64
6 Votes 6595 non-null int64
7 Cost 6595 non-null int64
dtypes: float64(1), int64(2), object(5)
memory usage: 412.3+ KB
<jupyter_text>Examples:
{
"Name": "Local",
"Location": "Scindia House,Connaught Place, Central Delhi",
"Locality": " Central Delhi",
"City": "Delhi",
"Cuisine": "North Indian, Finger Food, Continental",
"Rating": 4.1,
"Votes": 2415,
"Cost": 2000
}
{
"Name": "The G.T. ROAD",
"Location": "M-Block,Connaught Place, Central Delhi",
"Locality": " Central Delhi",
"City": "Delhi",
"Cuisine": "North Indian",
"Rating": 4.3,
"Votes": 2363,
"Cost": 1500
}
{
"Name": "Tamasha",
"Location": "Connaught Place, Central Delhi",
"Locality": " Central Delhi",
"City": "Delhi",
"Cuisine": "Finger Food, North Indian, Italian, Continental, Asian",
"Rating": 4.2,
"Votes": 5016,
"Cost": 2000
}
{
"Name": "The Junkyard Cafe",
"Location": "Connaught Place, Central Delhi",
"Locality": " Central Delhi",
"City": "Delhi",
"Cuisine": "North Indian, Mediterranean, Asian, Italian, Oriental ",
"Rating": 4.2,
"Votes": 2821,
"Cost": 1800
}
<jupyter_script># ## Introduction
# #### The notebook surveys and analyzes Dineout restaurants in the Indian market. To determine regional performance and customer behaviour, a region-based analysis is performed.
# #### The project evaluates Indian restaurants on the basis of multiple attributes. This abstracts the key elements that are essential for restaurants to hold a beneficial position in a competitive market. A comparison among different regions is also provided to highlight the scope for improvement.
# ## Table of Contents
# * [1. Analyzing Dataframe](#1)
# * [2. How are restaurants distributed across India?](#2)
# * [3. How are average ratings distributed across India?](#3)
# * [4. How is cost distributed across India?](#4)
# * [5. How are votes distributed across India?](#5)
# * [6. How is the overall performance of restaurants across different states?](#6)
# * [7. What are top cuisines in India?](#7)
# * [8. How are the cuisines distributed among states?](#8)
# * [9. What are top restaurant locations in Maharashtra, Delhi and Karnataka?](#9)
# * [10. References](#10)
# # Libraries
import pandas as pd
import numpy as np
import plotly.figure_factory as ff
from plotly.offline import iplot
import plotly.express as px
from plotly.subplots import make_subplots
import plotly.graph_objects as go
#
# # Analyzing Dataframe
# Reading dataframe
df = pd.read_csv("../input/dineout-restaurants-in-india/dineout_restaurants.csv")
df.head()
# Evaluating dataframe
print("* Size of dataframe: {}\n".format(df.shape))
print("* Datatype of columns are:\n {}\n".format(df.info()))
df.describe()
df["City"].value_counts()
# **Cities** can be categorized in terms of **State**.
# ## Adding State Column
df["State"] = df["City"]
df["State"] = df["City"].replace(
{
"Bangalore": "Karnataka",
"Delhi": "Delhi NCR",
"Mumbai": "Maharashtra",
"Kolkata": "Bengal",
"Hyderabad": "Telangana",
"Ahmedabad": "Gujarat",
"Chennai": "Tamil Nadu",
"Pune": "Maharashtra",
"Jaipur": "Rajasthan",
"Chandigarh": "Punjab",
"Indore": "Madhya Pradesh",
"Gurgaon": "Delhi NCR",
"Noida": "Delhi NCR",
"Vadodara": "Gujarat",
"Lucknow": "Uttar Pradesh",
"Agra": "Uttar Pradesh",
"Nagpur": "Maharashtra",
"Surat": "Gujarat",
"Ludhiana": "Punjab",
"Goa": "Goa",
"Ghaziabad": "Delhi NCR",
"Udaipur": "Rajasthan",
"Kochi": "Kerala",
}
)
df["State"].value_counts()
# **Kochi** has **just two restaurants**.
# ## Removing Kochi
kochi_df = df[df["City"] == "Kochi"]
kochi_df.index
df = df.drop(kochi_df.index)
df["City"].value_counts()
# ## Distribution of restaurant ratings, cost and votes in India
fig = ff.create_distplot([df.Rating], ["Rating"], bin_size=0.1)
fig.update_layout(
title_text="Distribution of Restaraunt Ratings",
title_font_color="medium turquoise",
title_x=0.47,
font_family="San Serif",
titlefont={"size": 20},
)
iplot(fig, filename="Basic Distplot")
# https://www.kaggle.com/subinium/basic-of-statistical-viz-plotly-seaborn#Table-View
fig = ff.create_distplot([df.Cost], ["Cost"], bin_size=100)
fig.update_layout(
title_text="Distribution of Restaraunt Cost",
title_font_color="medium turquoise",
title_x=0.5,
font_family="San Serif",
titlefont={"size": 20},
)
iplot(fig, filename="Basic Distplot")
fig = ff.create_distplot([df.Votes], ["Votes"], bin_size=200)
fig.update_layout(
title_text="Distribution of Restaraunt Votes",
title_font_color="medium turquoise",
title_x=0.5,
font_family="San Serif",
titlefont={"size": 20},
)
iplot(fig, filename="Basic Distplot")
# The above distributions **do not** provide analysis in **terms of states or cities**. The region-wise restaurant performance is evaluated in the following sections.
# # Question #1: How are restaurants distributed across India?
# Forming dataframes in term of cities and state
city_restnts = df.groupby("City").sum()
state_restnt = df.groupby("State").sum()
# List of states
restnt_state = df["State"].value_counts()
restnt_state
fig = px.bar(x=restnt_state.index, y=restnt_state)
fig.update_traces(marker_color="rgb(12, 128, 128)", opacity=1)
fig.update_layout(
xaxis_title="States",
yaxis_title="Total Restaurants",
title_text="Restaraunt Distribution Across States",
title_x=0.5,
font=dict(family="Courier New, monospace", size=12, color="rgb(12, 128, 128)"),
)
fig.show()
restnt_city = df["City"].value_counts().sort_values(ascending=True)
fig = px.bar(
y=restnt_city.index,
x=restnt_city,
color=restnt_city,
orientation="h",
labels={"color": "Total" + "<br>" + "Restaurants"},
) # color continuous scale
fig.update_layout(
yaxis_title="States",
xaxis_title="Total Restaurants",
title_text="Restaraunt Distribution Across Cities",
title_x=0.5,
font=dict(family="Courier New, monospace", size=12, color="rgb(12, 128, 128)"),
)
fig.show()
#
# # Question #2: How are average ratings distributed across India?
df.head()
# ## #2.1 State-Wise Distribution
# Forming state-wise dataframe
df_state = df.groupby("State").mean()
df_state.reset_index(level=0, inplace=True)
df_state
fig = px.bar(df_state, x="State", y="Rating")
fig.update_traces(marker_color="rgb(12, 128, 128)", opacity=1)
fig.update_layout(
xaxis_title="States",
yaxis_title="Average Rating",
title_text="Rating Distribution Across States",
title_x=0.5,
font=dict(family="Courier New, monospace", size=12, color="rgb(12, 128, 128)"),
)
fig.show()
# The bar graph shows that rating variation is small for different states.
# ### Comparing Ratings with Polar Bar Plot
labels = df_state["State"]
x1 = df_state["Rating"]
num_slices = len(x1)
theta = [(i + 1.5) * 360 / num_slices for i in range(num_slices)]
r = x1
width = [360 / num_slices for _ in range(num_slices)]
barpolar_plots = [
go.Barpolar(r=[r], theta=[t], width=[w], name=n)
for r, t, w, n in zip(r, theta, width, labels)
]
fig = go.Figure(barpolar_plots)
fig.update_layout( # template='ggplot2',
polar=dict(
radialaxis=dict(range=[3.5, 4.25], showticklabels=True),
angularaxis=dict(showticklabels=False, ticks=""),
),
title_text="Comparison of Ratings Across States",
title_x=0.45,
font=dict(
family="Courier New, monospace",
size=12,
),
)
fig.show()
# ## #2.2 City-Wise Distribution
df_city = df.groupby("City").mean()
df_city.reset_index(level=0, inplace=True)
df_city
fig = px.bar(df_city, x="City", y="Rating")
fig.update_traces(marker_color="rgb(12, 128, 128)", opacity=1)
fig.update_layout(
xaxis_title="Cities",
yaxis_title="Average Rating",
title_text="Rating Distribution Across Cities",
title_x=0.5,
font=dict(family="Courier New, monospace", size=12, color="rgb(12, 128, 128)"),
)
fig.show()
# The bar graph shows that rating variation is small for different cities.
# ### Comparing Ratings with Polar Bar Plot
labels = df_city["City"]
x1 = df_city["Rating"]
num_slices = len(x1)
theta = [(i + 1.5) * 360 / num_slices for i in range(num_slices)]
r = x1
width = [360 / num_slices for _ in range(num_slices)]
barpolar_plots = [
go.Barpolar(r=[r], theta=[t], width=[w], name=n)
for r, t, w, n in zip(r, theta, width, labels)
]
fig = go.Figure(barpolar_plots)
fig.update_layout( # template='ggplot2',
polar=dict(
radialaxis=dict(range=[3.5, 4.33], showticklabels=True),
angularaxis=dict(showticklabels=False, ticks=""),
),
yaxis_title="States",
xaxis_title="Total Restaurants",
title_text="Comparison of Ratings Across Cities",
title_x=0.47,
font=dict(
family="Courier New, monospace",
size=12,
# color='rgb(12, 128, 128)'
),
)
fig.show()
# The bar graph shows that rating variation is small for different cities.
# # Question #3: How is cost distributed across India?
# ## #3.1 State-wise Distribution
df_state
# Cost distribution across states
df_state.sort_values(by=["Cost"], inplace=True)
fig = px.bar(
df_state,
x="Cost",
y="State",
color="Cost",
orientation="h",
labels={"Cost": "Average" + "<br>" + "Cost"},
)
fig.update_layout(
yaxis_title="States",
xaxis_title="Average Cost",
title_text="Average Cost Distribution Across States",
title_x=0.5,
font=dict(family="Courier New, monospace", size=12, color="rgb(12, 128, 128)"),
)
fig.show()
# ## #3.2 City-wise Distribution
df_city
# Cost distribution across cities
df_city.sort_values(by=["Cost"], inplace=True)
df_city
fig = px.bar(
df_city,
x="Cost",
y="City",
color="Cost",
orientation="h",
labels={"Cost": "Average" + "<br>" + "Cost"},
)
fig.update_layout(
yaxis_title="States",
xaxis_title="Average Cost",
title_text="Average Cost Distribution Across Cities",
title_x=0.5,
font=dict(family="Courier New, monospace", size=12, color="rgb(12, 128, 128)"),
)
fig.show()
#
# # Question #4: How are votes distributed across India?
# ## #4.1 State-wise Distribution
df_state
# Cost distribution across states
df_state.sort_values(by=["Votes"], inplace=True)
fig = px.bar(
df_state,
x="Votes",
y="State",
color="Votes",
orientation="h",
labels={"Votes": "Average" + "<br>" + "Votes"},
)
fig.update_layout(
yaxis_title="States",
xaxis_title="Average Votes",
title_text="Votes Distribution Across States",
title_x=0.5,
font=dict(family="Courier New, monospace", size=12, color="rgb(12, 128, 128)"),
)
fig.show()
# ## #4.2 City-wise Distribution
# Votes distribution across cities
df_city.sort_values(by=["Votes"], inplace=True)
fig = px.bar(
df_city,
x="Votes",
y="City",
color="Votes",
orientation="h",
labels={"Votes": "Average" + "<br>" + "Votes"},
)
fig.update_layout(
yaxis_title="Cities",
xaxis_title="Average Votes",
title_text="Votes Distribution Across Cities",
title_x=0.5,
font=dict(family="Courier New, monospace", size=12, color="rgb(12, 128, 128)"),
)
fig.show()
#
# # Question #5: How is the overall performance of restaurants across different states?
# ## #5.1 Adding Attributes to State Dataframe
restnt_state
# Extracting total restaurants in each state and forming its dataframe
a = restnt_state.index
b = restnt_state
df_state_restnts = pd.DataFrame(list(zip(a, b)))
df_state_restnts.columns = ["State", "Total Restaurants"]
df_state_restnts = df_state_restnts.set_index("State")
display(df_state_restnts)
df_state
# Taking State column in dataframe as index
df_state = df_state.set_index("State")
df_state
# Matching indices of df_state_restnts with df_state
df_state_restnts = df_state_restnts.reindex(df_state.index)
# Adding total restaurants column to state dataframe
df_state["Total Restaurants"] = df_state_restnts["Total Restaurants"]
df_state
# Normalizing the numeric columns so the attributes are comparable on one scale
df_state_normalized = df_state.copy()
columns = ["Rating", "Votes", "Cost", "Total Restaurants"]
# apply max-scaling: divide each column by its largest absolute value
for column in columns:
df_state_normalized[column] = (
df_state_normalized[column] / df_state_normalized[column].abs().max()
)
# view normalized data
df_state_normalized.reset_index(level=0, inplace=True)
display(df_state_normalized)
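# A toy check of the max-scaling idea used above (hypothetical values): each
# entry is divided by the column's largest absolute value, so everything lands
# in (0, 1] and the polar plots below become comparable across attributes.
_vals = pd.Series([500, 1000, 2000])
print((_vals / _vals.abs().max()).tolist())  # [0.25, 0.5, 1.0]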
# ## #5.2 Comparing Attributes of all States
# Comparing attributes of all states using polar scatter plots
fig = make_subplots(
rows=6, cols=2, specs=[[{"type": "polar"}] * 2] * 6, column_widths=[0.45, 0.45]
)
for index, state in enumerate(df_state_normalized["State"]):
if index % 2 == 0:
row = int((index + 2) / 2)
col = 1
else:
row = int((index + 1) / 2)
col = 2
fig.add_trace(
go.Scatterpolar(
name=df_state_normalized["State"][index],
r=[
df_state_normalized["Rating"][index],
df_state_normalized["Votes"][index],
df_state_normalized["Cost"][index],
df_state_normalized["Total Restaurants"][index],
],
theta=["Rating", "Votes", "Cost", "Total Restaurants"],
fill="toself",
),
row,
col,
)
fig.update_layout(
height=2000,
width=900,
title_text="Comparison of Restaurants in Different States of India",
title_x=0.5,
title_font_color="#4B0082",
)
fig.show()
#
# # Question #6: What are top cuisines in India?
# ## #6.1 Forming Cuisines Dataframe
df.head()
cuisines = df["Cuisine"].str.split(",").explode().unique().tolist()
# Forming cuisine dataframe
data = []
df_filtered = pd.DataFrame()
columns = ["Cuisine", "Total Restaurants", "Rating"]
df_cuisine = pd.DataFrame(columns=columns)
for cuisine in cuisines:
df["Cuisine Verification"] = (
df["Cuisine"].str.contains(cuisine, case=False, na=False).astype(int)
)
df_filtered = df[df["Cuisine Verification"] == 1]
total_restnt = len(df_filtered.index)
df = df.drop(["Cuisine Verification"], axis=1)
avg_rating = df_filtered["Rating"].sum() / total_restnt
df_cuisine = df_cuisine.append(
{
"Cuisine": cuisine,
"Total Restaurants": total_restnt,
"Rating": avg_rating,
},
ignore_index=True,
)
df_cuisine.head(15)
df_cuisine.shape
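# Note: DataFrame.append (used in the loop above) was deprecated and later
# removed in pandas 2.0. A common replacement is to collect plain dicts in a
# list inside the loop and build the frame once at the end, e.g. with a
# hypothetical `rows` list:
#     df_cuisine = pd.DataFrame(rows, columns=["Cuisine", "Total Restaurants", "Rating"])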
# ## #6.2 Identifying Top Cuisines
fig = go.Figure(
data=[
go.Bar(
name="Total Restaurants",
x=df_cuisine["Cuisine"],
y=df_cuisine["Total Restaurants"],
)
]
)
fig.update_traces(marker_color="rgb(12, 128, 128)", opacity=1)
fig.update_layout(
xaxis_title="Cuisines",
yaxis_title="Total Restaurants",
title_text="Cuisine Distribution Across Restaurants",
title_x=0.5,
font=dict(family="Courier New, monospace", size=12, color="rgb(12, 128, 128)"),
)
fig.show()
# Many cuisines are served in very few restaurants.
# ### Filtering cuisines
# Keeping cuisines that are served in more than 300 restaurants
df_cuisine = df_cuisine[df_cuisine["Total Restaurants"] > 300]
df_cuisine.shape
fig = go.Figure(
data=[
go.Bar(
name="Total Restaurants",
x=df_cuisine["Cuisine"],
y=df_cuisine["Total Restaurants"],
)
]
)
fig.update_traces(marker_color="rgb(12, 128, 128)", opacity=1)
fig.update_layout(
xaxis_title="Cuisines",
yaxis_title="Total Restaurants",
title_text="Distribution of Top Cuisines Across Restaurants",
title_x=0.5,
font=dict(family="Courier New, monospace", size=12, color="rgb(12, 128, 128)"),
)
fig.show()
# * Cuisines dataframe consists of duplicate values.
# * Multi-Cuisine is not a valid category.
# Printing some duplicate categories
df_cuisine.Cuisine[0], df_cuisine.Cuisine[13], df_cuisine.Cuisine[
0
], df_cuisine.Cuisine[64]
# Leading spaces before the text are resulting in **duplicates**.
# Resetting the index and removing spaces from the cuisine names so the duplicate labels collapse
df_cuisine = df_cuisine.reset_index(drop=True)
df_cuisine.Cuisine = df_cuisine.Cuisine.str.replace(" ", "")
# Verifying double space removal
df_cuisine.Cuisine[5], df_cuisine.Cuisine[13], df_cuisine.Cuisine[
0
], df_cuisine.Cuisine[3]
# Identifying duplicate values
duplicate_cuisine = df_cuisine.duplicated(subset=["Cuisine"])
duplicate_cuisines = []
duplicate_cuisines = df_cuisine.loc[duplicate_cuisine]["Cuisine"]
duplicate_cuisines
duplicate_indices = []
# Identifying indices of duplicate cuisines
duplicate_bool = []
count = 0
for index, cuisine in enumerate(duplicate_cuisines):
duplicate_bool = df_cuisine["Cuisine"].str.find(cuisine)
for index, value in enumerate(duplicate_bool):
if value == 0:
duplicate_indices.append(index)
duplicate_indices
# Removing duplicate indices and updating attributes
i = 0
for index in duplicate_indices:
if (i) % 2 == 0:
count = 0
# Updating attributes in first duplicate index (or Original Index)
total_restnt_1 = df_cuisine["Total Restaurants"][index]
avg_rating_1 = df_cuisine["Rating"][index]
else:
count = 2
total_restnt_2 = df_cuisine["Total Restaurants"][index]
avg_rating_2 = df_cuisine["Rating"][index]
i += 1
if count == 2:
df_cuisine["Total Restaurants"][(index - 1)] = total_restnt_1 + total_restnt_2
df_cuisine["Rating"][(index - 1)] = (
(total_restnt_1 * avg_rating_1) + (total_restnt_2 * avg_rating_2)
) / (total_restnt_1 + total_restnt_2)
# Removing second duplicate index
df_cuisine = df_cuisine.drop(index)
df_cuisine = df_cuisine.reset_index(drop=True)
df_cuisine
# Dropping Multi-cuisine
df_cuisine = df_cuisine.drop(index=6)
df_cuisine = df_cuisine.reset_index(drop=True)
df_cuisine
# Plotting cuisine with total restaurants
fig = go.Figure(
data=[
go.Bar(
name="Total Restaurants",
x=df_cuisine["Cuisine"],
y=df_cuisine["Total Restaurants"],
)
]
)
fig.update_traces(marker_color="rgb(12, 128, 128)", opacity=1)
fig.update_layout(
xaxis_title="Cuisines",
yaxis_title="Total Restaurants",
title_text="Cuisine Distribution Across Restaurants",
title_x=0.5,
font=dict(family="Courier New, monospace", size=12, color="rgb(12, 128, 128)"),
)
fig.show()
# ## #6.3 Plotting Cuisines with Ratings
# Plotting rating with cuisines
fig = go.Figure(
data=[
go.Bar(name="Rating", x=df_cuisine["Cuisine"], y=df_cuisine["Rating"]),
]
)
fig.update_traces(marker_color="rgb(12, 128, 128)", opacity=1)
fig.update_layout(
xaxis_title="Cuisines",
yaxis_title="Average Rating",
title_text="Rating Distribution of Top Cuisines",
title_x=0.5,
font=dict(family="Courier New, monospace", size=12, color="rgb(12, 128, 128)"),
)
fig.show()
# Analysing with polar plot
labels = df_cuisine["Cuisine"]
x1 = df_cuisine["Rating"]
num_slices = len(x1)
theta = [(i + 1.5) * 360 / num_slices for i in range(num_slices)]
r = x1
width = [360 / num_slices for _ in range(num_slices)]
barpolar_plots = [
go.Barpolar(r=[r], theta=[t], width=[w], name=n)
for r, t, w, n in zip(r, theta, width, labels)
]
fig = go.Figure(barpolar_plots)
fig.update_layout( # template='ggplot2',
polar=dict(
radialaxis=dict(range=[3.8, 4.25], showticklabels=True),
angularaxis=dict(showticklabels=False, ticks=""),
),
yaxis_title="States",
xaxis_title="Total Restaurants",
title_text="Comparison of Ratings of Different Cuisines",
title_x=0.46,
font=dict(
family="Courier New, monospace",
size=12,
# color='rgb(12, 128, 128)'
),
)
fig.show()
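# Geometry of the polar bars above (illustrative numbers only): each of the num_slices wedges is
# 360/num_slices degrees wide and its centre sits at (i + 1.5) * 360 / num_slices degrees, so the
# wedges tile the full circle with a fixed angular offset.
demo_slices = 8
print([round((i + 1.5) * 360 / demo_slices, 1) for i in range(demo_slices)])  # wedge centres
print(360 / demo_slices)  # wedge width in degrees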
#
# # Question #7: How are the cuisines distributed among states?
df.head()
# x = df[df['Cuisine'] == 'Multi-Cuisine']
# x.head()
# state_cuisines
# ## #7.1 Declaring Function for Obtaining Cuisine Information
df_state = pd.DataFrame()
# # Removing Multi-Cuisine
# df = df['Multi-Cuisine']
def cuisine_info(state):
state_cuisines_clean = []
# Forming state dataframe
filter = df["State"] == state
df_state = df[filter].copy()
# Filtering cuisines
state_cuisines = df_state["Cuisine"].str.split(",").explode().unique().tolist()
# Removing 'Multi-Cuisine' category from cuisines
a = "Multi-Cuisine"
b = " Multi-Cuisine"
if a in state_cuisines:
state_cuisines.remove("Multi-Cuisine")
if b in state_cuisines:
state_cuisines.remove(" Multi-Cuisine")
for word in state_cuisines:
        word = word.strip()  # trim only the outer spaces so str.contains below still matches multi-word cuisines
state_cuisines_clean.append(word)
# Removing duplicates from cuisines list
state_cuisines_clean = np.unique(state_cuisines_clean)
state_cuisines_clean
# Forming state cuisine dataframe
df_filtered = pd.DataFrame()
df_cuisine_state = pd.DataFrame()
# Forming cuisine df for state
for cuisine in state_cuisines_clean:
df_state["Cuisine Verification"] = (
df_state["Cuisine"].str.contains(cuisine, case=False, na=False).astype(int)
)
df_filtered = df_state[df_state["Cuisine Verification"] == 1]
total_restnt = len(df_filtered.index)
        total_votes = df_filtered["Votes"].sum()  # sum of customer votes, not the restaurant count
df_state = df_state.drop(["Cuisine Verification"], axis=1)
avg_rating = df_filtered["Rating"].sum() / total_restnt
df_cuisine_state = df_cuisine_state.append(
{
"Cuisine": cuisine,
"Total Restaurants": total_restnt,
"Total Votes": total_votes,
"Rating": avg_rating,
},
ignore_index=True,
)
return df_cuisine_state
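# A more compact variant of the same computation (a sketch, not a drop-in replacement: it matches
# whole cuisine labels after splitting on commas instead of substring matching, and the helper
# name below is illustrative only):
def cuisine_info_compact(state):
    state_df = df[df["State"] == state].copy()
    exploded = state_df.assign(Cuisine=state_df["Cuisine"].str.split(",")).explode("Cuisine")
    exploded["Cuisine"] = exploded["Cuisine"].str.strip()
    exploded = exploded[exploded["Cuisine"] != "Multi-Cuisine"]
    return exploded.groupby("Cuisine", as_index=False).agg(
        **{
            "Total Restaurants": ("Name", "count"),
            "Total Votes": ("Votes", "sum"),
            "Rating": ("Rating", "mean"),
        }
    )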
# ## #7.2 Forming Individual Cuisine Dataframes for all States
# Maharashtra cuisine dataframe
cuisine_maharashtra = cuisine_info("Maharashtra")
# Filtering top cuisines
top_cuisine_maharashtra = cuisine_maharashtra[
cuisine_maharashtra["Total Votes"] > 50
].reset_index(drop=True)
top_cuisine_maharashtra.sort_values(by="Rating", ascending=False, inplace=True)
top_cuisine_maharashtra.reset_index(inplace=True, drop=True)
top_cuisine_maharashtra["Total Votes"] = (
top_cuisine_maharashtra["Total Votes"].astype("str") + " votes"
)
# Delhi NCR cuisine dataframe
cuisine_delhi = cuisine_info("Delhi NCR")
# Filtering top cuisines
top_cuisine_delhi = cuisine_delhi[cuisine_delhi["Total Votes"] > 50].reset_index(
drop=True
)
top_cuisine_delhi.sort_values(by="Rating", ascending=False, inplace=True)
top_cuisine_delhi.reset_index(inplace=True, drop=True)
top_cuisine_delhi["Total Votes"] = (
top_cuisine_delhi["Total Votes"].astype("str") + " votes"
)
# Karnataka cuisine dataframe
cuisine_karnataka = cuisine_info("Karnataka")
# Filtering top cuisines
top_cuisine_karnataka = cuisine_karnataka[
cuisine_karnataka["Total Votes"] > 50
].reset_index(drop=True)
top_cuisine_karnataka.sort_values(by="Rating", ascending=False, inplace=True)
top_cuisine_karnataka.reset_index(inplace=True, drop=True)
top_cuisine_karnataka["Total Votes"] = (
top_cuisine_karnataka["Total Votes"].astype("str") + " votes"
)
# Bengal cuisine dataframe
cuisine_bengal = cuisine_info("Bengal")
# Filtering top cuisines
top_cuisine_bengal = cuisine_bengal[cuisine_bengal["Total Votes"] > 50].reset_index(
drop=True
)
top_cuisine_bengal.sort_values(by="Rating", ascending=False, inplace=True)
top_cuisine_bengal.reset_index(inplace=True, drop=True)
top_cuisine_bengal["Total Votes"] = (
top_cuisine_bengal["Total Votes"].astype("str") + " votes"
)
# Telangana cuisine dataframe
cuisine_telangana = cuisine_info("Telangana")
# Filtering top cuisines
top_cuisine_telangana = cuisine_telangana[
cuisine_telangana["Total Votes"] > 50
].reset_index(drop=True)
top_cuisine_telangana.sort_values(by="Rating", ascending=False, inplace=True)
top_cuisine_telangana.reset_index(inplace=True, drop=True)
top_cuisine_telangana["Total Votes"] = (
top_cuisine_telangana["Total Votes"].astype("str") + " votes"
)
# Gujarat cuisine dataframe
cuisine_gujarat = cuisine_info("Gujarat")
# Filtering top cuisines
top_cuisine_gujarat = cuisine_gujarat[cuisine_gujarat["Total Votes"] > 50].reset_index(
drop=True
)
top_cuisine_gujarat.sort_values(by="Rating", ascending=False, inplace=True)
top_cuisine_gujarat.reset_index(inplace=True, drop=True)
top_cuisine_gujarat["Total Votes"] = (
top_cuisine_gujarat["Total Votes"].astype("str") + " votes"
)
# Tamil Nadu cuisine dataframe
cuisine_tamil = cuisine_info("Tamil Nadu")
# Filtering top cuisines
top_cuisine_tamil = cuisine_tamil[cuisine_tamil["Total Votes"] > 50].reset_index(
drop=True
)
top_cuisine_tamil.sort_values(by="Rating", ascending=False, inplace=True)
top_cuisine_tamil.reset_index(inplace=True, drop=True)
top_cuisine_tamil["Total Votes"] = (
top_cuisine_tamil["Total Votes"].astype("str") + " votes"
)
# Punjab cuisine dataframe
cuisine_punjab = cuisine_info("Punjab")
# Filtering top cuisines
top_cuisine_punjab = cuisine_punjab[cuisine_punjab["Total Votes"] > 50].reset_index(
drop=True
)
top_cuisine_punjab.sort_values(by="Rating", ascending=False, inplace=True)
top_cuisine_punjab.reset_index(inplace=True, drop=True)
top_cuisine_punjab["Total Votes"] = (
top_cuisine_punjab["Total Votes"].astype("str") + " votes"
)
# Rajasthan cuisine dataframe
cuisine_rajasthan = cuisine_info("Rajasthan")
# Filtering top cuisines
top_cuisine_rajasthan = cuisine_rajasthan[
cuisine_rajasthan["Total Votes"] > 50
].reset_index(drop=True)
top_cuisine_rajasthan.sort_values(by="Rating", ascending=False, inplace=True)
top_cuisine_rajasthan.reset_index(inplace=True, drop=True)
top_cuisine_rajasthan["Total Votes"] = (
top_cuisine_rajasthan["Total Votes"].astype("str") + " votes"
)
# Madhya Pradesh cuisine dataframe
cuisine_madhya = cuisine_info("Madhya Pradesh")
# Filtering top cuisines
top_cuisine_madhya = cuisine_madhya[cuisine_madhya["Total Votes"] > 50].reset_index(
drop=True
)
top_cuisine_madhya.sort_values(by="Rating", ascending=False, inplace=True)
top_cuisine_madhya.reset_index(inplace=True, drop=True)
top_cuisine_madhya["Total Votes"] = (
top_cuisine_madhya["Total Votes"].astype("str") + " votes"
)
# Uttar Pradesh cuisine dataframe
cuisine_uttar = cuisine_info("Uttar Pradesh")
# Filtering top cuisines
top_cuisine_uttar = cuisine_uttar[cuisine_uttar["Total Votes"] > 50].reset_index(
drop=True
)
top_cuisine_uttar.sort_values(by="Rating", ascending=False, inplace=True)
top_cuisine_uttar.reset_index(inplace=True, drop=True)
top_cuisine_uttar["Total Votes"] = (
top_cuisine_uttar["Total Votes"].astype("str") + " votes"
)
# # Goa cuisine dataframe
# cuisine_goa = pd.DataFrame()
# cuisine_goa = cuisine_info('Goa')
# cuisine_goa[cuisine_goa['Total Restaurants']>50].head(25)
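# The eleven state blocks above repeat one recipe; the same tables can be produced with a small
# helper (a sketch only; the function name and the min_votes default mirror the code above and
# are illustrative, not part of the original flow):
def top_cuisines_for(state, min_votes=50):
    table = cuisine_info(state)
    table = table[table["Total Votes"] > min_votes].reset_index(drop=True)
    table = table.sort_values(by="Rating", ascending=False).reset_index(drop=True)
    table["Total Votes"] = table["Total Votes"].astype("str") + " votes"
    return table
# e.g. top_cuisine_maharashtra = top_cuisines_for("Maharashtra")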
# ## #7.3 Plotting State-wise Cuisine Treemaps
# Plotting Maharashtra cuisines
top_cuisine_maharashtra["State"] = "Maharashtra"
fig = px.treemap(
top_cuisine_maharashtra,
path=["State", "Cuisine", "Total Votes"],
values="Rating",
color="Rating",
labels={"Votes"},
)
fig.update_layout(
title_text="Favourite Cuisines in Maharshtra",
title_font_color="#4B0082",
title_x=0.5,
)
fig.show()
# Plotting Delhi cuisines
top_cuisine_delhi["State"] = "Delhi"
fig = px.treemap(
top_cuisine_delhi,
path=["State", "Cuisine", "Total Votes"],
values="Rating",
color="Rating",
)
fig.update_layout(
title_text="Favourite Cuisines in Delhi", title_font_color="#4B0082", title_x=0.5
)
fig.show()
# Plotting Karnataka cuisines
top_cuisine_karnataka["State"] = "Karnataka"
fig = px.treemap(
top_cuisine_karnataka,
path=["State", "Cuisine", "Total Votes"],
values="Rating",
color="Rating",
)
fig.update_layout(
title_text="Favourite Cuisines in Karnataka",
title_font_color="#4B0082",
title_x=0.5,
)
fig.show()
# Plotting Bengal cuisines
top_cuisine_bengal["State"] = "Bengal"
fig = px.treemap(
top_cuisine_bengal,
path=["State", "Cuisine", "Total Votes"],
values="Rating",
color="Rating",
)
fig.update_layout(
title_text="Favourite Cuisines in Bengal", title_font_color="#4B0082", title_x=0.5
)
fig.show()
# Plotting Telangana cuisines
top_cuisine_telangana["State"] = "Telangana"
fig = px.treemap(
top_cuisine_telangana,
path=["State", "Cuisine", "Total Votes"],
values="Rating",
color="Rating",
)
fig.update_layout(
title_text="Favourite Cuisines in Telangana",
title_font_color="#4B0082",
title_x=0.5,
)
fig.show()
# Plotting Gujarat cuisines
top_cuisine_gujarat["State"] = "Gujarat"
fig = px.treemap(
top_cuisine_gujarat,
path=["State", "Cuisine", "Total Votes"],
values="Rating",
color="Rating",
)
fig.update_layout(
title_text="Favourite Cuisines in Gujarat", title_font_color="#4B0082", title_x=0.5
)
fig.show()
# Plotting Tamil Nadu cuisines
top_cuisine_tamil["State"] = "Tamil Nadu"
fig = px.treemap(
top_cuisine_tamil,
path=["State", "Cuisine", "Total Votes"],
values="Rating",
color="Rating",
)
fig.update_layout(
title_text="Favourite Cuisines in Tamil Nadu",
title_font_color="#4B0082",
title_x=0.5,
)
fig.show()
# Plotting Punjab cuisines
top_cuisine_punjab["State"] = "Punjab"
fig = px.treemap(
top_cuisine_punjab,
path=["State", "Cuisine", "Total Votes"],
values="Rating",
color="Rating",
)
fig.update_layout(
title_text="Favourite Cuisines in Punjab",
title_font_color="#4B0082",
# title_font_family = 'Times New Roman',
title_x=0.5,
)
fig.show()
# Plotting Rajasthan cuisines
top_cuisine_rajasthan["State"] = "Rajasthan"
fig = px.treemap(
top_cuisine_rajasthan,
path=["State", "Cuisine", "Total Votes"],
values="Rating",
color="Rating",
)
fig.update_layout(
title_text="Favourite Cuisines in Rajasthan",
title_font_color="#4B0082",
title_x=0.5,
)
fig.show()
# Plotting Madhya Pradesh cuisines
top_cuisine_madhya["State"] = "Madhya Pradesh"
fig = px.treemap(
top_cuisine_madhya,
path=["State", "Cuisine", "Total Votes"],
values="Rating",
color="Rating",
)
fig.update_layout(
title_text="Favourite Cuisines in Madhya Pradesh",
title_font_color="#4B0082",
title_x=0.5,
)
fig.show()
# Plotting Uttar Pradesh cuisines
top_cuisine_uttar["State"] = "Uttar Pradesh"
fig = px.treemap(
top_cuisine_uttar,
path=["State", "Cuisine", "Total Votes"],
values="Rating",
color="Rating",
)
fig.update_layout(
title_text="Favourite Cuisines in Uttar Pradesh",
title_font_color="#4B0082",
title_x=0.5,
)
fig.show()
frames = [
top_cuisine_uttar,
top_cuisine_madhya,
top_cuisine_rajasthan,
top_cuisine_punjab,
top_cuisine_tamil,
top_cuisine_gujarat,
top_cuisine_telangana,
top_cuisine_bengal,
top_cuisine_karnataka,
top_cuisine_delhi,
top_cuisine_maharashtra,
]
top_cuisine_india = pd.concat(frames)
display(top_cuisine_india)
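# Each per-state frame keeps its own 0..n index, so the combined table carries repeated index
# labels; ignore_index would give it one clean running index (a sketch, stored separately so the
# variable used below is unchanged):
top_cuisine_india_reindexed = pd.concat(frames, ignore_index=True)
top_cuisine_india_reindexed.tail()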
# ## #7.4 Plotting Consolidated Cuisine Treemap for India
top_cuisine_india["Country"] = "India"
fig = px.treemap(
top_cuisine_india,
path=["Country", "State", "Cuisine", "Total Votes"],
values="Rating",
color="Rating",
)
fig.update_layout(
title_text="State-wise Favourite Cuisines in India",
title_font_color="#4B0082",
title_x=0.5,
)
fig.show()
#
# # Question #8: What are top restaurant locations in Maharashtra, Delhi and Karnataka?
df.head()
# ## #8.1 Forming Individual Dataframes for all States
df_maharashtra = df[df["State"] == "Maharashtra"]
df_delhi = df[df["State"] == "Delhi NCR"]
df_karnataka = df[df["State"] == "Karnataka"]
df_maharashtra
# ## #8.2 Defining Function to Return Votes in a Locality
def total_votes(locality):
df_x = df[df["Locality"] == locality]
total_votes = df_x["Votes"].sum()
return total_votes
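# The helper above rescans the full dataframe on every call; a one-off groupby yields the same
# totals for every locality at once (a sketch; the Series below is illustrative and is not used
# by the flow that follows):
locality_votes = df.groupby("Locality")["Votes"].sum()
# locality_votes[locality] gives the same value as total_votes(locality)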
# ## #8.3 Obtaining Votes for all Localities in Maharashtra
# List of all localities
maharashtra_locations = df_maharashtra["Locality"].value_counts().index.tolist()
# Obtaining total votes
total_votes_value = []
total_votes_list = []
for index, locality in enumerate(maharashtra_locations):
total_votes_value = total_votes(locality)
total_votes_list.append(total_votes_value)
# Locality-wise total restaurants in Maharashtra
maharashtra_location_counts = df_maharashtra["Locality"].value_counts()
# Zipping required lists and forming dataframe
list_of_tuples = list(
zip(maharashtra_locations, maharashtra_location_counts, total_votes_list)
)
maharashtra_locations_df = pd.DataFrame(
list_of_tuples, columns=["Location", "Total Restaurants", "Total Votes"]
)
maharashtra_locations_df
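# The zip above relies on maharashtra_locations and maharashtra_location_counts sharing the same
# ordering (they do, since both come from value_counts).  An index-aligned way to build the same
# kind of table (a sketch; the variable names are illustrative):
mh_counts = df_maharashtra["Locality"].value_counts().rename("Total Restaurants")
mh_votes = df_maharashtra.groupby("Locality")["Votes"].sum().rename("Total Votes")
mh_table = pd.concat([mh_counts, mh_votes], axis=1).rename_axis("Location").reset_index()
mh_table.head()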
# ## #8.4 Adding Attributes to the Localities Dataframe
df_location = pd.DataFrame()
rating_list = []
cost_list = []
location_rating_list = []
location_cost_list = []
for index, location in enumerate(maharashtra_locations_df["Location"]):
    df_location = df[df["Locality"] == location]
    # Resetting the accumulators for every locality so the averages are per-locality, not cumulative
    rating_list = []
    cost_list = []
    # Calculating average rating
    for rating in df_location["Rating"]:
        rating_list.append(rating)
    avg_rating = sum(rating_list) / len(rating_list)
    location_rating_list.append(avg_rating)
    # Calculating average cost
    for cost in df_location["Cost"]:
        cost_list.append(cost)
    avg_cost = sum(cost_list) / len(cost_list)
    location_cost_list.append(avg_cost)
# Adding attributes to the dataframe
maharashtra_locations_df["Rating"] = location_rating_list
maharashtra_locations_df["Cost"] = location_cost_list
top_locations_maharashtra = maharashtra_locations_df[
maharashtra_locations_df["Total Votes"] > 150
]
top_locations_maharashtra["Total Votes"] = (
top_locations_maharashtra["Total Votes"].astype("str") + " votes"
)
# ## #8.5 Similarly Obtaining Dataframes for Delhi and Karnataka
# ### Obtaining Dataframe for Delhi
# Obtaining total votes for all localities in Delhi
# List of all localities
delhi_locations = df_delhi["Locality"].value_counts().index.tolist()
# Obtaining total votes
total_votes_value = []
total_votes_list = []
for index, locality in enumerate(delhi_locations):
total_votes_value = total_votes(locality)
total_votes_list.append(total_votes_value)
# Locality-wise total restaurants in Delhi NCR
delhi_location_counts = df_delhi["Locality"].value_counts()
# Zipping required lists and forming dataframe
list_of_tuples = list(zip(delhi_locations, delhi_location_counts, total_votes_list))
delhi_locations_df = pd.DataFrame(
list_of_tuples, columns=["Location", "Total Restaurants", "Total Votes"]
)
delhi_locations_df
# Adding attributes to the localities dataframe
df_location = pd.DataFrame()
rating_list = []
cost_list = []
location_rating_list = []
location_cost_list = []
for index, location in enumerate(delhi_locations_df["Location"]):
    df_location = df[df["Locality"] == location]
    # Resetting the accumulators for every locality so the averages are per-locality, not cumulative
    rating_list = []
    cost_list = []
    # Calculating average rating
    for rating in df_location["Rating"]:
        rating_list.append(rating)
    avg_rating = sum(rating_list) / len(rating_list)
    location_rating_list.append(avg_rating)
    # Calculating average cost
    for cost in df_location["Cost"]:
        cost_list.append(cost)
    avg_cost = sum(cost_list) / len(cost_list)
    location_cost_list.append(avg_cost)
# Adding attributes to the dataframe
delhi_locations_df["Rating"] = location_rating_list
delhi_locations_df["Cost"] = location_cost_list
delhi_locations_df.head(20)
top_locations_delhi = delhi_locations_df[delhi_locations_df["Total Votes"] > 150]
top_locations_delhi["Total Votes"] = (
top_locations_delhi["Total Votes"].astype("str") + " votes"
)
# ### Obtaining Dataframe for Karnataka
# Obtaining total votes for all localities in Karnataka
# List of all localities
karnataka_locations = df_karnataka["Locality"].value_counts().index.tolist()
# Obtaining total votes
total_votes_value = []
total_votes_list = []
for index, locality in enumerate(karnataka_locations):
total_votes_value = total_votes(locality)
total_votes_list.append(total_votes_value)
# Locality-wise total restaurants in Karnataka
karnataka_location_counts = df_karnataka["Locality"].value_counts()
# Zipping required lists and forming dataframe
list_of_tuples = list(
zip(karnataka_locations, karnataka_location_counts, total_votes_list)
)
karnataka_locations_df = pd.DataFrame(
list_of_tuples, columns=["Location", "Total Restaurants", "Total Votes"]
)
karnataka_locations_df
# Adding attributes to the localities dataframe
df_location = pd.DataFrame()
rating_list = []
cost_list = []
location_rating_list = []
location_cost_list = []
for index, location in enumerate(karnataka_locations_df["Location"]):
    df_location = df[df["Locality"] == location]
    # Resetting the accumulators for every locality so the averages are per-locality, not cumulative
    rating_list = []
    cost_list = []
    # Calculating average rating
    for rating in df_location["Rating"]:
        rating_list.append(rating)
    avg_rating = sum(rating_list) / len(rating_list)
    location_rating_list.append(avg_rating)
    # Calculating average cost
    for cost in df_location["Cost"]:
        cost_list.append(cost)
    avg_cost = sum(cost_list) / len(cost_list)
    location_cost_list.append(avg_cost)
# Adding attributes to the dataframe
karnataka_locations_df["Rating"] = location_rating_list
karnataka_locations_df["Cost"] = location_cost_list
# karnataka_locations_df.head(20)
top_locations_karnataka = karnataka_locations_df[
karnataka_locations_df["Total Votes"] > 150
]
top_locations_karnataka["Total Votes"] = (
top_locations_karnataka["Total Votes"].astype("str") + " votes"
)
top_locations_karnataka.head()
# ## #8.6 Plotting Treemaps
fig = px.treemap(
top_locations_maharashtra,
path=["Location", "Total Votes"],
values="Rating",
color="Rating",
labels={"Votes"},
)
fig.update_layout(
title_text="Top Localities in Maharashtra",
title_font_color="#4B0082",
title_x=0.5,
)
fig.show()
fig = px.treemap(
top_locations_delhi,
path=["Location", "Total Votes"],
values="Rating",
color="Rating",
labels={"Votes"},
)
fig.update_layout(
title_text="Top Localities in Delhi",
title_font_color="#4B0082",
title_x=0.5,
)
fig.show()
fig = px.treemap(
top_locations_karnataka,
path=["Location", "Total Votes"],
values="Rating",
color="Rating",
labels={"Votes"},
)
fig.update_layout(
title_text="Top Localities in Karnataka",
title_font_color="#4B0082",
title_x=0.5,
)
fig.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/534/69534242.ipynb
|
dineout-restaurants-in-india
|
vikram92
|
[{"Id": 69534242, "ScriptId": 18985609, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4807161, "CreationDate": "08/01/2021 08:06:43", "VersionNumber": 2.0, "Title": "EDA for Dineout Restaurants in India", "EvaluationDate": "08/01/2021", "IsChange": true, "TotalLines": 1175.0, "LinesInsertedFromPrevious": 23.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 1152.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 92830310, "KernelVersionId": 69534242, "SourceDatasetVersionId": 2430235}]
|
[{"Id": 2430235, "DatasetId": 1470629, "DatasourceVersionId": 2472465, "CreatorUserId": 4807161, "LicenseName": "Unknown", "CreationDate": "07/16/2021 07:24:22", "VersionNumber": 1.0, "Title": "Dineout Restaurants in India", "Slug": "dineout-restaurants-in-india", "Subtitle": "Record of all dineout restaurants in India", "Description": "### Context\n\nWith smaller staff, capital and costing requirements, the restaurant business is on a consistent rise. Identifying a successful restaurant demands more than customer ratings. The dataset is a record of ratings, cost and location of all Dineout restaurants in India. \n\n\n### Content\n\n**Name:** Restaurant name\n\n**Location:** Complete location of the restaurant. It consists of locality and city.\n\n**Locality:** Locality of the restaurant within the city.\n\n**City:** City of the restaurant.\n\n**Cuisine:** Types of cuisines offered by the restaurant.\n\n**Rating:** Rating of the restaurant given by a number of users.\n\n**Votes:** Number of customers rating the restaurant.\n\n**Cost:** Approximate cost for two people.\n\n\n### Inspiration\n\nHow customers attract?\nEvery restaurant has a different story. Success of a restaurant depend on the location, cuisines, cost or ratings. Number of votes determine the number of customer visits and rating signifies the restaurant liking. An analysis on dataset can reveal the customer behaviour.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1470629, "CreatorUserId": 4807161, "OwnerUserId": 4807161.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2430235.0, "CurrentDatasourceVersionId": 2472465.0, "ForumId": 1490270, "Type": 2, "CreationDate": "07/16/2021 07:24:22", "LastActivityDate": "07/16/2021", "TotalViews": 2220, "TotalDownloads": 91, "TotalVotes": 3, "TotalKernels": 1}]
|
[{"Id": 4807161, "UserName": "vikram92", "DisplayName": "Vikram Nayyar", "RegisterDate": "04/04/2020", "PerformanceTier": 1}]
|
<start_data_description><data_path>dineout-restaurants-in-india/dineout_restaurants.csv:
<column_names>
['Name', 'Location', 'Locality', 'City', 'Cuisine', 'Rating', 'Votes', 'Cost']
<column_types>
{'Name': 'object', 'Location': 'object', 'Locality': 'object', 'City': 'object', 'Cuisine': 'object', 'Rating': 'float64', 'Votes': 'int64', 'Cost': 'int64'}
<dataframe_Summary>
{'Rating': {'count': 6595.0, 'mean': 4.088248673237301, 'std': 0.669935790224211, 'min': 1.0, '25%': 3.9, '50%': 4.2, '75%': 4.4, 'max': 5.0}, 'Votes': {'count': 6595.0, 'mean': 119.40333586050038, 'std': 261.81178399876194, 'min': 1.0, '25%': 6.0, '50%': 31.0, '75%': 115.0, 'max': 5016.0}, 'Cost': {'count': 6595.0, 'mean': 1102.6154662623198, 'std': 716.9033248173203, 'min': 100.0, '25%': 500.0, '50%': 900.0, '75%': 1500.0, 'max': 8000.0}}
<dataframe_info>
RangeIndex: 6595 entries, 0 to 6594
Data columns (total 8 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Name 6595 non-null object
1 Location 6595 non-null object
2 Locality 6595 non-null object
3 City 6595 non-null object
4 Cuisine 6595 non-null object
5 Rating 6595 non-null float64
6 Votes 6595 non-null int64
7 Cost 6595 non-null int64
dtypes: float64(1), int64(2), object(5)
memory usage: 412.3+ KB
<some_examples>
{'Name': {'0': 'Local', '1': 'The G.T. ROAD', '2': 'Tamasha', '3': 'The Junkyard Cafe'}, 'Location': {'0': 'Scindia House,Connaught Place, Central Delhi', '1': 'M-Block,Connaught Place, Central Delhi', '2': 'Connaught Place, Central Delhi', '3': 'Connaught Place, Central Delhi'}, 'Locality': {'0': ' Central Delhi', '1': ' Central Delhi', '2': ' Central Delhi', '3': ' Central Delhi'}, 'City': {'0': 'Delhi', '1': 'Delhi', '2': 'Delhi', '3': 'Delhi'}, 'Cuisine': {'0': 'North Indian, Finger Food, Continental', '1': 'North Indian', '2': 'Finger Food, North Indian, Italian, Continental, Asian', '3': 'North Indian, Mediterranean, Asian, Italian, Oriental '}, 'Rating': {'0': 4.1, '1': 4.3, '2': 4.2, '3': 4.2}, 'Votes': {'0': 2415, '1': 2363, '2': 5016, '3': 2821}, 'Cost': {'0': 2000, '1': 1500, '2': 2000, '3': 1800}}
<end_description>
69534966
<jupyter_start><jupyter_text>Logistic regression To predict heart disease
**LOGISTIC REGRESSION - HEART DISEASE PREDICTION**
**Introduction**
The World Health Organization has estimated that 12 million deaths occur worldwide every year due to heart disease. Half the deaths in the United States and other developed countries are due to cardiovascular disease. Early prognosis of cardiovascular disease can aid decisions on lifestyle changes in high-risk patients and in turn reduce complications. This analysis intends to pinpoint the most relevant risk factors of heart disease as well as predict the overall risk using logistic regression.
Data Preparation
Source
The dataset is publicly available on the Kaggle website, and it comes from an ongoing cardiovascular study on residents of the town of Framingham, Massachusetts. The classification goal is to predict whether a patient has a 10-year risk of future coronary heart disease (CHD). The dataset provides the patients' information and includes over 4,000 records and 15 attributes.
Variables
Each attribute is a potential risk factor. There are both demographic, behavioral and medical risk factors.
Demographic:
• Sex: male or female (Nominal)
• Age: age of the patient (Continuous - although the recorded ages have been truncated to whole numbers, the concept of age is continuous)
Behavioral
• Current Smoker: whether or not the patient is a current smoker (Nominal)
• Cigs Per Day: the number of cigarettes that the person smoked on average in one day (can be considered continuous, as one can have any number of cigarettes, even half a cigarette)
Medical (history)
• BP Meds: whether or not the patient was on blood pressure medication (Nominal)
• Prevalent Stroke: whether or not the patient had previously had a stroke (Nominal)
• Prevalent Hyp: whether or not the patient was hypertensive (Nominal)
• Diabetes: whether or not the patient had diabetes (Nominal)
Medical (current)
• Tot Chol: total cholesterol level (Continuous)
• Sys BP: systolic blood pressure (Continuous)
• Dia BP: diastolic blood pressure (Continuous)
• BMI: Body Mass Index (Continuous)
• Heart Rate: heart rate (Continuous - In medical research, variables such as heart rate though in fact discrete, yet are considered continuous because of large number of possible values.)
• Glucose: glucose level (Continuous)
Predict variable (desired target)
• 10 year risk of coronary heart disease CHD (binary: “1”, means “Yes”, “0” means “No”)
Logistic Regression
Logistic regression is a type of regression analysis in statistics used to predict the outcome of a categorical dependent variable from a set of predictor (independent) variables. In logistic regression the dependent variable is always binary. Logistic regression is mainly used for prediction and for calculating the probability of success.
Some attributes have a P-value higher than the preferred alpha (5%), indicating a weak, statistically non-significant relationship with the probability of heart disease. A backward elimination approach is used here: the attribute with the highest P-value is removed one at a time and the regression is re-run until all remaining attributes have P-values below 0.05.
Feature Selection: Backward elimination (P-value approach)
Logistic regression equation
P = e^(β0 + β1X1) / (1 + e^(β0 + β1X1))
When all features are plugged in:
logit(p) = log(p / (1 − p)) = β0 + β1·Sex_male + β2·age + β3·cigsPerDay + β4·totChol + β5·sysBP + β6·glucose
Interpreting the results: Odds Ratio, Confidence Intervals and P-values
• This fitted model shows that, holding all other features constant, the odds of getting diagnosed with heart disease for males (sex_male = 1) over that of females (sex_male = 0) is exp(0.5815) = 1.788687. In terms of percent change, we can say that the odds for males are 78.8% higher than the odds for females.
• The coefficient for age says that, holding all others constant, we will see a 7% increase in the odds of getting diagnosed with CHD for a one-year increase in age, since exp(0.0655) = 1.067644.
• Similarly, with every extra cigarette one smokes there is a 2% increase in the odds of CHD.
• For total cholesterol level and glucose level there is no significant change.
• There is a 1.7% increase in odds for every unit increase in systolic blood pressure. (A short sketch of how these odds ratios can be computed is shown below.)
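A minimal sketch of how the coefficients, P-values and odds ratios above can be reproduced with statsmodels (the framingham.csv path matches the one used in the code further below, while the exact predictor column names are assumptions about the CSV header):

import numpy as np
import pandas as pd
import statsmodels.api as sm

fram = pd.read_csv(
    "../input/heart-disease-prediction-using-logistic-regression/framingham.csv"
).dropna()
predictors = ["male", "age", "cigsPerDay", "totChol", "sysBP", "glucose"]  # assumed header names
result = sm.Logit(fram["TenYearCHD"], sm.add_constant(fram[predictors])).fit()
print(result.summary())        # coefficients, P-values, confidence intervals
print(np.exp(result.params))   # odds ratios, e.g. exp(0.5815) is roughly 1.79 for males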
Model Evaluation - Statistics
From the above statistics it is clear that the model is more specific than sensitive: the negative cases are predicted more accurately than the positive ones.
Predicted probabilities of 0 (No Coronary Heart Disease) and 1 (Coronary Heart Disease: Yes) for the test data with a default classification threshold of 0.5.
Lowering the threshold
Since the model is predicting heart disease, too many type II errors are not acceptable. A false negative (ignoring the probability of disease when there actually is one) is more dangerous than a false positive in this case. Hence, in order to increase the sensitivity, the threshold can be lowered.
Conclusions
• All attributes selected after the elimination process show P-values lower than 5%, thereby suggesting a significant role in heart disease prediction.
• Men seem to be more susceptible to heart disease than women. Increases in age, number of cigarettes smoked per day and systolic blood pressure are also associated with increasing odds of heart disease.
• Total cholesterol shows no significant change in the odds of CHD. This could be due to the presence of 'good' cholesterol (HDL) in the total cholesterol reading. Glucose, too, causes only a negligible change in odds (0.2%).
• The model predicted with 0.88 accuracy. The model is more specific than sensitive. Overall, the model could be improved with more data.
Appendix
http://www.who.int/mediacentre/factsheets/fs317/en/
Data Source References
https://www.kaggle.com/amanajmera1/framingham-heart-study-dataset/data
Kaggle dataset identifier: heart-disease-prediction-using-logistic-regression
<jupyter_script>import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
filepath = "../input/heart-disease-prediction-using-logistic-regression/framingham.csv"
df_org = pd.read_csv(filepath)
# Data clean-up: drop rows with missing values, then under-sample the majority
# (TenYearCHD == 0) class by randomly removing 80% of it to reduce class imbalance
df = df_org.dropna()
new_df = df.drop(df[df["TenYearCHD"] == 0].sample(frac=0.8).index)
dataset = new_df.to_numpy()
np.random.shuffle(dataset)
datasetX = dataset[:, 0:-1]
datasetY = dataset[:, -1]
# normalize
datasetX = (datasetX - np.min(datasetX, axis=0)) / (
np.max(datasetX, axis=0) - np.min(datasetX, axis=0)
)
print("Output ratio")
hist = plt.hist(datasetY)
# Flatten dataset and divide it into sets
flat_dataset_X = datasetX.reshape((datasetX.shape[0], -1)).T
trainX, trainY = flat_dataset_X[:, 100:], datasetY[100:]
testX, testY = flat_dataset_X[:, :100], datasetY[:100]
trainY = trainY.reshape(trainY.shape[0], 1).T
testY = testY.reshape(testY.shape[0], 1).T
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def calc_cost(A, Y, m):
return -np.sum(Y * np.log(A) + (1 - Y) * np.log(1 - A)) / m
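# Quick sanity check of the cost with hand-picked numbers: confident, correct
# predictions should give a small cross-entropy (the example values are arbitrary)
print(calc_cost(np.array([[0.9, 0.1]]), np.array([[1.0, 0.0]]), 2))  # ~0.105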
def train(X, Y, alpha, itercount, print_cost, print_at=100, stored_cost_len=5000):
assert X.shape[1] == Y.shape[1]
m = X.shape[1]
w = np.zeros((X.shape[0], 1))
b = 0.0
count = 0
costs = []
print("training...")
for _ in range(itercount):
        # Forward pass: linear scores, then sigmoid activations
        Z = np.dot(w.T, X) + b
        A = sigmoid(Z)
        # Gradients of the cross-entropy cost (the 1/m factor is folded into dZ)
        dZ = (A - Y) / m
        dW = np.dot(X, dZ.T)
        db = np.sum(dZ)
        # Gradient-descent parameter update
        w = w - alpha * dW
        b = b - alpha * db
cost = calc_cost(A, Y, m)
if len(costs) == stored_cost_len:
costs.pop(0)
costs.append(cost)
if print_cost:
if count % print_at == 0:
print(f"Cost at iteration {count} = {cost}")
count += 1
print("finished...")
print("Cost=", cost)
return w, b, costs
w, b, costs = train(
trainX, trainY, 0.1, 50000, True, print_at=5000, stored_cost_len=10000
)
plot = plt.plot(costs)
plt.title("last few costs")
# TEST
def test(X, Y, w, b):
Z = np.dot(w.T, X) + b
A = sigmoid(Z)
A = np.rint(A)
accuracy = np.sum(A == Y) / A.shape[1]
return 100 * accuracy
print("Accuracy on test set=", test(testX, testY, w, b), "%")
print("Accuracy on train set=", test(trainX, trainY, w, b), "%")
69534668
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
# ## 1) Load Data
train = pd.read_csv("../input/tabular-playground-series-aug-2021/train.csv")
test = pd.read_csv("../input/tabular-playground-series-aug-2021/test.csv")
print("train shape:", train.shape)
print("test shape:", test.shape)
train.head()
# Train data
X = train.drop(columns=["loss", "id"])
y = train["loss"].values
# Test data
X_test = test.drop(columns=["id"])
print("Train set:", X.shape)
print("Test set:", X_test.shape)
# from sklearn.model_selection import train_test_split
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=42)
# ## 2) Train Model
import xgboost as xgb
xgbrModel = xgb.XGBRegressor()
xgbrModel.fit(X, y)
from sklearn import metrics
print("R2 score: ", xgbrModel.score(X, y))
predicted = xgbrModel.predict(X)
rmse = metrics.mean_squared_error(y, predicted, squared=False)
print("MSE score: ", rmse)
# ## Feature importances
from xgboost import plot_importance
from xgboost import plot_tree
plt.rcParams["figure.figsize"] = (10, 7)
plot_importance(xgbrModel)
print("Feature importances: ", xgbrModel.feature_importances_)
# ## Test Predict
y_pred = xgbrModel.predict(X_test)
preds = pd.read_csv("../input/tabular-playground-series-aug-2021/sample_submission.csv")
preds.loss = y_pred
preds.head()
preds.to_csv("submission.csv", index=False)
69534056
<jupyter_start><jupyter_text>Mobile Price Classification
### Context
Bob has started his own mobile company. He wants to give tough fight to big companies like Apple,Samsung etc.
He does not know how to estimate price of mobiles his company creates. In this competitive mobile phone market you cannot simply assume things. To solve this problem he collects sales data of mobile phones of various companies.
Bob wants to find out some relation between features of a mobile phone(eg:- RAM,Internal Memory etc) and its selling price. But he is not so good at Machine Learning. So he needs your help to solve this problem.
In this problem you do not have to predict actual price but a price range indicating how high the price is
Kaggle dataset identifier: mobile-price-classification
<jupyter_code>import pandas as pd
df = pd.read_csv('mobile-price-classification/train.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 2000 entries, 0 to 1999
Data columns (total 21 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 battery_power 2000 non-null int64
1 blue 2000 non-null int64
2 clock_speed 2000 non-null float64
3 dual_sim 2000 non-null int64
4 fc 2000 non-null int64
5 four_g 2000 non-null int64
6 int_memory 2000 non-null int64
7 m_dep 2000 non-null float64
8 mobile_wt 2000 non-null int64
9 n_cores 2000 non-null int64
10 pc 2000 non-null int64
11 px_height 2000 non-null int64
12 px_width 2000 non-null int64
13 ram 2000 non-null int64
14 sc_h 2000 non-null int64
15 sc_w 2000 non-null int64
16 talk_time 2000 non-null int64
17 three_g 2000 non-null int64
18 touch_screen 2000 non-null int64
19 wifi 2000 non-null int64
20 price_range 2000 non-null int64
dtypes: float64(2), int64(19)
memory usage: 328.2 KB
<jupyter_text>Examples:
{
"battery_power": 842.0,
"blue": 0.0,
"clock_speed": 2.2,
"dual_sim": 0.0,
"fc": 1.0,
"four_g": 0.0,
"int_memory": 7.0,
"m_dep": 0.6000000000000001,
"mobile_wt": 188.0,
"n_cores": 2.0,
"pc": 2.0,
"px_height": 20.0,
"px_width": 756.0,
"ram": 2549.0,
"sc_h": 9.0,
"sc_w": 7.0,
"talk_time": 19.0,
"three_g": 0.0,
"touch_screen": 0.0,
"wifi": 1.0,
"...": "and 1 more columns"
}
{
"battery_power": 1021.0,
"blue": 1.0,
"clock_speed": 0.5,
"dual_sim": 1.0,
"fc": 0.0,
"four_g": 1.0,
"int_memory": 53.0,
"m_dep": 0.7000000000000001,
"mobile_wt": 136.0,
"n_cores": 3.0,
"pc": 6.0,
"px_height": 905.0,
"px_width": 1988.0,
"ram": 2631.0,
"sc_h": 17.0,
"sc_w": 3.0,
"talk_time": 7.0,
"three_g": 1.0,
"touch_screen": 1.0,
"wifi": 0.0,
"...": "and 1 more columns"
}
{
"battery_power": 563.0,
"blue": 1.0,
"clock_speed": 0.5,
"dual_sim": 1.0,
"fc": 2.0,
"four_g": 1.0,
"int_memory": 41.0,
"m_dep": 0.9,
"mobile_wt": 145.0,
"n_cores": 5.0,
"pc": 6.0,
"px_height": 1263.0,
"px_width": 1716.0,
"ram": 2603.0,
"sc_h": 11.0,
"sc_w": 2.0,
"talk_time": 9.0,
"three_g": 1.0,
"touch_screen": 1.0,
"wifi": 0.0,
"...": "and 1 more columns"
}
{
"battery_power": 615.0,
"blue": 1.0,
"clock_speed": 2.5,
"dual_sim": 0.0,
"fc": 0.0,
"four_g": 0.0,
"int_memory": 10.0,
"m_dep": 0.8,
"mobile_wt": 131.0,
"n_cores": 6.0,
"pc": 9.0,
"px_height": 1216.0,
"px_width": 1786.0,
"ram": 2769.0,
"sc_h": 16.0,
"sc_w": 8.0,
"talk_time": 11.0,
"three_g": 1.0,
"touch_screen": 0.0,
"wifi": 0.0,
"...": "and 1 more columns"
}
<jupyter_code>import pandas as pd
df = pd.read_csv('mobile-price-classification/test.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 1000 entries, 0 to 999
Data columns (total 21 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 id 1000 non-null int64
1 battery_power 1000 non-null int64
2 blue 1000 non-null int64
3 clock_speed 1000 non-null float64
4 dual_sim 1000 non-null int64
5 fc 1000 non-null int64
6 four_g 1000 non-null int64
7 int_memory 1000 non-null int64
8 m_dep 1000 non-null float64
9 mobile_wt 1000 non-null int64
10 n_cores 1000 non-null int64
11 pc 1000 non-null int64
12 px_height 1000 non-null int64
13 px_width 1000 non-null int64
14 ram 1000 non-null int64
15 sc_h 1000 non-null int64
16 sc_w 1000 non-null int64
17 talk_time 1000 non-null int64
18 three_g 1000 non-null int64
19 touch_screen 1000 non-null int64
20 wifi 1000 non-null int64
dtypes: float64(2), int64(19)
memory usage: 164.2 KB
<jupyter_text>Examples:
{
"id": 1.0,
"battery_power": 1043.0,
"blue": 1.0,
"clock_speed": 1.8,
"dual_sim": 1.0,
"fc": 14.0,
"four_g": 0.0,
"int_memory": 5.0,
"m_dep": 0.1,
"mobile_wt": 193.0,
"n_cores": 3.0,
"pc": 16.0,
"px_height": 226.0,
"px_width": 1412.0,
"ram": 3476.0,
"sc_h": 12.0,
"sc_w": 7.0,
"talk_time": 2.0,
"three_g": 0.0,
"touch_screen": 1.0,
"...": "and 1 more columns"
}
{
"id": 2.0,
"battery_power": 841.0,
"blue": 1.0,
"clock_speed": 0.5,
"dual_sim": 1.0,
"fc": 4.0,
"four_g": 1.0,
"int_memory": 61.0,
"m_dep": 0.8,
"mobile_wt": 191.0,
"n_cores": 5.0,
"pc": 12.0,
"px_height": 746.0,
"px_width": 857.0,
"ram": 3895.0,
"sc_h": 6.0,
"sc_w": 0.0,
"talk_time": 7.0,
"three_g": 1.0,
"touch_screen": 0.0,
"...": "and 1 more columns"
}
{
"id": 3.0,
"battery_power": 1807.0,
"blue": 1.0,
"clock_speed": 2.8,
"dual_sim": 0.0,
"fc": 1.0,
"four_g": 0.0,
"int_memory": 27.0,
"m_dep": 0.9,
"mobile_wt": 186.0,
"n_cores": 3.0,
"pc": 4.0,
"px_height": 1270.0,
"px_width": 1366.0,
"ram": 2396.0,
"sc_h": 17.0,
"sc_w": 10.0,
"talk_time": 10.0,
"three_g": 0.0,
"touch_screen": 1.0,
"...": "and 1 more columns"
}
{
"id": 4.0,
"battery_power": 1546.0,
"blue": 0.0,
"clock_speed": 0.5,
"dual_sim": 1.0,
"fc": 18.0,
"four_g": 1.0,
"int_memory": 25.0,
"m_dep": 0.5,
"mobile_wt": 96.0,
"n_cores": 8.0,
"pc": 20.0,
"px_height": 295.0,
"px_width": 1752.0,
"ram": 3893.0,
"sc_h": 10.0,
"sc_w": 0.0,
"talk_time": 7.0,
"three_g": 1.0,
"touch_screen": 1.0,
"...": "and 1 more columns"
}
<jupyter_script>import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
test_data = pd.read_csv("../input/mobile-price-classification/test.csv")
test_data.head()
train_data = pd.read_csv("../input/mobile-price-classification/train.csv")
train_data.head()
train_data.info()
test_data.info()
# # Data Analysis
train_data.describe().T
corr = train_data.corr()
corr["price_range"].sort_values(ascending=False)
plt.figure(figsize=(14, 10))
sns.heatmap(corr, annot=True, fmt=".2f")
plt.show()
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn import preprocessing
from sklearn.feature_selection import RFE
from sklearn.decomposition import PCA
from sklearn.metrics import classification_report, confusion_matrix
X_train = train_data.iloc[:, :-1]
Y_train = train_data["price_range"]
def testing_model(x_train, x_test, y_train, y_test, model):
model.fit(x_train, y_train)
y_predict = model.predict(x_test)
conv_matrix = confusion_matrix(y_test, y_predict)
sns.heatmap(conv_matrix, cmap="coolwarm", annot=True, fmt=".0f")
print(" Confusion matrix \n")
plt.show()
print(" Classification report \n")
print(classification_report(y_test, y_predict))
pca = PCA(n_components=5)
pca.fit(X_train)
pca_features = pca.transform(X_train)
x_train, x_test, y_train, y_test = train_test_split(
pca_features, Y_train, test_size=0.3, random_state=10
)
scaler = preprocessing.StandardScaler()
scaler.fit(pca_features)
norm_x_train = scaler.transform(x_train)
norm_x_test = scaler.transform(x_test)
tree = DecisionTreeClassifier(random_state=10)
testing_model(norm_x_train, norm_x_test, y_train, y_test, tree)
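# Note that above, PCA and the scaler are fitted on the full feature matrix
# before the train/test split, which leaks information from the test rows.
# A leakage-free variant keeps every step inside a Pipeline fitted on the
# training split only (a sketch, reusing X_train / Y_train from above):
from sklearn.pipeline import Pipeline

x_tr, x_te, y_tr, y_te = train_test_split(
    X_train, Y_train, test_size=0.3, random_state=10
)
pipe = Pipeline(
    [
        ("scale", preprocessing.StandardScaler()),
        ("pca", PCA(n_components=5)),
        ("tree", DecisionTreeClassifier(random_state=10)),
    ]
)
pipe.fit(x_tr, y_tr)
print("Leakage-free test accuracy:", pipe.score(x_te, y_te))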
[{"mobile-price-classification/train.csv": {"column_names": "[\"battery_power\", \"blue\", \"clock_speed\", \"dual_sim\", \"fc\", \"four_g\", \"int_memory\", \"m_dep\", \"mobile_wt\", \"n_cores\", \"pc\", \"px_height\", \"px_width\", \"ram\", \"sc_h\", \"sc_w\", \"talk_time\", \"three_g\", \"touch_screen\", \"wifi\", \"price_range\"]", "column_data_types": "{\"battery_power\": \"int64\", \"blue\": \"int64\", \"clock_speed\": \"float64\", \"dual_sim\": \"int64\", \"fc\": \"int64\", \"four_g\": \"int64\", \"int_memory\": \"int64\", \"m_dep\": \"float64\", \"mobile_wt\": \"int64\", \"n_cores\": \"int64\", \"pc\": \"int64\", \"px_height\": \"int64\", \"px_width\": \"int64\", \"ram\": \"int64\", \"sc_h\": \"int64\", \"sc_w\": \"int64\", \"talk_time\": \"int64\", \"three_g\": \"int64\", \"touch_screen\": \"int64\", \"wifi\": \"int64\", \"price_range\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 2000 entries, 0 to 1999\nData columns (total 21 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 battery_power 2000 non-null int64 \n 1 blue 2000 non-null int64 \n 2 clock_speed 2000 non-null float64\n 3 dual_sim 2000 non-null int64 \n 4 fc 2000 non-null int64 \n 5 four_g 2000 non-null int64 \n 6 int_memory 2000 non-null int64 \n 7 m_dep 2000 non-null float64\n 8 mobile_wt 2000 non-null int64 \n 9 n_cores 2000 non-null int64 \n 10 pc 2000 non-null int64 \n 11 px_height 2000 non-null int64 \n 12 px_width 2000 non-null int64 \n 13 ram 2000 non-null int64 \n 14 sc_h 2000 non-null int64 \n 15 sc_w 2000 non-null int64 \n 16 talk_time 2000 non-null int64 \n 17 three_g 2000 non-null int64 \n 18 touch_screen 2000 non-null int64 \n 19 wifi 2000 non-null int64 \n 20 price_range 2000 non-null int64 \ndtypes: float64(2), int64(19)\nmemory usage: 328.2 KB\n", "summary": "{\"battery_power\": {\"count\": 2000.0, \"mean\": 1238.5185, \"std\": 439.41820608353135, \"min\": 501.0, \"25%\": 851.75, \"50%\": 1226.0, \"75%\": 1615.25, \"max\": 1998.0}, \"blue\": {\"count\": 2000.0, \"mean\": 0.495, \"std\": 0.5001000400170075, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 1.0}, \"clock_speed\": {\"count\": 2000.0, \"mean\": 1.52225, \"std\": 0.8160042088950689, \"min\": 0.5, \"25%\": 0.7, \"50%\": 1.5, \"75%\": 2.2, \"max\": 3.0}, \"dual_sim\": {\"count\": 2000.0, \"mean\": 0.5095, \"std\": 0.500034766175005, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 1.0}, \"fc\": {\"count\": 2000.0, \"mean\": 4.3095, \"std\": 4.341443747983894, \"min\": 0.0, \"25%\": 1.0, \"50%\": 3.0, \"75%\": 7.0, \"max\": 19.0}, \"four_g\": {\"count\": 2000.0, \"mean\": 0.5215, \"std\": 0.49966246736236386, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 1.0}, \"int_memory\": {\"count\": 2000.0, \"mean\": 32.0465, \"std\": 18.145714955206856, \"min\": 2.0, \"25%\": 16.0, \"50%\": 32.0, \"75%\": 48.0, \"max\": 64.0}, \"m_dep\": {\"count\": 2000.0, \"mean\": 0.50175, \"std\": 0.2884155496235117, \"min\": 0.1, \"25%\": 0.2, \"50%\": 0.5, \"75%\": 0.8, \"max\": 1.0}, \"mobile_wt\": {\"count\": 2000.0, \"mean\": 140.249, \"std\": 35.39965489638835, \"min\": 80.0, \"25%\": 109.0, \"50%\": 141.0, \"75%\": 170.0, \"max\": 200.0}, \"n_cores\": {\"count\": 2000.0, \"mean\": 4.5205, \"std\": 2.2878367180426604, \"min\": 1.0, \"25%\": 3.0, \"50%\": 4.0, \"75%\": 7.0, \"max\": 8.0}, \"pc\": {\"count\": 2000.0, \"mean\": 9.9165, \"std\": 6.06431494134778, \"min\": 0.0, \"25%\": 5.0, \"50%\": 10.0, \"75%\": 15.0, \"max\": 20.0}, \"px_height\": 
{\"count\": 2000.0, \"mean\": 645.108, \"std\": 443.7808108064386, \"min\": 0.0, \"25%\": 282.75, \"50%\": 564.0, \"75%\": 947.25, \"max\": 1960.0}, \"px_width\": {\"count\": 2000.0, \"mean\": 1251.5155, \"std\": 432.19944694633796, \"min\": 500.0, \"25%\": 874.75, \"50%\": 1247.0, \"75%\": 1633.0, \"max\": 1998.0}, \"ram\": {\"count\": 2000.0, \"mean\": 2124.213, \"std\": 1084.7320436099494, \"min\": 256.0, \"25%\": 1207.5, \"50%\": 2146.5, \"75%\": 3064.5, \"max\": 3998.0}, \"sc_h\": {\"count\": 2000.0, \"mean\": 12.3065, \"std\": 4.213245004356306, \"min\": 5.0, \"25%\": 9.0, \"50%\": 12.0, \"75%\": 16.0, \"max\": 19.0}, \"sc_w\": {\"count\": 2000.0, \"mean\": 5.767, \"std\": 4.3563976058264045, \"min\": 0.0, \"25%\": 2.0, \"50%\": 5.0, \"75%\": 9.0, \"max\": 18.0}, \"talk_time\": {\"count\": 2000.0, \"mean\": 11.011, \"std\": 5.463955197766688, \"min\": 2.0, \"25%\": 6.0, \"50%\": 11.0, \"75%\": 16.0, \"max\": 20.0}, \"three_g\": {\"count\": 2000.0, \"mean\": 0.7615, \"std\": 0.42627292231873126, \"min\": 0.0, \"25%\": 1.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 1.0}, \"touch_screen\": {\"count\": 2000.0, \"mean\": 0.503, \"std\": 0.500116044562674, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 1.0}, \"wifi\": {\"count\": 2000.0, \"mean\": 0.507, \"std\": 0.5000760322381083, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 1.0}, \"price_range\": {\"count\": 2000.0, \"mean\": 1.5, \"std\": 1.118313602106461, \"min\": 0.0, \"25%\": 0.75, \"50%\": 1.5, \"75%\": 2.25, \"max\": 3.0}}", "examples": "{\"battery_power\":{\"0\":842,\"1\":1021,\"2\":563,\"3\":615},\"blue\":{\"0\":0,\"1\":1,\"2\":1,\"3\":1},\"clock_speed\":{\"0\":2.2,\"1\":0.5,\"2\":0.5,\"3\":2.5},\"dual_sim\":{\"0\":0,\"1\":1,\"2\":1,\"3\":0},\"fc\":{\"0\":1,\"1\":0,\"2\":2,\"3\":0},\"four_g\":{\"0\":0,\"1\":1,\"2\":1,\"3\":0},\"int_memory\":{\"0\":7,\"1\":53,\"2\":41,\"3\":10},\"m_dep\":{\"0\":0.6,\"1\":0.7,\"2\":0.9,\"3\":0.8},\"mobile_wt\":{\"0\":188,\"1\":136,\"2\":145,\"3\":131},\"n_cores\":{\"0\":2,\"1\":3,\"2\":5,\"3\":6},\"pc\":{\"0\":2,\"1\":6,\"2\":6,\"3\":9},\"px_height\":{\"0\":20,\"1\":905,\"2\":1263,\"3\":1216},\"px_width\":{\"0\":756,\"1\":1988,\"2\":1716,\"3\":1786},\"ram\":{\"0\":2549,\"1\":2631,\"2\":2603,\"3\":2769},\"sc_h\":{\"0\":9,\"1\":17,\"2\":11,\"3\":16},\"sc_w\":{\"0\":7,\"1\":3,\"2\":2,\"3\":8},\"talk_time\":{\"0\":19,\"1\":7,\"2\":9,\"3\":11},\"three_g\":{\"0\":0,\"1\":1,\"2\":1,\"3\":1},\"touch_screen\":{\"0\":0,\"1\":1,\"2\":1,\"3\":0},\"wifi\":{\"0\":1,\"1\":0,\"2\":0,\"3\":0},\"price_range\":{\"0\":1,\"1\":2,\"2\":2,\"3\":2}}"}}, {"mobile-price-classification/test.csv": {"column_names": "[\"id\", \"battery_power\", \"blue\", \"clock_speed\", \"dual_sim\", \"fc\", \"four_g\", \"int_memory\", \"m_dep\", \"mobile_wt\", \"n_cores\", \"pc\", \"px_height\", \"px_width\", \"ram\", \"sc_h\", \"sc_w\", \"talk_time\", \"three_g\", \"touch_screen\", \"wifi\"]", "column_data_types": "{\"id\": \"int64\", \"battery_power\": \"int64\", \"blue\": \"int64\", \"clock_speed\": \"float64\", \"dual_sim\": \"int64\", \"fc\": \"int64\", \"four_g\": \"int64\", \"int_memory\": \"int64\", \"m_dep\": \"float64\", \"mobile_wt\": \"int64\", \"n_cores\": \"int64\", \"pc\": \"int64\", \"px_height\": \"int64\", \"px_width\": \"int64\", \"ram\": \"int64\", \"sc_h\": \"int64\", \"sc_w\": \"int64\", \"talk_time\": \"int64\", \"three_g\": \"int64\", \"touch_screen\": \"int64\", \"wifi\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 1000 entries, 0 to 999\nData columns 
(total 21 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 id 1000 non-null int64 \n 1 battery_power 1000 non-null int64 \n 2 blue 1000 non-null int64 \n 3 clock_speed 1000 non-null float64\n 4 dual_sim 1000 non-null int64 \n 5 fc 1000 non-null int64 \n 6 four_g 1000 non-null int64 \n 7 int_memory 1000 non-null int64 \n 8 m_dep 1000 non-null float64\n 9 mobile_wt 1000 non-null int64 \n 10 n_cores 1000 non-null int64 \n 11 pc 1000 non-null int64 \n 12 px_height 1000 non-null int64 \n 13 px_width 1000 non-null int64 \n 14 ram 1000 non-null int64 \n 15 sc_h 1000 non-null int64 \n 16 sc_w 1000 non-null int64 \n 17 talk_time 1000 non-null int64 \n 18 three_g 1000 non-null int64 \n 19 touch_screen 1000 non-null int64 \n 20 wifi 1000 non-null int64 \ndtypes: float64(2), int64(19)\nmemory usage: 164.2 KB\n", "summary": "{\"id\": {\"count\": 1000.0, \"mean\": 500.5, \"std\": 288.8194360957494, \"min\": 1.0, \"25%\": 250.75, \"50%\": 500.5, \"75%\": 750.25, \"max\": 1000.0}, \"battery_power\": {\"count\": 1000.0, \"mean\": 1248.51, \"std\": 432.45822690523306, \"min\": 500.0, \"25%\": 895.0, \"50%\": 1246.5, \"75%\": 1629.25, \"max\": 1999.0}, \"blue\": {\"count\": 1000.0, \"mean\": 0.516, \"std\": 0.4999939939579214, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 1.0}, \"clock_speed\": {\"count\": 1000.0, \"mean\": 1.5409000000000002, \"std\": 0.8292676738393188, \"min\": 0.5, \"25%\": 0.7, \"50%\": 1.5, \"75%\": 2.3, \"max\": 3.0}, \"dual_sim\": {\"count\": 1000.0, \"mean\": 0.517, \"std\": 0.4999609594367954, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 1.0}, \"fc\": {\"count\": 1000.0, \"mean\": 4.593, \"std\": 4.4633252483179255, \"min\": 0.0, \"25%\": 1.0, \"50%\": 3.0, \"75%\": 7.0, \"max\": 19.0}, \"four_g\": {\"count\": 1000.0, \"mean\": 0.487, \"std\": 0.5000810745080053, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 1.0}, \"int_memory\": {\"count\": 1000.0, \"mean\": 33.652, \"std\": 18.128693983449153, \"min\": 2.0, \"25%\": 18.0, \"50%\": 34.5, \"75%\": 49.0, \"max\": 64.0}, \"m_dep\": {\"count\": 1000.0, \"mean\": 0.5175, \"std\": 0.2808605216698008, \"min\": 0.1, \"25%\": 0.3, \"50%\": 0.5, \"75%\": 0.8, \"max\": 1.0}, \"mobile_wt\": {\"count\": 1000.0, \"mean\": 139.511, \"std\": 34.851549599831415, \"min\": 80.0, \"25%\": 109.75, \"50%\": 139.0, \"75%\": 170.0, \"max\": 200.0}, \"n_cores\": {\"count\": 1000.0, \"mean\": 4.328, \"std\": 2.288154638928858, \"min\": 1.0, \"25%\": 2.0, \"50%\": 4.0, \"75%\": 6.0, \"max\": 8.0}, \"pc\": {\"count\": 1000.0, \"mean\": 10.054, \"std\": 6.095099198063493, \"min\": 0.0, \"25%\": 5.0, \"50%\": 10.0, \"75%\": 16.0, \"max\": 20.0}, \"px_height\": {\"count\": 1000.0, \"mean\": 627.121, \"std\": 432.9296992393609, \"min\": 0.0, \"25%\": 263.75, \"50%\": 564.5, \"75%\": 903.0, \"max\": 1907.0}, \"px_width\": {\"count\": 1000.0, \"mean\": 1239.774, \"std\": 439.6709809567781, \"min\": 501.0, \"25%\": 831.75, \"50%\": 1250.0, \"75%\": 1637.75, \"max\": 1998.0}, \"ram\": {\"count\": 1000.0, \"mean\": 2138.998, \"std\": 1088.0922777047913, \"min\": 263.0, \"25%\": 1237.25, \"50%\": 2153.5, \"75%\": 3065.5, \"max\": 3989.0}, \"sc_h\": {\"count\": 1000.0, \"mean\": 11.995, \"std\": 4.320606744734198, \"min\": 5.0, \"25%\": 8.0, \"50%\": 12.0, \"75%\": 16.0, \"max\": 19.0}, \"sc_w\": {\"count\": 1000.0, \"mean\": 5.316, \"std\": 4.240061570557923, \"min\": 0.0, \"25%\": 2.0, \"50%\": 5.0, \"75%\": 8.0, \"max\": 18.0}, \"talk_time\": {\"count\": 1000.0, \"mean\": 
11.085, \"std\": 5.49763576448995, \"min\": 2.0, \"25%\": 6.75, \"50%\": 11.0, \"75%\": 16.0, \"max\": 20.0}, \"three_g\": {\"count\": 1000.0, \"mean\": 0.756, \"std\": 0.42970763159228237, \"min\": 0.0, \"25%\": 1.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 1.0}, \"touch_screen\": {\"count\": 1000.0, \"mean\": 0.5, \"std\": 0.5002501876563868, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.5, \"75%\": 1.0, \"max\": 1.0}, \"wifi\": {\"count\": 1000.0, \"mean\": 0.507, \"std\": 0.5002011607355596, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.0, \"75%\": 1.0, \"max\": 1.0}}", "examples": "{\"id\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"battery_power\":{\"0\":1043,\"1\":841,\"2\":1807,\"3\":1546},\"blue\":{\"0\":1,\"1\":1,\"2\":1,\"3\":0},\"clock_speed\":{\"0\":1.8,\"1\":0.5,\"2\":2.8,\"3\":0.5},\"dual_sim\":{\"0\":1,\"1\":1,\"2\":0,\"3\":1},\"fc\":{\"0\":14,\"1\":4,\"2\":1,\"3\":18},\"four_g\":{\"0\":0,\"1\":1,\"2\":0,\"3\":1},\"int_memory\":{\"0\":5,\"1\":61,\"2\":27,\"3\":25},\"m_dep\":{\"0\":0.1,\"1\":0.8,\"2\":0.9,\"3\":0.5},\"mobile_wt\":{\"0\":193,\"1\":191,\"2\":186,\"3\":96},\"n_cores\":{\"0\":3,\"1\":5,\"2\":3,\"3\":8},\"pc\":{\"0\":16,\"1\":12,\"2\":4,\"3\":20},\"px_height\":{\"0\":226,\"1\":746,\"2\":1270,\"3\":295},\"px_width\":{\"0\":1412,\"1\":857,\"2\":1366,\"3\":1752},\"ram\":{\"0\":3476,\"1\":3895,\"2\":2396,\"3\":3893},\"sc_h\":{\"0\":12,\"1\":6,\"2\":17,\"3\":10},\"sc_w\":{\"0\":7,\"1\":0,\"2\":10,\"3\":0},\"talk_time\":{\"0\":2,\"1\":7,\"2\":10,\"3\":7},\"three_g\":{\"0\":0,\"1\":1,\"2\":0,\"3\":1},\"touch_screen\":{\"0\":1,\"1\":0,\"2\":1,\"3\":1},\"wifi\":{\"0\":0,\"1\":0,\"2\":1,\"3\":0}}"}}]
| true | 2 |
<start_data_description><data_path>mobile-price-classification/train.csv:
<column_names>
['battery_power', 'blue', 'clock_speed', 'dual_sim', 'fc', 'four_g', 'int_memory', 'm_dep', 'mobile_wt', 'n_cores', 'pc', 'px_height', 'px_width', 'ram', 'sc_h', 'sc_w', 'talk_time', 'three_g', 'touch_screen', 'wifi', 'price_range']
<column_types>
{'battery_power': 'int64', 'blue': 'int64', 'clock_speed': 'float64', 'dual_sim': 'int64', 'fc': 'int64', 'four_g': 'int64', 'int_memory': 'int64', 'm_dep': 'float64', 'mobile_wt': 'int64', 'n_cores': 'int64', 'pc': 'int64', 'px_height': 'int64', 'px_width': 'int64', 'ram': 'int64', 'sc_h': 'int64', 'sc_w': 'int64', 'talk_time': 'int64', 'three_g': 'int64', 'touch_screen': 'int64', 'wifi': 'int64', 'price_range': 'int64'}
<dataframe_Summary>
{'battery_power': {'count': 2000.0, 'mean': 1238.5185, 'std': 439.41820608353135, 'min': 501.0, '25%': 851.75, '50%': 1226.0, '75%': 1615.25, 'max': 1998.0}, 'blue': {'count': 2000.0, 'mean': 0.495, 'std': 0.5001000400170075, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 1.0}, 'clock_speed': {'count': 2000.0, 'mean': 1.52225, 'std': 0.8160042088950689, 'min': 0.5, '25%': 0.7, '50%': 1.5, '75%': 2.2, 'max': 3.0}, 'dual_sim': {'count': 2000.0, 'mean': 0.5095, 'std': 0.500034766175005, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 1.0, 'max': 1.0}, 'fc': {'count': 2000.0, 'mean': 4.3095, 'std': 4.341443747983894, 'min': 0.0, '25%': 1.0, '50%': 3.0, '75%': 7.0, 'max': 19.0}, 'four_g': {'count': 2000.0, 'mean': 0.5215, 'std': 0.49966246736236386, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 1.0, 'max': 1.0}, 'int_memory': {'count': 2000.0, 'mean': 32.0465, 'std': 18.145714955206856, 'min': 2.0, '25%': 16.0, '50%': 32.0, '75%': 48.0, 'max': 64.0}, 'm_dep': {'count': 2000.0, 'mean': 0.50175, 'std': 0.2884155496235117, 'min': 0.1, '25%': 0.2, '50%': 0.5, '75%': 0.8, 'max': 1.0}, 'mobile_wt': {'count': 2000.0, 'mean': 140.249, 'std': 35.39965489638835, 'min': 80.0, '25%': 109.0, '50%': 141.0, '75%': 170.0, 'max': 200.0}, 'n_cores': {'count': 2000.0, 'mean': 4.5205, 'std': 2.2878367180426604, 'min': 1.0, '25%': 3.0, '50%': 4.0, '75%': 7.0, 'max': 8.0}, 'pc': {'count': 2000.0, 'mean': 9.9165, 'std': 6.06431494134778, 'min': 0.0, '25%': 5.0, '50%': 10.0, '75%': 15.0, 'max': 20.0}, 'px_height': {'count': 2000.0, 'mean': 645.108, 'std': 443.7808108064386, 'min': 0.0, '25%': 282.75, '50%': 564.0, '75%': 947.25, 'max': 1960.0}, 'px_width': {'count': 2000.0, 'mean': 1251.5155, 'std': 432.19944694633796, 'min': 500.0, '25%': 874.75, '50%': 1247.0, '75%': 1633.0, 'max': 1998.0}, 'ram': {'count': 2000.0, 'mean': 2124.213, 'std': 1084.7320436099494, 'min': 256.0, '25%': 1207.5, '50%': 2146.5, '75%': 3064.5, 'max': 3998.0}, 'sc_h': {'count': 2000.0, 'mean': 12.3065, 'std': 4.213245004356306, 'min': 5.0, '25%': 9.0, '50%': 12.0, '75%': 16.0, 'max': 19.0}, 'sc_w': {'count': 2000.0, 'mean': 5.767, 'std': 4.3563976058264045, 'min': 0.0, '25%': 2.0, '50%': 5.0, '75%': 9.0, 'max': 18.0}, 'talk_time': {'count': 2000.0, 'mean': 11.011, 'std': 5.463955197766688, 'min': 2.0, '25%': 6.0, '50%': 11.0, '75%': 16.0, 'max': 20.0}, 'three_g': {'count': 2000.0, 'mean': 0.7615, 'std': 0.42627292231873126, 'min': 0.0, '25%': 1.0, '50%': 1.0, '75%': 1.0, 'max': 1.0}, 'touch_screen': {'count': 2000.0, 'mean': 0.503, 'std': 0.500116044562674, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 1.0, 'max': 1.0}, 'wifi': {'count': 2000.0, 'mean': 0.507, 'std': 0.5000760322381083, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 1.0, 'max': 1.0}, 'price_range': {'count': 2000.0, 'mean': 1.5, 'std': 1.118313602106461, 'min': 0.0, '25%': 0.75, '50%': 1.5, '75%': 2.25, 'max': 3.0}}
<dataframe_info>
RangeIndex: 2000 entries, 0 to 1999
Data columns (total 21 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 battery_power 2000 non-null int64
1 blue 2000 non-null int64
2 clock_speed 2000 non-null float64
3 dual_sim 2000 non-null int64
4 fc 2000 non-null int64
5 four_g 2000 non-null int64
6 int_memory 2000 non-null int64
7 m_dep 2000 non-null float64
8 mobile_wt 2000 non-null int64
9 n_cores 2000 non-null int64
10 pc 2000 non-null int64
11 px_height 2000 non-null int64
12 px_width 2000 non-null int64
13 ram 2000 non-null int64
14 sc_h 2000 non-null int64
15 sc_w 2000 non-null int64
16 talk_time 2000 non-null int64
17 three_g 2000 non-null int64
18 touch_screen 2000 non-null int64
19 wifi 2000 non-null int64
20 price_range 2000 non-null int64
dtypes: float64(2), int64(19)
memory usage: 328.2 KB
<some_examples>
{'battery_power': {'0': 842, '1': 1021, '2': 563, '3': 615}, 'blue': {'0': 0, '1': 1, '2': 1, '3': 1}, 'clock_speed': {'0': 2.2, '1': 0.5, '2': 0.5, '3': 2.5}, 'dual_sim': {'0': 0, '1': 1, '2': 1, '3': 0}, 'fc': {'0': 1, '1': 0, '2': 2, '3': 0}, 'four_g': {'0': 0, '1': 1, '2': 1, '3': 0}, 'int_memory': {'0': 7, '1': 53, '2': 41, '3': 10}, 'm_dep': {'0': 0.6, '1': 0.7, '2': 0.9, '3': 0.8}, 'mobile_wt': {'0': 188, '1': 136, '2': 145, '3': 131}, 'n_cores': {'0': 2, '1': 3, '2': 5, '3': 6}, 'pc': {'0': 2, '1': 6, '2': 6, '3': 9}, 'px_height': {'0': 20, '1': 905, '2': 1263, '3': 1216}, 'px_width': {'0': 756, '1': 1988, '2': 1716, '3': 1786}, 'ram': {'0': 2549, '1': 2631, '2': 2603, '3': 2769}, 'sc_h': {'0': 9, '1': 17, '2': 11, '3': 16}, 'sc_w': {'0': 7, '1': 3, '2': 2, '3': 8}, 'talk_time': {'0': 19, '1': 7, '2': 9, '3': 11}, 'three_g': {'0': 0, '1': 1, '2': 1, '3': 1}, 'touch_screen': {'0': 0, '1': 1, '2': 1, '3': 0}, 'wifi': {'0': 1, '1': 0, '2': 0, '3': 0}, 'price_range': {'0': 1, '1': 2, '2': 2, '3': 2}}
<end_description>
<start_data_description><data_path>mobile-price-classification/test.csv:
<column_names>
['id', 'battery_power', 'blue', 'clock_speed', 'dual_sim', 'fc', 'four_g', 'int_memory', 'm_dep', 'mobile_wt', 'n_cores', 'pc', 'px_height', 'px_width', 'ram', 'sc_h', 'sc_w', 'talk_time', 'three_g', 'touch_screen', 'wifi']
<column_types>
{'id': 'int64', 'battery_power': 'int64', 'blue': 'int64', 'clock_speed': 'float64', 'dual_sim': 'int64', 'fc': 'int64', 'four_g': 'int64', 'int_memory': 'int64', 'm_dep': 'float64', 'mobile_wt': 'int64', 'n_cores': 'int64', 'pc': 'int64', 'px_height': 'int64', 'px_width': 'int64', 'ram': 'int64', 'sc_h': 'int64', 'sc_w': 'int64', 'talk_time': 'int64', 'three_g': 'int64', 'touch_screen': 'int64', 'wifi': 'int64'}
<dataframe_Summary>
{'id': {'count': 1000.0, 'mean': 500.5, 'std': 288.8194360957494, 'min': 1.0, '25%': 250.75, '50%': 500.5, '75%': 750.25, 'max': 1000.0}, 'battery_power': {'count': 1000.0, 'mean': 1248.51, 'std': 432.45822690523306, 'min': 500.0, '25%': 895.0, '50%': 1246.5, '75%': 1629.25, 'max': 1999.0}, 'blue': {'count': 1000.0, 'mean': 0.516, 'std': 0.4999939939579214, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 1.0, 'max': 1.0}, 'clock_speed': {'count': 1000.0, 'mean': 1.5409000000000002, 'std': 0.8292676738393188, 'min': 0.5, '25%': 0.7, '50%': 1.5, '75%': 2.3, 'max': 3.0}, 'dual_sim': {'count': 1000.0, 'mean': 0.517, 'std': 0.4999609594367954, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 1.0, 'max': 1.0}, 'fc': {'count': 1000.0, 'mean': 4.593, 'std': 4.4633252483179255, 'min': 0.0, '25%': 1.0, '50%': 3.0, '75%': 7.0, 'max': 19.0}, 'four_g': {'count': 1000.0, 'mean': 0.487, 'std': 0.5000810745080053, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 1.0}, 'int_memory': {'count': 1000.0, 'mean': 33.652, 'std': 18.128693983449153, 'min': 2.0, '25%': 18.0, '50%': 34.5, '75%': 49.0, 'max': 64.0}, 'm_dep': {'count': 1000.0, 'mean': 0.5175, 'std': 0.2808605216698008, 'min': 0.1, '25%': 0.3, '50%': 0.5, '75%': 0.8, 'max': 1.0}, 'mobile_wt': {'count': 1000.0, 'mean': 139.511, 'std': 34.851549599831415, 'min': 80.0, '25%': 109.75, '50%': 139.0, '75%': 170.0, 'max': 200.0}, 'n_cores': {'count': 1000.0, 'mean': 4.328, 'std': 2.288154638928858, 'min': 1.0, '25%': 2.0, '50%': 4.0, '75%': 6.0, 'max': 8.0}, 'pc': {'count': 1000.0, 'mean': 10.054, 'std': 6.095099198063493, 'min': 0.0, '25%': 5.0, '50%': 10.0, '75%': 16.0, 'max': 20.0}, 'px_height': {'count': 1000.0, 'mean': 627.121, 'std': 432.9296992393609, 'min': 0.0, '25%': 263.75, '50%': 564.5, '75%': 903.0, 'max': 1907.0}, 'px_width': {'count': 1000.0, 'mean': 1239.774, 'std': 439.6709809567781, 'min': 501.0, '25%': 831.75, '50%': 1250.0, '75%': 1637.75, 'max': 1998.0}, 'ram': {'count': 1000.0, 'mean': 2138.998, 'std': 1088.0922777047913, 'min': 263.0, '25%': 1237.25, '50%': 2153.5, '75%': 3065.5, 'max': 3989.0}, 'sc_h': {'count': 1000.0, 'mean': 11.995, 'std': 4.320606744734198, 'min': 5.0, '25%': 8.0, '50%': 12.0, '75%': 16.0, 'max': 19.0}, 'sc_w': {'count': 1000.0, 'mean': 5.316, 'std': 4.240061570557923, 'min': 0.0, '25%': 2.0, '50%': 5.0, '75%': 8.0, 'max': 18.0}, 'talk_time': {'count': 1000.0, 'mean': 11.085, 'std': 5.49763576448995, 'min': 2.0, '25%': 6.75, '50%': 11.0, '75%': 16.0, 'max': 20.0}, 'three_g': {'count': 1000.0, 'mean': 0.756, 'std': 0.42970763159228237, 'min': 0.0, '25%': 1.0, '50%': 1.0, '75%': 1.0, 'max': 1.0}, 'touch_screen': {'count': 1000.0, 'mean': 0.5, 'std': 0.5002501876563868, 'min': 0.0, '25%': 0.0, '50%': 0.5, '75%': 1.0, 'max': 1.0}, 'wifi': {'count': 1000.0, 'mean': 0.507, 'std': 0.5002011607355596, 'min': 0.0, '25%': 0.0, '50%': 1.0, '75%': 1.0, 'max': 1.0}}
<dataframe_info>
RangeIndex: 1000 entries, 0 to 999
Data columns (total 21 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 id 1000 non-null int64
1 battery_power 1000 non-null int64
2 blue 1000 non-null int64
3 clock_speed 1000 non-null float64
4 dual_sim 1000 non-null int64
5 fc 1000 non-null int64
6 four_g 1000 non-null int64
7 int_memory 1000 non-null int64
8 m_dep 1000 non-null float64
9 mobile_wt 1000 non-null int64
10 n_cores 1000 non-null int64
11 pc 1000 non-null int64
12 px_height 1000 non-null int64
13 px_width 1000 non-null int64
14 ram 1000 non-null int64
15 sc_h 1000 non-null int64
16 sc_w 1000 non-null int64
17 talk_time 1000 non-null int64
18 three_g 1000 non-null int64
19 touch_screen 1000 non-null int64
20 wifi 1000 non-null int64
dtypes: float64(2), int64(19)
memory usage: 164.2 KB
<some_examples>
{'id': {'0': 1, '1': 2, '2': 3, '3': 4}, 'battery_power': {'0': 1043, '1': 841, '2': 1807, '3': 1546}, 'blue': {'0': 1, '1': 1, '2': 1, '3': 0}, 'clock_speed': {'0': 1.8, '1': 0.5, '2': 2.8, '3': 0.5}, 'dual_sim': {'0': 1, '1': 1, '2': 0, '3': 1}, 'fc': {'0': 14, '1': 4, '2': 1, '3': 18}, 'four_g': {'0': 0, '1': 1, '2': 0, '3': 1}, 'int_memory': {'0': 5, '1': 61, '2': 27, '3': 25}, 'm_dep': {'0': 0.1, '1': 0.8, '2': 0.9, '3': 0.5}, 'mobile_wt': {'0': 193, '1': 191, '2': 186, '3': 96}, 'n_cores': {'0': 3, '1': 5, '2': 3, '3': 8}, 'pc': {'0': 16, '1': 12, '2': 4, '3': 20}, 'px_height': {'0': 226, '1': 746, '2': 1270, '3': 295}, 'px_width': {'0': 1412, '1': 857, '2': 1366, '3': 1752}, 'ram': {'0': 3476, '1': 3895, '2': 2396, '3': 3893}, 'sc_h': {'0': 12, '1': 6, '2': 17, '3': 10}, 'sc_w': {'0': 7, '1': 0, '2': 10, '3': 0}, 'talk_time': {'0': 2, '1': 7, '2': 10, '3': 7}, 'three_g': {'0': 0, '1': 1, '2': 0, '3': 1}, 'touch_screen': {'0': 1, '1': 0, '2': 1, '3': 1}, 'wifi': {'0': 0, '1': 0, '2': 1, '3': 0}}
<end_description>
| 573 | 0 | 3,694 | 573 |
69534753
|
<jupyter_start><jupyter_text>Chest X-Ray Images (Pneumonia)
### Context
http://www.cell.com/cell/fulltext/S0092-8674(18)30154-5

Figure S6. Illustrative Examples of Chest X-Rays in Patients with Pneumonia, Related to Figure 6
The normal chest X-ray (left panel) depicts clear lungs without any areas of abnormal opacification in the image. Bacterial pneumonia (middle) typically exhibits a focal lobar consolidation, in this case in the right upper lobe (white arrows), whereas viral pneumonia (right) manifests with a more diffuse ‘‘interstitial’’ pattern in both lungs.
http://www.cell.com/cell/fulltext/S0092-8674(18)30154-5
### Content
The dataset is organized into 3 folders (train, test, val) and contains subfolders for each image category (Pneumonia/Normal). There are 5,863 X-Ray images (JPEG) and 2 categories (Pneumonia/Normal).
Chest X-ray images (anterior-posterior) were selected from retrospective cohorts of pediatric patients of one to five years old from Guangzhou Women and Children’s Medical Center, Guangzhou. All chest X-ray imaging was performed as part of patients’ routine clinical care.
For the analysis of chest x-ray images, all chest radiographs were initially screened for quality control by removing all low quality or unreadable scans. The diagnoses for the images were then graded by two expert physicians before being cleared for training the AI system. In order to account for any grading errors, the evaluation set was also checked by a third expert.
Kaggle dataset identifier: chest-xray-pneumonia
<jupyter_script>import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
import tensorflow_datasets as tfds
import pathlib
import os
import seaborn as sns
import random
from PIL import Image
import cv2 as cv
import requests
from sklearn.metrics import confusion_matrix
# # 1- Pneumonia Dataset
# ## 1.1- What is Pneumonia?
# Pneumonia is a disease that causes the air sacs in the lungs to become inflamed.
# In an infected person these air sacs may fill with pus,
# which leads to coughing with phlegm and difficulty breathing.
# Pneumonia can be caused by bacteria, viruses and fungi, and it is most dangerous for children,
# people over 65 years of age and those with existing health problems.
# **Symptoms** vary depending on the person's overall health and often resemble those of a cold or flu;
# they include chest pain when breathing or coughing, fever, sweating and chills,
# nausea, vomiting or diarrhea, and shortness of breath.
# Source: https://www.mayoclinic.org/diseases-conditions/pneumonia/symptoms-causes/syc-20354204
# ## 1.2- Pneumonia Dataset
# **Content**
# The dataset is organized into 3 folders (train, test, val) and contains subfolders for each image category (Pneumonia/Normal). There are 5,863 X-Ray images (JPEG) and 2 categories (Pneumonia/Normal).
# Chest X-ray images (anterior-posterior) were selected from retrospective cohorts of pediatric patients of one to five years old from Guangzhou Women and Children’s Medical Center, Guangzhou. All chest X-ray imaging was performed as part of patients’ routine clinical care.
# For the analysis of chest x-ray images, all chest radiographs were initially screened for quality control by removing all low quality or unreadable scans. The diagnoses for the images were then graded by two expert physicians before being cleared for training the AI system. In order to account for any grading errors, the evaluation set was also checked by a third expert.
# # 2- Basic Exploratory Data Analysis
# **In this section we will do some basic EDA on the training data to explore and understand the data we have**
training_data_path = pathlib.Path(r"../input/chest-xray-pneumonia/chest_xray/train")
validatoin_data_path = pathlib.Path(r"../input/chest-xray-pneumonia/chest_xray/val")
testing_data_path = pathlib.Path(r"../input/chest-xray-pneumonia/chest_xray/test")
# loading the data for exploration
# This code will generate a DataFrame with 2 columns:
# one of them is the file name of the image and the other is the label, NORMAL or PNEUMONIA
normal_data = [
img_path
for img_path in os.listdir(training_data_path / "NORMAL")
if img_path[-5:] == ".jpeg"
]
NORMAL = ["NORMAL"] * len(normal_data)
normal_df = pd.DataFrame({"path": normal_data, "label": NORMAL})
pneumonia_data = [
img_path
for img_path in os.listdir(training_data_path / "PNEUMONIA")
if img_path[-5:] == ".jpeg"
]
PNEUMONIA = ["PNEUMONIA"] * len(pneumonia_data)
pneumonia_df = pd.DataFrame({"path": pneumonia_data, "label": PNEUMONIA})
path_label_df = pd.concat([normal_df, pneumonia_df])
path_label_df.index = np.arange(len(path_label_df))  # reset to a clean 0..N-1 index (5,216 training images)
f, ax = plt.subplots(1, 2, figsize=(18, 8))
path_label_df["label"].value_counts().plot.pie(
explode=[0, 0.1], autopct="%1.1f%%", ax=ax[0], shadow=True
)
ax[0].set_title("NORMAL Vs. PNEUMONIA")
sns.countplot(x=path_label_df["label"])
ax[1].set_title("NORMAL Vs. PNEUMONIA")
plt.show()
# As we can see in the figure above, we have an imbalanced dataset,
# so we need different ways to measure the performance of a model trained on it.
# I will present some of the useful tools for this situation below.
# **Imbalanced DataSet**
# You can find the full details about Imbalanced Dataset in my discussion at:
# **Source:** Imbalanced Dataset Metrics in binary classification problem explained/
# In a binary classification problem the dataset is sometimes imbalanced: for example, 99% of the data
# belongs to one class (negative) and the remaining 1% to the other class (positive). We call this an Imbalanced Dataset.
# If we use the accuracy metric on this kind of problem we get misleadingly optimistic results: a classifier that always predicts the negative class already reaches 99% accuracy!
# For these problems we use a **confusion matrix** and other related metrics to measure performance. So what is a confusion matrix?
# A confusion matrix is a 2 x 2 matrix (for binary classification) where the rows represent the actual classes and the columns represent the predicted classes (a schematic of this layout is shown right after the four definitions below).
# You can see an example of a confusion matrix at the link below:
# https://cdn-images-1.medium.com/max/950/1*PPgItHcPSaskyjLMWFC-Kw.png
# - **True Positive (TP):** they are a positive class instances and correctly classified as positive.
# - **True Negative (TN):** they are a negative class instances and correctly classified as negative.
# - **False Positive (FP):** they are a negative class instances and incorrectly classified as positive.
# - **False Negative (FN):** they are a positive class instances and incorrectly classified as negative.
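# To make the layout described above concrete, here is a schematic of the 2 x 2 confusion matrix
# (in this notebook, positive = PNEUMONIA and negative = NORMAL):
#
# |                     | Predicted Negative | Predicted Positive |
# |---------------------|--------------------|--------------------|
# | **Actual Negative** | TN                 | FP                 |
# | **Actual Positive** | FN                 | TP                 |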
# **We can use the confusion matrix to find other useful metrics:**
# **Precision** is the accuracy of the positive predictions (out of the instances classified as positive, how many of them are truly positive):
# - Precision = TP / (TP + FP)
# **Recall** (other names: sensitivity, true positive rate (TPR)): out of the instances that are actually positive, how many are correctly classified as positive:
# - Recall = TP / (TP + FN)
# If we need a single value to compare two classifiers, we can use the **F1-score**, which is the harmonic mean of precision and recall:
# - F1 = 2 / (1/precision + 1/recall)
# Other useful metrics are **precision_recall_curve()**, **average_precision_score**,
# the **Receiver Operating Characteristic (ROC) curve**, and **the area under the ROC curve (ROC-AUC)**.
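# A minimal synthetic sketch of the accuracy paradox and the metrics above
# (illustrative only; the arrays below are made up and are not taken from the X-ray data):
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score

# 1000 labels with a 99:1 negative/positive split, and a "classifier" that always predicts negative
y_true_demo = np.array([0] * 990 + [1] * 10)
y_pred_demo = np.zeros(1000, dtype=int)
print("accuracy :", accuracy_score(y_true_demo, y_pred_demo))  # 0.99, looks great
print("precision:", precision_score(y_true_demo, y_pred_demo, zero_division=0))  # 0.0
print("recall   :", recall_score(y_true_demo, y_pred_demo))  # 0.0
print("F1-score :", f1_score(y_true_demo, y_pred_demo))  # 0.0, reveals the useless model
print(confusion_matrix(y_true_demo, y_pred_demo))  # [[990, 0], [10, 0]]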
# Visualization of normal images
# sampling different 4 normal images randomly
rand_normal_imgs = random.sample(normal_data, 4)
plt.figure(figsize=(7, 7))
for i in range(4):
norm_img = Image.open(training_data_path / "NORMAL" / rand_normal_imgs[i])
    norm_img = norm_img.resize(size=(180, 180))  # PIL's resize returns a new image, so keep the result
ax = plt.subplot(2, 2, i + 1)
plt.imshow(norm_img, cmap="gray")
label = "NORMAL"
plt.title("NAORMAL")
plt.axis("off")
# Visualization of pneumonia images
# sampling different 4 pneumonia images randomly
rand_pneumonia_imgs = random.sample(pneumonia_data, 4)
plt.figure(figsize=(7, 7))
for i in range(4):
pneumonia_img = Image.open(
training_data_path / "PNEUMONIA" / rand_pneumonia_imgs[i]
)
    pneumonia_img = pneumonia_img.resize(size=(180, 180))  # keep the resized image
ax = plt.subplot(2, 2, i + 1)
plt.imshow(pneumonia_img, cmap="gray")
label = "PNEUMONIA"
plt.title("PNEUMONIA")
plt.axis("off")
# As we can see from the images above, we **cannot differentiate** between an infected and an uninfected person with the **naked eye**,
# so the task here is to build a good classifier that helps detect infected patients; such a model can help save lives.
# # 3- ImageDataGenerator
# The data is stored as JPEG files and should be converted to floating point numbers before being fed to the
# network, so we have to read the files, decode them into RGB grids of pixels, convert them into floating point tensors,
# and rescale them. The `ImageDataGenerator` class can automatically turn image files on disk into batches of floating point
# numbers ready for training, using a Python generator.
# We can effectively get more data and fight overfitting by using data augmentation, i.e. applying random transformations to the data,
# also via the `ImageDataGenerator` class:
# `rotation_range`: randomly rotating the image.
# `zoom_range`: randomly zooming inside pictures.
# `horizontal_flip`, `vertical_flip`: randomly flipping half of the images horizontally and vertically.
# **source:** Deep Learning with python by: Francois Chollet
img_height = 224
img_width = 224
training_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1 / 255.0,
rotation_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
vertical_flip=True,
)
training_generator = training_data_gen.flow_from_directory(
training_data_path,
target_size=(img_height, img_width),
batch_size=32,
class_mode="binary",
)
# ------------------------------------------------------------------------------------------------------
val_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0)
validation_generator = val_data_gen.flow_from_directory(
    testing_data_path,
    target_size=(img_height, img_width),
    batch_size=8,
    class_mode="binary",
    shuffle=False,  # keep file order so generator.labels matches prediction order later
)
# ------------------------------------------------------------------------------------------------------
test_data_gen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1 / 255.0)
test_generator = test_data_gen.flow_from_directory(
    validatoin_data_path,
    target_size=(img_height, img_width),
    batch_size=16,
    class_mode="binary",
    shuffle=False,  # keep file order so generator.labels matches prediction order later
)
# ------------------------------------------------------------------------------------------------------
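# Note: the "validation" generator above reads from testing_data_path and the "test" generator reads from
# validatoin_data_path. This swap appears deliberate: the dataset's provided val/ folder holds only 16 images,
# which is too small for meaningful validation curves, so the larger test/ split is used for validation instead.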
# # 4- Creating a simple model as a baseline
# **Define the convolutional base using a common pattern: a stack of `Conv2D` and `MaxPooling2D` layers.**
# The CNN takes a 3D tensor of shape (height, width, channels), i.e. (R, G, B), without the batch size.
# We give the input shape (224, 224, 3) to the first layer by passing the argument `input_shape`.
# **Source:** Tensorflow Convolutional Neural Network (CNN)/
baseline_model = tf.keras.models.Sequential()
baseline_model.add(
tf.keras.layers.Conv2D(
filters=32,
kernel_size=(5, 5),
strides=(1, 1),
activation="relu",
input_shape=(224, 224, 3),
)
)
baseline_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
baseline_model.add(
tf.keras.layers.Conv2D(
filters=64, kernel_size=(3, 3), strides=(1, 1), activation="relu"
)
)
baseline_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
baseline_model.add(
tf.keras.layers.Conv2D(
filters=128, kernel_size=(3, 3), strides=(1, 1), activation="relu"
)
)
baseline_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
baseline_model.add(
tf.keras.layers.Conv2D(
filters=256, kernel_size=(3, 3), strides=(1, 1), activation="relu"
)
)
baseline_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
baseline_model.summary()
# The output of every Conv2D and MaxPooling2D layer is a 3D tensor of shape (height, width, channels). As we can see,
# the height and the width shrink as we go deeper in the network; we can control the number of output channels
# of each Conv2D with its first argument (e.g., filters=32).
# **Source:** Tensorflow Convolutional Neural Network (CNN)/
# **Add Dense layers on top**
# Finally we will feed the last output tensor from the convolutional base into Dense layers
# that will perform the classification. First we need to flatten the 3D tensor to 1D with a `Flatten` layer, then we add
# one or more dense layers on top of it. This is a binary classification problem (NORMAL vs. PNEUMONIA),
# so the final dense layer has a single unit.
# **Source:** Tensorflow Convolutional Neural Network (CNN)/
# For the final layer I use a sigmoid activation, which is appropriate for binary classification;
# for the layer preceding it I use 256 units to avoid an information bottleneck.
# After that dense layer I add a `BatchNormalization` layer (plus Dropout) to reduce the danger of **Vanishing/Exploding Gradient**
# problems and overfitting.
baseline_model.add(tf.keras.layers.Flatten())
baseline_model.add(
tf.keras.layers.Dense(256, activation="relu", kernel_initializer="he_normal")
)
baseline_model.add(tf.keras.layers.BatchNormalization())
baseline_model.add(tf.keras.layers.Dropout(0.4))
baseline_model.add(tf.keras.layers.Dense(1, activation="sigmoid"))
baseline_model.summary()
# **Compile and train the model**
def decayed_learning_rate(epoch):
return 0.01 * 0.1 ** (epoch / 20)
lr_scheduler = tf.keras.callbacks.LearningRateScheduler(decayed_learning_rate)
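# A quick check of the schedule above, computed from the formula 0.01 * 0.1 ** (epoch / 20):
# epoch 0 -> 0.01, epoch 10 -> ~0.0032, epoch 20 -> 0.001.
# Note that LearningRateScheduler sets the learning rate at the start of each epoch, so it overrides the
# 0.001 passed to the SGD optimizer below.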
# ------------------------------------------------------------------------------------------------------
early_stopting = tf.keras.callbacks.EarlyStopping(
monitor="val_accuracy", restore_best_weights=True, patience=5
)
baseline_model.compile(
optimizer=tf.keras.optimizers.SGD(learning_rate=0.001),
loss=tf.keras.losses.BinaryCrossentropy(),
metrics=["accuracy"],
)
baseline_history = baseline_model.fit(
training_generator,
steps_per_epoch=163,
epochs=10,
validation_data=validation_generator,
validation_steps=78,
callbacks=[lr_scheduler, early_stopting],
)
baseline_train_loss = baseline_history.history["loss"]
baseline_val_loss = baseline_history.history["val_loss"]
plt.plot(baseline_history.epoch, baseline_train_loss, label="Training Loss")
plt.plot(baseline_history.epoch, baseline_val_loss, label="Validation Loss")
plt.grid(True)
plt.legend()
baseline_train_acc = baseline_history.history["accuracy"]
baseline_val_acc = baseline_history.history["val_accuracy"]
plt.plot(baseline_history.epoch, baseline_train_acc, label="Training Accuracy")
plt.plot(baseline_history.epoch, baseline_val_acc, label="Validation Accuracy")
plt.grid(True)
plt.legend()
baseline_testset_loss, baseline_testest_acc = baseline_model.evaluate(test_generator)
print(
"The test set loss: {}, Test set Accuracy: {}".format(
baseline_testset_loss, baseline_testest_acc
)
)
print(
"And by now we have a baseline model to beat, The accuracy of the baseline model on the testset is {}".format(
baseline_testest_acc
)
)
# # 5- Implementing a simple ResNet CNN
# Residual Network (ResNet) won the ILSVRC 2015 challenge; it was introduced by Kaiming He et al. The idea behind ResNet is to use skip
# connections (shortcut connections): the input signal fed to a layer is added to the output of that layer, so through these
# skip connections the signal can make its way across the whole network. ResNet can be seen as a deep stack of Residual Units.
# A Residual Unit is composed of two Conv layers without pooling, with BatchNormalization, ReLU activation, 3 x 3 kernels, stride 1 and
# "same" padding.
# **source:** Hands-On Machine Learning with Scikit-Learn, Keras, and TensorFlow: Concepts, Tools, and Techniques to Build Intelligent Systems
# by: Aurélien Géron
# By using Residual Units we can train deeper networks without suffering from the vanishing/exploding gradients problem.
class ResidualUnit(tf.keras.layers.Layer):
def __init__(self, filters, strides, activation, **kwargs):
super().__init__(**kwargs)
self.activation = tf.keras.activations.get(activation)
self.main_layers = [
tf.keras.layers.Conv2D(
filters,
kernel_size=(3, 3),
strides=strides,
padding="same",
use_bias=False,
),
tf.keras.layers.BatchNormalization(),
self.activation,
tf.keras.layers.Conv2D(
filters, kernel_size=(3, 3), strides=1, padding="same", use_bias=False
),
tf.keras.layers.BatchNormalization(),
]
self.skip_con_layers = []
if strides > 1:
self.skip_con_layers = [
tf.keras.layers.Conv2D(
filters,
kernel_size=(1, 1),
strides=strides,
padding="same",
use_bias=False,
),
tf.keras.layers.BatchNormalization(),
]
def call(self, inputs):
z = inputs
for layer in self.main_layers:
z = layer(z)
skip_z = inputs
for layer in self.skip_con_layers:
skip_z = layer(skip_z)
return self.activation(z + skip_z)
simple_ResNet_model = tf.keras.models.Sequential()
simple_ResNet_model.add(
tf.keras.layers.Conv2D(64, 7, strides=2, input_shape=(224, 224, 3), use_bias=False)
)
simple_ResNet_model.add(tf.keras.layers.MaxPool2D(pool_size=3))
simple_ResNet_model.add(tf.keras.layers.BatchNormalization())
simple_ResNet_model.add(tf.keras.layers.Activation("relu"))
simple_ResNet_model.add(
tf.keras.layers.MaxPool2D(pool_size=3, strides=2, padding="same")
)
simple_ResNet_model.add(ResidualUnit(filters=64, strides=1, activation="relu"))
simple_ResNet_model.add(ResidualUnit(filters=64, strides=1, activation="relu"))
simple_ResNet_model.add(ResidualUnit(filters=128, strides=2, activation="relu"))
simple_ResNet_model.add(ResidualUnit(filters=128, strides=1, activation="relu"))
simple_ResNet_model.add(tf.keras.layers.Flatten())
simple_ResNet_model.add(tf.keras.layers.Dense(1, activation="sigmoid"))
tf.keras.utils.plot_model(
simple_ResNet_model,
show_shapes=True,
show_dtype=True,
show_layer_names=True,
expand_nested=True,
)
simple_ResNet_model.summary()
# As I mentioned, this is an imbalanced dataset, and I explained in section 2 that we need to deal with this problem using
# different tools.
# As metrics I will use the following: `TruePositives`, `FalsePositives`, `TrueNegatives`, `FalseNegatives`,
# `BinaryAccuracy`, `Precision`, `Recall`, and `AUC` to measure the performance of the model.
from tensorflow.keras import backend as K
def F1_score(y_true, y_pred):
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
recall = true_positives / (possible_positives + K.epsilon())
f1_val = 2 * (precision * recall) / (precision + recall + K.epsilon())
return f1_val
# **Source:** Link to Source of the F1_score code/
METRICS = [
tf.keras.metrics.TruePositives(name="tp"),
tf.keras.metrics.FalsePositives(name="fp"),
tf.keras.metrics.TrueNegatives(name="tn"),
tf.keras.metrics.FalseNegatives(name="fn"),
tf.keras.metrics.BinaryAccuracy(name="accuracy"),
tf.keras.metrics.Precision(name="precision"),
tf.keras.metrics.Recall(name="recall"),
tf.keras.metrics.AUC(name="auc"),
tf.keras.metrics.AUC(name="prc", curve="PR"),
F1_score,
]
# **Source:** Link to Source of the METRICS code/
initial_learning_rate = 0.001
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate, decay_steps=750, decay_rate=0.92, staircase=True
)
simple_ResNet_model.compile(
optimizer=tf.keras.optimizers.Adam(lr_schedule),
loss=tf.keras.losses.BinaryCrossentropy(),
metrics=METRICS,
)
simple_ResNet_history = simple_ResNet_model.fit(
training_generator,
steps_per_epoch=163,
epochs=20,
validation_data=validation_generator,
validation_steps=78,
callbacks=[early_stopting],
)
# **For test data:**
simple_ResNet_mode_results = simple_ResNet_model.evaluate(test_generator)
for name, value in zip(simple_ResNet_model.metrics_names, simple_ResNet_mode_results):
print(name, ": ", value)
print()
# **For training data:**
# positive: PNEUMONIA
# negative: NORMAL
actual_positive = len(os.listdir(training_data_path / "PNEUMONIA"))
actual_negative = len(os.listdir(training_data_path / "NORMAL"))
# index [-1] takes the metrics of the last recorded epoch (index [0] would report the first epoch only)
simple_ResNet_train_precision = simple_ResNet_history.history["precision"][-1]
simple_ResNet_train_recall = simple_ResNet_history.history["recall"][-1]
simple_ResNet_train_ROC_AUC = simple_ResNet_history.history["auc"][-1]
simple_ResNet_train_prc = simple_ResNet_history.history["prc"][-1]
simple_ResNet_train_F1_score = simple_ResNet_history.history["F1_score"][-1]
print(
    "The FalsePositives were {} out of {} truly negative samples that were classified as positive.".format(
        simple_ResNet_history.history["fp"][-1], actual_negative
    )
)
print("so we have a precision of {}".format(simple_ResNet_train_precision))
print("\n")
print(
    "The FalseNegatives were {} out of {} truly positive samples that were classified as negative.".format(
        simple_ResNet_history.history["fn"][-1], actual_positive
    )
)
print("so we have a recall of {}".format(simple_ResNet_train_recall))
print("\n")
print("The F1-score: {}".format(simple_ResNet_train_F1_score))
print("\n")
print(
"The Area Under the Curve of a Receiver Operating Characteristic curve ROC-AUC: {}".format(
simple_ResNet_train_ROC_AUC
)
)
print(
"And the Area Under the Curve of the Precision-Recall Curve prc: {}".format(
simple_ResNet_train_prc
)
)
print("\n")
# **For Validation data**
actual_positive = len(os.listdir(testing_data_path / "PNEUMONIA"))
actual_negative = len(os.listdir(testing_data_path / "NORMAL"))
# again, take the last recorded epoch's metrics
simple_ResNet_val_precision = simple_ResNet_history.history["val_precision"][-1]
simple_ResNet_val_recall = simple_ResNet_history.history["val_recall"][-1]
simple_ResNet_val_ROC_AUC = simple_ResNet_history.history["val_auc"][-1]
simple_ResNet_val_prc = simple_ResNet_history.history["val_prc"][-1]
simple_ResNet_val_F1_score = simple_ResNet_history.history["val_F1_score"][-1]
print(
    "The FalsePositives were {} out of {} truly negative samples that were classified as positive.".format(
        simple_ResNet_history.history["val_fp"][-1], actual_negative
    )
)
print("so we have a precision of {}".format(simple_ResNet_val_precision))
print("\n")
print(
    "The FalseNegatives were {} out of {} truly positive samples that were classified as negative.".format(
        simple_ResNet_history.history["val_fn"][-1], actual_positive
    )
)
print("so we have a recall of {}".format(simple_ResNet_val_recall))
print("\n")
print("The F1-score: {}".format(simple_ResNet_val_F1_score))
print("\n")
print(
"The Area Under the Curve of a Receiver Operating Characteristic curve ROC-AUC: {}".format(
simple_ResNet_val_ROC_AUC
)
)
print(
"And the Area Under the Curve of the Precision-Recall Curve prc: {}".format(
simple_ResNet_val_prc
)
)
print("\n")
# We can get better results using transfer learning, and that is the topic of the next section.
# But for this kind of problem, the diagnosis of illnesses, **it is better to have as few FalseNegatives as possible**.
# A FalseNegative is a patient who actually has the disease (positive) but is diagnosed as negative (as not having it);
# if the patient truly has the disease we have to diagnose him correctly so he can undergo treatment.
# On the other side, it is acceptable to have some FalsePositives, where the patient does not actually have the disease (negative) but is diagnosed as positive, because further investigation will reveal the truth.
# Mental health matters too, though: with many FalsePositives we tell healthy patients that
# they have the disease and may harm their mental health for no reason.
# So we want a single metric to concentrate on that balances both, which is the F1 score.
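# A minimal sketch of trading precision for recall by lowering the decision threshold.
# y_true_ex and y_prob_ex are placeholder arrays standing in for real labels and predicted probabilities
# (e.g. the output of model.predict on an un-shuffled generator); the 0.3 threshold is an arbitrary example.
from sklearn.metrics import precision_score, recall_score

y_true_ex = np.array([0, 0, 1, 1, 1, 0, 1, 1])
y_prob_ex = np.array([0.35, 0.45, 0.4, 0.8, 0.32, 0.1, 0.9, 0.45])
for threshold in (0.5, 0.3):
    y_pred_ex = (y_prob_ex >= threshold).astype(int)
    # expected output: threshold=0.5 -> precision 1.00, recall 0.40; threshold=0.3 -> precision ~0.71, recall 1.00
    print(
        "threshold={:.1f} precision={:.2f} recall={:.2f}".format(
            threshold,
            precision_score(y_true_ex, y_pred_ex),
            recall_score(y_true_ex, y_pred_ex),
        )
    )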
# # 6- Using pretrained models (Transfer learning (ResNet50V2))
# Transfer learning is the process of taking the knowledge of a model pretrained on a different task or domain
# and transferring it to another domain or task. A task is defined by the input and the expected output (for example image classification
# and object detection are different tasks), while a domain means different data distributions for the same task,
# for example, when the task is image classification, images taken from the web versus images from the user's camera.
# **source:** Hands-On Computer Vision with TensorFlow 2 by: Benjamin Planche, Eliot Andres
# As we know, the first layers of a ConvNet learn local, generic features, while higher layers extract more
# abstract concepts; we can use this property to apply transfer learning.
# Here we take the convolutional base without the densely connected classifier by setting `include_top` to False,
# because the representations learned by the convolutional base are more generic and reusable, and therefore useful
# regardless of the computer vision problem at hand, while the representations learned by the classifier are
# more specific to the classes on which the model was trained.
# **source:** Deep Learning with Python by: Francois Chollet
# There are **two ways to use a pretrained model**:
# - Feature Extraction
# - Fine Tuning
# **source:** Deep Learning with Python by: Francois Chollet
# I will use the Fine Tuning method.
#
base_model = tf.keras.applications.ResNet50V2(
weights="imagenet", input_shape=(224, 224, 3), include_top=False
)
#
# **Steps for Fine-Tuning a network:**
# 1. Add your custom network on top of an already-trained base network
# 2. Freeze the base network
# 3. Train the part you added
# 4. Unfreeze some layers in the base network
# 5. Jointly train both these layers and the part you added
# **Why do we do the first 3 steps? Why not just unfreeze some layers in the base network and directly train them jointly with the part we added?**
# Freezing the base network's weights means preventing them from being updated during training; if we do not do this,
# the representations previously learned by the base network will be modified during training.
# The dense layers we added on top are randomly initialized, so very large weight updates would be propagated through the
# network, destroying the representations that were previously learned.
# **source:** Deep Learning with Python by: Francois Chollet
TL_model = tf.keras.models.Sequential()
# step 1: Add your custom network on top of an already-trained base network
TL_model.add(base_model)
TL_model.add(tf.keras.layers.Flatten())
TL_model.add(tf.keras.layers.Dense(256, activation="relu"))
TL_model.add(tf.keras.layers.Dropout(0.3))
TL_model.add(tf.keras.layers.Dense(1, activation="sigmoid"))
# step 2: Freeze the base network
base_model.trainable = False
# step 3: Train the part you added
TL_model.compile(
optimizer=tf.keras.optimizers.Adam(lr_schedule),
loss=tf.keras.losses.BinaryCrossentropy(),
metrics=METRICS,
)
TL_model_history = TL_model.fit(
training_generator,
steps_per_epoch=163,
epochs=10,
validation_data=validation_generator,
validation_steps=78,
)
baseline_results = TL_model.evaluate(test_generator)
for name, value in zip(TL_model.metrics_names, baseline_results):
print(name, ": ", value)
print()
# **Fine Tuning**
# step 4: Unfreeze some layers in the base network
base_model.trainable = True
set_trainable = False
for layer in base_model.layers:
if layer.name == "conv5_block3_preact_bn":
set_trainable = True
if set_trainable:
layer.trainable = True
else:
layer.trainable = False
# Here we use a **very low learning rate** to limit the magnitude of the modifications we make to the representations
# of the layers that we are fine tuning.
# **source:** Deep Learning with Python by: Francois Chollet
# step 5: Jointly train both these layers and the part you added
TL_model.compile(
optimizer=tf.keras.optimizers.RMSprop(learning_rate=1e-5),
loss=tf.keras.losses.BinaryCrossentropy(),
metrics=METRICS,
)
TL_model_history = TL_model.fit(
training_generator,
steps_per_epoch=163,
epochs=20,
validation_data=validation_generator,
validation_steps=78,
callbacks=[early_stopting],
)
# # 7- Evaluation
baseline_results = TL_model.evaluate(test_generator)
for name, value in zip(TL_model.metrics_names, baseline_results):
print(name, ": ", value)
print()
# **For the training data**
actual_positive = len(os.listdir(training_data_path / "PNEUMONIA"))
actual_negative = len(os.listdir(training_data_path / "NORMAL"))
# take the last recorded epoch's metrics
TL_model_train_precision = TL_model_history.history["precision"][-1]
TL_model_train_recall = TL_model_history.history["recall"][-1]
TL_model_train_ROC_AUC = TL_model_history.history["auc"][-1]
TL_model_train_prc = TL_model_history.history["prc"][-1]
TL_model_train_F1_score = TL_model_history.history["F1_score"][-1]
print(
    "The FalsePositives were {} out of {} truly negative samples that were classified as positive.".format(
        TL_model_history.history["fp"][-1], actual_negative
    )
)
print("so we have a precision of {}".format(TL_model_train_precision))
print("\n")
print(
    "The FalseNegatives were {} out of {} truly positive samples that were classified as negative.".format(
        TL_model_history.history["fn"][-1], actual_positive
    )
)
print("so we have a recall of {}".format(TL_model_train_recall))
print("\n")
print("The F1-score: {}".format(TL_model_train_F1_score))
print("\n")
print(
"The Area Under the Curve of a Receiver Operating Characteristic curve ROC-AUC: {}".format(
TL_model_train_ROC_AUC
)
)
print(
"And the Area Under the Curve of the Precision-Recall Curve prc: {}".format(
TL_model_train_prc
)
)
print("\n")
# **For validation data**
actual_positive = len(os.listdir(testing_data_path / "PNEUMONIA"))
actual_negative = len(os.listdir(testing_data_path / "NORMAL"))
# take the last recorded epoch's metrics
TL_model_val_precision = TL_model_history.history["val_precision"][-1]
TL_model_val_recall = TL_model_history.history["val_recall"][-1]
TL_model_val_ROC_AUC = TL_model_history.history["val_auc"][-1]
TL_model_val_prc = TL_model_history.history["val_prc"][-1]
TL_model_val_F1_score = TL_model_history.history["val_F1_score"][-1]
print(
    "The FalsePositives were {} out of {} truly negative samples that were classified as positive.".format(
        TL_model_history.history["val_fp"][-1], actual_negative
    )
)
print("so we have a precision of {}".format(TL_model_val_precision))
print("\n")
print(
    "The FalseNegatives were {} out of {} truly positive samples that were classified as negative.".format(
        TL_model_history.history["val_fn"][-1], actual_positive
    )
)
print("so we have a recall of {}".format(TL_model_val_recall))
print("\n")
print("The F1-score: {}".format(TL_model_val_F1_score))
print("\n")
print(
"The Area Under the Curve of a Receiver Operating Characteristic curve ROC-AUC: {}".format(
TL_model_val_ROC_AUC
)
)
print(
"And the Area Under the Curve of the Precision-Recall Curve prc: {}".format(
TL_model_val_prc
)
)
print("\n")
def plot_cm(labels, predictions, p=0.5):
cm = confusion_matrix(labels, predictions > p)
plt.figure(figsize=(5, 5))
sns.heatmap(cm, annot=True, fmt="d")
plt.title("Confusion matrix @{:.2f}".format(p))
plt.ylabel("Actual label")
plt.xlabel("Predicted label")
predictions = TL_model.predict(test_generator)
plot_cm(test_generator.labels, predictions, p=0.5)
predictions = TL_model.predict(validation_generator)
plot_cm(validation_generator.labels, predictions, p=0.5)
test_generator.class_indices
# positive: PNEUMONIA
# negative: NORMAL
# # 8- (Trying) Oversampling (Building an Input Pipeline using tf.data)
# Resample the dataset by oversampling the minority class; you can balance the dataset manually
# by choosing the right number of random indices from the minority-class examples.
# If you're using tf.data, the easiest way to produce balanced examples is to start with a positive and a negative dataset,
# and merge them.
# **Source:** Tensorflow Oversampling/
BUFFER_SIZE = 256
BATCH_SIZE = 32  # assumed batch size for the resampled tf.data pipeline (used below; any reasonable value works)
# reading the images from PNEUMONIA and NORMAL directories
# training_data_path = pathlib.Path(r"H:\AI\DataSets\Chest X-Ray Images (Pneumonia)\chest_xray\chest_xray\train")
pos_training_data_path = training_data_path / "PNEUMONIA"
neg_training_data_path = training_data_path / "NORMAL"
# reading the images from validation_data_path directory
# validatoin_data_path = pathlib.Path(r"H:\AI\DataSets\Chest X-Ray Images (Pneumonia)\chest_xray\chest_xray\val")
test_image_count = len(list(validatoin_data_path.glob("*/*.jpeg")))  # use "/" so the glob pattern matches on Linux paths
test_list_ds = tf.data.Dataset.list_files(
str(validatoin_data_path / "*/*"), shuffle=False
)
test_list_ds = test_list_ds.shuffle(test_image_count, reshuffle_each_iteration=False)
# reading the images from testing_data_path directory
# testing_data_path = pathlib.Path(r"H:\AI\DataSets\Chest X-Ray Images (Pneumonia)\chest_xray\chest_xray\test")
val_image_count = len(list(testing_data_path.glob("*/*.jpeg")))  # use "/" so the glob pattern matches on Linux paths
val_list_ds = tf.data.Dataset.list_files(str(testing_data_path / "*/*"), shuffle=False)
val_list_ds = val_list_ds.shuffle(val_image_count, reshuffle_each_iteration=False)
# reading the images from PNEUMONIA and NORMAL directories
def make_ds(path):
ds = tf.data.Dataset.list_files(str(path / "*.jpeg"), shuffle=False)
ds = ds.shuffle(BUFFER_SIZE).repeat()
return ds
pos_ds = make_ds(pos_training_data_path)
neg_ds = make_ds(neg_training_data_path)
# converts a file path to an (img, label) pair
def process_path(file_path):
class_indices = {"NORMAL": 0, "PNEUMONIA": 1}
img_height = 224
img_width = 224
parts = tf.strings.split(file_path, os.path.sep)
if parts[-2] == "NORMAL":
label = class_indices["NORMAL"]
else:
label = class_indices["PNEUMONIA"]
img = tf.io.read_file(file_path)
img = tf.io.decode_jpeg(img, channels=3)
img = tf.image.resize(img, [img_height, img_width])
return img, label
pos_ds = pos_ds.map(process_path, num_parallel_calls=tf.data.AUTOTUNE)
neg_ds = neg_ds.map(process_path, num_parallel_calls=tf.data.AUTOTUNE)
# Merge the two together using `experimental.sample_from_datasets`
resampled_ds = tf.data.experimental.sample_from_datasets(
[pos_ds, neg_ds], weights=[0.5, 0.5]
)
resampled_ds = resampled_ds.batch(BATCH_SIZE).prefetch(2)
# To use this dataset, you'll need the number of steps per epoch.
# The definition of "epoch" in this case is less clear.
# Say it's the number of batches required to see each negative example once:
# **Source:** Tensorflow Oversampling/
neg, pos = len(os.listdir(training_data_path / "NORMAL")), len(
os.listdir(training_data_path / "PNEUMONIA")
)
resampled_steps_per_epoch = np.ceil(2.0 * neg / BATCH_SIZE)
resampled_steps_per_epoch
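# A quick sanity check of the value above, assuming the standard split of this dataset
# (1,341 NORMAL and 3,875 PNEUMONIA training images, i.e. the 5,216 total used earlier) and BATCH_SIZE = 32:
# resampled_steps_per_epoch = ceil(2 * 1341 / 32) = ceil(83.8) = 84 steps per epoch.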
# ---
# converts a file path to an (img, label) pair
def test_val_process_path(file_path):
parts = tf.strings.split(file_path, os.path.sep)
one_hot = parts[-2] == class_names
label = tf.argmax(one_hot)
img = tf.io.read_file(file_path)
img = tf.io.decode_jpeg(img, channels=3)
img = tf.image.resize(img, [img_height, img_width])
return img, label
class_names = np.array(
sorted(
[item.name for item in training_data_path.glob("*") if item.name != ".DS_Store"]
)
)
num_classes = len(
[item for item in training_data_path.glob("*") if item.name != ".DS_Store"]
)
print("The number of classes is: {}".format(num_classes))
val_ds = val_list_ds.map(test_val_process_path, num_parallel_calls=tf.data.AUTOTUNE)
test_ds = test_list_ds.map(test_val_process_path, num_parallel_calls=tf.data.AUTOTUNE)
# **Configure dataset for performance**
# To train a model with this dataset you will want the data:
# - To be well shuffled.
# - To be batched.
# - Batches to be available as soon as possible.
# These features can be added using the tf.data API. For more details:
# **Source:** Configure dataset for performance/
# `.cache()` keeps the images in memory after they're loaded off disk during the first epoch.
# This will ensure the dataset does not become a bottleneck while training your model.
# If your dataset is too large to fit into memory, you can also use this method to create a performant on-disk cache.
# `.prefetch()` overlaps data preprocessing and model execution while training.
#
# **Source:** Configure dataset for performance/
def configure_for_performance(ds):
ds = ds.cache()
ds = ds.shuffle(buffer_size=1024)
ds = ds.batch(64)
ds = ds.prefetch(buffer_size=tf.data.AUTOTUNE)
return ds
val_ds = configure_for_performance(val_ds)
test_ds = configure_for_performance(test_ds)
test_ds.take(1)
# **Train on the oversampled data**
resampled_model = tf.keras.models.Sequential()
resampled_model.add(
tf.keras.layers.Conv2D(
filters=32,
kernel_size=(5, 5),
strides=(1, 1),
activation="relu",
input_shape=(224, 224, 3),
)
)
resampled_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
resampled_model.add(
tf.keras.layers.Conv2D(
filters=64, kernel_size=(3, 3), strides=(1, 1), activation="relu"
)
)
resampled_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
resampled_model.add(
tf.keras.layers.Conv2D(
filters=128, kernel_size=(3, 3), strides=(1, 1), activation="relu"
)
)
resampled_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
resampled_model.add(
tf.keras.layers.Conv2D(
filters=256, kernel_size=(3, 3), strides=(1, 1), activation="relu"
)
)
resampled_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
resampled_model.add(tf.keras.layers.Flatten())
resampled_model.add(
tf.keras.layers.Dense(256, activation="relu", kernel_initializer="he_normal")
)
resampled_model.add(tf.keras.layers.BatchNormalization())
resampled_model.add(tf.keras.layers.Dropout(0.4))
resampled_model.add(tf.keras.layers.Dense(1, activation="sigmoid"))
resampled_model.compile(
optimizer=tf.keras.optimizers.RMSprop(learning_rate=1e-3),
loss=tf.keras.losses.BinaryCrossentropy(),
metrics=METRICS,
)
resampled_history = resampled_model.fit(
resampled_ds,
epochs=50,
steps_per_epoch=resampled_steps_per_epoch,
validation_data=test_ds,
)
resampled_train_acc = resampled_history.history["accuracy"]
resampled_val_acc = resampled_history.history["val_accuracy"]
plt.plot(resampled_history.epoch, resampled_train_acc, label="Training Accuracy")
plt.plot(resampled_history.epoch, resampled_val_acc, label="Validation Accuracy")
plt.grid(True)
plt.legend()
resampled_train_acc = resampled_history.history["F1_score"]
resampled_val_acc = resampled_history.history["val_F1_score"]
plt.plot(resampled_history.epoch, resampled_train_acc, label="Training F1_score")
plt.plot(resampled_history.epoch, resampled_val_acc, label="Validation F1_score")
plt.grid(True)
plt.legend()
resampled_train_acc = resampled_history.history["prc"]
resampled_val_acc = resampled_history.history["val_prc"]
plt.plot(resampled_history.epoch, resampled_train_acc, label="Training prc")
plt.plot(resampled_history.epoch, resampled_val_acc, label="Validation prc")
plt.grid(True)
plt.legend()
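# As a closing check (a minimal sketch, not part of the original analysis): evaluate the oversampled model on
# test_ds and plot its confusion matrix with the plot_cm helper defined earlier. The dataset is iterated once,
# collecting images and labels together, because configure_for_performance reshuffles test_ds on every pass.
resampled_y_true = []
resampled_y_prob = []
for batch_images, batch_labels in test_ds:
    batch_probs = resampled_model.predict(batch_images, verbose=0)
    resampled_y_true.append(batch_labels.numpy())
    resampled_y_prob.append(batch_probs.ravel())
resampled_y_true = np.concatenate(resampled_y_true)
resampled_y_prob = np.concatenate(resampled_y_prob)
plot_cm(resampled_y_true, resampled_y_prob, p=0.5)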
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/534/69534753.ipynb
|
chest-xray-pneumonia
|
paultimothymooney
|
[{"Id": 69534753, "ScriptId": 18987496, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7424338, "CreationDate": "08/01/2021 08:14:35", "VersionNumber": 1.0, "Title": "Classification on imbalanced data & transfer learn", "EvaluationDate": "08/01/2021", "IsChange": true, "TotalLines": 955.0, "LinesInsertedFromPrevious": 955.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
|
[{"Id": 92831267, "KernelVersionId": 69534753, "SourceDatasetVersionId": 23812}]
|
[{"Id": 23812, "DatasetId": 17810, "DatasourceVersionId": 23851, "CreatorUserId": 1314380, "LicenseName": "Other (specified in description)", "CreationDate": "03/24/2018 19:41:59", "VersionNumber": 2.0, "Title": "Chest X-Ray Images (Pneumonia)", "Slug": "chest-xray-pneumonia", "Subtitle": "5,863 images, 2 categories", "Description": "### Context\n\nhttp://www.cell.com/cell/fulltext/S0092-8674(18)30154-5\n\n\n\nFigure S6. Illustrative Examples of Chest X-Rays in Patients with Pneumonia, Related to Figure 6\nThe normal chest X-ray (left panel) depicts clear lungs without any areas of abnormal opacification in the image. Bacterial pneumonia (middle) typically exhibits a focal lobar consolidation, in this case in the right upper lobe (white arrows), whereas viral pneumonia (right) manifests with a more diffuse \u2018\u2018interstitial\u2019\u2019 pattern in both lungs.\nhttp://www.cell.com/cell/fulltext/S0092-8674(18)30154-5\n\n### Content\n\nThe dataset is organized into 3 folders (train, test, val) and contains subfolders for each image category (Pneumonia/Normal). There are 5,863 X-Ray images (JPEG) and 2 categories (Pneumonia/Normal). \n\nChest X-ray images (anterior-posterior) were selected from retrospective cohorts of pediatric patients of one to five years old from Guangzhou Women and Children\u2019s Medical Center, Guangzhou. All chest X-ray imaging was performed as part of patients\u2019 routine clinical care. \n\nFor the analysis of chest x-ray images, all chest radiographs were initially screened for quality control by removing all low quality or unreadable scans. The diagnoses for the images were then graded by two expert physicians before being cleared for training the AI system. In order to account for any grading errors, the evaluation set was also checked by a third expert.\n\n### Acknowledgements\n\nData: https://data.mendeley.com/datasets/rscbjbr9sj/2\n\nLicense: [CC BY 4.0][1]\n\nCitation: http://www.cell.com/cell/fulltext/S0092-8674(18)30154-5\n\n![enter image description here][2]\n\n\n### Inspiration\n\nAutomated methods to detect and classify human diseases from medical images.\n\n\n [1]: https://creativecommons.org/licenses/by/4.0/\n [2]: https://i.imgur.com/8AUJkin.png", "VersionNotes": "train/test/val", "TotalCompressedBytes": 1237249419.0, "TotalUncompressedBytes": 1237249419.0}]
|
[{"Id": 17810, "CreatorUserId": 1314380, "OwnerUserId": 1314380.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 23812.0, "CurrentDatasourceVersionId": 23851.0, "ForumId": 25540, "Type": 2, "CreationDate": "03/22/2018 05:42:41", "LastActivityDate": "03/22/2018", "TotalViews": 2063138, "TotalDownloads": 237932, "TotalVotes": 5834, "TotalKernels": 2058}]
|
[{"Id": 1314380, "UserName": "paultimothymooney", "DisplayName": "Paul Mooney", "RegisterDate": "10/05/2017", "PerformanceTier": 5}]
|
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
recall = true_positives / (possible_positives + K.epsilon())
f1_val = 2 * (precision * recall) / (precision + recall + K.epsilon())
return f1_val
# **Source:** Link to Souce of the F1_score code/
METRICS = [
tf.keras.metrics.TruePositives(name="tp"),
tf.keras.metrics.FalsePositives(name="fp"),
tf.keras.metrics.TrueNegatives(name="tn"),
tf.keras.metrics.FalseNegatives(name="fn"),
tf.keras.metrics.BinaryAccuracy(name="accuracy"),
tf.keras.metrics.Precision(name="precision"),
tf.keras.metrics.Recall(name="recall"),
tf.keras.metrics.AUC(name="auc"),
tf.keras.metrics.AUC(name="prc", curve="PR"),
F1_score,
]
# **Source:** Link to Souce of the METRICS code/
initial_learning_rate = 0.001
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
initial_learning_rate, decay_steps=750, decay_rate=0.92, staircase=True
)
simple_ResNet_model.compile(
optimizer=tf.keras.optimizers.Adam(lr_schedule),
loss=tf.keras.losses.BinaryCrossentropy(),
metrics=METRICS,
)
simple_ResNet_history = simple_ResNet_model.fit(
training_generator,
steps_per_epoch=163,
epochs=20,
validation_data=validation_generator,
validation_steps=78,
callbacks=[early_stopting],
)
# **For test data:**
simple_ResNet_mode_results = simple_ResNet_model.evaluate(test_generator)
for name, value in zip(simple_ResNet_model.metrics_names, simple_ResNet_mode_results):
print(name, ": ", value)
print()
# **For training data:**
# positive: PNEUMONIA
# negative: NORMAL
actual_positive = len(os.listdir(training_data_path / "PNEUMONIA"))
actual_negative = len(os.listdir(training_data_path / "NORMAL"))
simple_ResNet_train_precision = simple_ResNet_history.history["precision"][0]
simple_ResNet_train_recall = simple_ResNet_history.history["recall"][0]
simple_ResNet_train_ROC_AUC = simple_ResNet_history.history["auc"][0]
simple_ResNet_train_prc = simple_ResNet_history.history["prc"][0]
simple_ResNet_train_F1_score = simple_ResNet_history.history["F1_score"][0]
print(
"The FalsePositives was {} out of {} that are truly negative that were calssified as positive.".format(
simple_ResNet_history.history["fp"][0], actual_negative
)
)
print("so we have a precesion of {}".format(simple_ResNet_train_precision))
print("\n")
print(
"The FalseNegatives was {} out of {} that are truly positive that were calssified as negative.".format(
simple_ResNet_history.history["fn"][0], actual_positive
)
)
print("so we have a recall of {}".format(simple_ResNet_train_recall))
print("\n")
print("The F1-score: {}".format(simple_ResNet_train_F1_score))
print("\n")
print(
"The Area Under the Curve of a Receiver Operating Characteristic curve ROC-AUC: {}".format(
simple_ResNet_train_ROC_AUC
)
)
print(
"And the Area Under the Curve of the Precision-Recall Curve prc: {}".format(
simple_ResNet_train_prc
)
)
print("\n")
# **For Validation data**
actual_positive = len(os.listdir(testing_data_path / "PNEUMONIA"))
actual_negative = len(os.listdir(testing_data_path / "NORMAL"))
simple_ResNet_val_precision = simple_ResNet_history.history["val_precision"][0]
simple_ResNet_val_recall = simple_ResNet_history.history["val_recall"][0]
simple_ResNet_val_ROC_AUC = simple_ResNet_history.history["val_auc"][0]
simple_ResNet_val_prc = simple_ResNet_history.history["val_prc"][0]
simple_ResNet_val_F1_score = simple_ResNet_history.history["val_F1_score"][0]
print(
"The FalsePositives was {} out of {} that are truly negative that were calssified as positive.".format(
simple_ResNet_history.history["val_fp"][0], actual_negative
)
)
print("so we have a precesion of {}".format(simple_ResNet_val_precision))
print("\n")
print(
"The FalseNegatives was {} out of {} that are truly positive that were calssified as negative.".format(
simple_ResNet_history.history["val_fn"][0], actual_positive
)
)
print("so we have a recall of {}".format(simple_ResNet_val_recall))
print("\n")
print("The F1-score: {}".format(simple_ResNet_val_F1_score))
print("\n")
print(
"The Area Under the Curve of a Receiver Operating Characteristic curve ROC-AUC: {}".format(
simple_ResNet_val_ROC_AUC
)
)
print(
"And the Area Under the Curve of the Precision-Recall Curve prc: {}".format(
simple_ResNet_val_prc
)
)
print("\n")
# We can get best results using transfer learning and thats the next section topic.
# But for this kind of problem diagnoses of illnesses, **it's better to have as lowest as possible of FalseNegatives**,
# FalseNegatives is where the patient is actually have the disease (positive) but he diagnoses as negative (he don't have it)
# because if the patient is trully have the disease we have to diagnoses his correctly and to undergo treatment.
# but in the other side it's okay ho have some FalsePositives where the patient is actually don't have the disease (negative) but he diagnoses as positive (he have it), because after more investigation the truth will come out.
# But mental health is also important, if we have a high FalsePositives and we tell a healthy patient that
# he has the disease we may harm his mental health for no reason.
# so we have to have a single metric that we can concentrate on which is the F1 score.
# # 5- Using pretrained models (Transfer learning (ResNet50V2))
# Transfer learning is the process of taking the knowledge of pretrained model in different task or different domain
# to another domain or task. Task is defined by the input and the expected output (for example image classification
# and image detection are different tasks), while the domain (different data distributions but the same tasks),
# for example when the task is image classification images taken fom the web and images by the user camera.
# **source:** Hands-On Computer Vision with TensorFlow 2 by: Benjamin Planche, Eliot Andres
# As we know the first layers in the ConvNet learned local genaric features while higher layers extract more
# abstract concept, we can we can use this information and apply transfer learning.
# Here we are taking the convolutional base without the densly connected layer by setting `include_top` to False
# because the representations learned by the convolutional base are more genaric and reusable, which will be usefule
# regardless tho computer vision problem at hand, while the representations learned by the classifier are
# more specific to the classes which the model was trained.
# **source:** Deep Learning with Python by: Francois Chollet
# There are **two ways to use a pretrained models**:
# - Feature Extraction
# - Fine Tuning
# **source:** Deep Learning with Python by: Francois Chollet
# I will used the Fine Tuning method.
#
base_model = tf.keras.applications.ResNet50V2(
weights="imagenet", input_shape=(224, 224, 3), include_top=False
)
#
# **steps for Fine-Tuning a network:**
# Add your custom network on top of an already-trained base network
# Freeze the base netweork
# Train the part you added
# Un freeze some layers in the base network
# Jointly train both these layers and the part you added
# **Why we do the first 3 steps? why we just Un freeze some layers in the base network and directly Jointly train both these layers and the part you added?**
# freezing the base netweork weights means preventing them from being updating during training, if we do not do this
# the presentations that was previesly learned by the base netweorkwill modified during trainin.
# The densw layers on the top that we added are randomly inialized and very large weight updated will be propagated through the
# network destroying the presentations that was previesly learned.
# **source:** Deep Learning with Python by: Francois Chollet
TL_model = tf.keras.models.Sequential()
# step 1: Add your custom network on top of an already-trained base network
TL_model.add(base_model)
TL_model.add(tf.keras.layers.Flatten())
TL_model.add(tf.keras.layers.Dense(256, activation="relu"))
TL_model.add(tf.keras.layers.Dropout(0.3))
TL_model.add(tf.keras.layers.Dense(1, activation="sigmoid"))
# step 2: Freeze the base netweork
base_model.trainable = False
# step 3: Train the part you added
TL_model.compile(
optimizer=tf.keras.optimizers.Adam(lr_schedule),
loss=tf.keras.losses.BinaryCrossentropy(),
metrics=METRICS,
)
TL_model_history = TL_model.fit(
training_generator,
steps_per_epoch=163,
epochs=10,
validation_data=validation_generator,
validation_steps=78,
)
baseline_results = TL_model.evaluate(test_generator)
for name, value in zip(TL_model.metrics_names, baseline_results):
print(name, ": ", value)
print()
# **Fine Tuning**
# step 4: Un freeze some layers in the base network
base_model.trainable = True
set_trainable = False
for layer in base_model.layers:
if layer.name == "conv5_block3_preact_bn":
set_trainable = True
if set_trainable:
layer.trainable = True
else:
layer.trainable = False
# Here we use a **very low learning rate** to limit the magnitude of the modifications we make to the representations
# of the layers that we are fine tuning.
# **source:** Deep Learning with Python by: Francois Chollet
# step 5: Jointly train both these layers and the part you added
TL_model.compile(
optimizer=tf.keras.optimizers.RMSprop(learning_rate=1e-5),
loss=tf.keras.losses.BinaryCrossentropy(),
metrics=METRICS,
)
TL_model_history = TL_model.fit(
training_generator,
steps_per_epoch=163,
epochs=20,
validation_data=validation_generator,
validation_steps=78,
callbacks=[early_stopting],
)
# # 6- evaluation
baseline_results = TL_model.evaluate(test_generator)
for name, value in zip(TL_model.metrics_names, baseline_results):
print(name, ": ", value)
print()
# **For the training data**
actual_positive = len(os.listdir(training_data_path / "PNEUMONIA"))
actual_negative = len(os.listdir(training_data_path / "NORMAL"))
TL_model_train_precision = TL_model_history.history["precision"][0]
TL_model_train_recall = TL_model_history.history["recall"][0]
TL_model_train_ROC_AUC = TL_model_history.history["auc"][0]
TL_model_train_prc = TL_model_history.history["prc"][0]
TL_model_train_F1_score = TL_model_history.history["F1_score"][0]
print(
"The FalsePositives was {} out of {} that are truly negative that were calssified as positive.".format(
TL_model_history.history["fp"][0], actual_negative
)
)
print("so we have a precesion of {}".format(TL_model_train_precision))
print("\n")
print(
"The FalseNegatives was {} out of {} that are truly positive that were calssified as negative.".format(
TL_model_history.history["fn"][0], actual_positive
)
)
print("so we have a recall of {}".format(TL_model_train_recall))
print("\n")
print("The F1-score: {}".format(TL_model_train_F1_score))
print("\n")
print(
"The Area Under the Curve of a Receiver Operating Characteristic curve ROC-AUC: {}".format(
TL_model_train_ROC_AUC
)
)
print(
"And the Area Under the Curve of the Precision-Recall Curve prc: {}".format(
TL_model_train_prc
)
)
print("\n")
# **For validation data**
actual_positive = len(os.listdir(testing_data_path / "PNEUMONIA"))
actual_negative = len(os.listdir(testing_data_path / "NORMAL"))
TL_model_val_precision = TL_model_history.history["val_precision"][0]
TL_model_val_recall = TL_model_history.history["val_recall"][0]
TL_model_val_ROC_AUC = TL_model_history.history["val_auc"][0]
TL_model_val_prc = TL_model_history.history["val_prc"][0]
TL_model_val_F1_score = TL_model_history.history["val_F1_score"][0]
print(
"The FalsePositives was {} out of {} that are truly negative that were calssified as positive.".format(
TL_model_history.history["val_fp"][0], actual_negative
)
)
print("so we have a precesion of {}".format(TL_model_val_precision))
print("\n")
print(
"The FalseNegatives was {} out of {} that are truly positive that were calssified as negative.".format(
TL_model_history.history["val_fn"][0], actual_positive
)
)
print("so we have a recall of {}".format(TL_model_val_recall))
print("\n")
print("The F1-score: {}".format(TL_model_val_F1_score))
print("\n")
print(
"The Area Under the Curve of a Receiver Operating Characteristic curve ROC-AUC: {}".format(
TL_model_val_ROC_AUC
)
)
print(
"And the Area Under the Curve of the Precision-Recall Curve prc: {}".format(
TL_model_val_prc
)
)
print("\n")
def plot_cm(labels, predictions, p=0.5):
cm = confusion_matrix(labels, predictions > p)
plt.figure(figsize=(5, 5))
sns.heatmap(cm, annot=True, fmt="d")
plt.title("Confusion matrix @{:.2f}".format(p))
plt.ylabel("Actual label")
plt.xlabel("Predicted label")
predictions = TL_model.predict(test_generator)
plot_cm(test_generator.labels, predictions, p=0.5)
predictions = TL_model.predict(validation_generator)
plot_cm(validation_generator.labels, predictions, p=0.5)
test_generator.class_indices
# positive: PNEUMONIA
# negative: NORMAL
# # 7- (Trying) Oversampling (Building Input Pipeline using tf.data)
# Resample the dataset by oversampling the minority class, you can balance the dataset manually
# by choosing the right number of random indices from the positive examples.
# If you're using tf.data the easiest way to produce balanced examples is to start with a positive and a negative dataset,
# and merge them.
# **Source:** Tensorflow Oversampling/
BUFFER_SIZE = 256
# reading the images from PNEUMONIA and NORMAL directories
# training_data_path = pathlib.Path(r"H:\AI\DataSets\Chest X-Ray Images (Pneumonia)\chest_xray\chest_xray\train")
pos_training_data_path = training_data_path / "PNEUMONIA"
neg_training_data_path = training_data_path / "NORMAL"
# reading the images from validation_data_path directory
# validatoin_data_path = pathlib.Path(r"H:\AI\DataSets\Chest X-Ray Images (Pneumonia)\chest_xray\chest_xray\val")
test_image_count = len(list(validatoin_data_path.glob(r"*\\*.jpeg")))
test_list_ds = tf.data.Dataset.list_files(
str(validatoin_data_path / "*/*"), shuffle=False
)
test_list_ds = test_list_ds.shuffle(test_image_count, reshuffle_each_iteration=False)
# reading the images from testing_data_path directory
# testing_data_path = pathlib.Path(r"H:\AI\DataSets\Chest X-Ray Images (Pneumonia)\chest_xray\chest_xray\test")
val_image_count = len(list(testing_data_path.glob(r"*\\*.jpeg")))
val_list_ds = tf.data.Dataset.list_files(str(testing_data_path / "*/*"), shuffle=False)
val_list_ds = val_list_ds.shuffle(val_image_count, reshuffle_each_iteration=False)
# reading the images from PNEUMONIA and NORMAL directories
def make_ds(path):
ds = tf.data.Dataset.list_files(str(path / "*.jpeg"), shuffle=False)
ds = ds.shuffle(BUFFER_SIZE).repeat()
return ds
pos_ds = make_ds(pos_training_data_path)
neg_ds = make_ds(neg_training_data_path)
# converts a file path to an (img, label) pair
def process_path(file_path):
class_indices = {"NORMAL": 0, "PNEUMONIA": 1}
img_height = 224
img_width = 224
parts = tf.strings.split(file_path, os.path.sep)
if parts[-2] == "NORMAL":
label = class_indices["NORMAL"]
else:
label = class_indices["PNEUMONIA"]
img = tf.io.read_file(file_path)
img = tf.io.decode_jpeg(img, channels=3)
img = tf.image.resize(img, [img_height, img_width])
return img, label
pos_ds = pos_ds.map(process_path, num_parallel_calls=tf.data.AUTOTUNE)
neg_ds = neg_ds.map(process_path, num_parallel_calls=tf.data.AUTOTUNE)
# Merge the two together using `experimental.sample_from_datasets`
resampled_ds = tf.data.experimental.sample_from_datasets(
[pos_ds, neg_ds], weights=[0.5, 0.5]
)
resampled_ds = resampled_ds.batch(BATCH_SIZE).prefetch(2)
# To use this dataset, you'll need the number of steps per epoch.
# The definition of "epoch" in this case is less clear.
# Say it's the number of batches required to see each negative example once:
# **Source:** Tensorflow Oversampling/
neg, pos = len(os.listdir(training_data_path / "NORMAL")), len(
os.listdir(training_data_path / "PNEUMONIA")
)
resampled_steps_per_epoch = np.ceil(2.0 * neg / BATCH_SIZE)
resampled_steps_per_epoch
# ---
# converts a file path to an (img, label) pair
def test_val_process_path(file_path):
parts = tf.strings.split(file_path, os.path.sep)
one_hot = parts[-2] == class_names
label = tf.argmax(one_hot)
img = tf.io.read_file(file_path)
img = tf.io.decode_jpeg(img, channels=3)
img = tf.image.resize(img, [img_height, img_width])
return img, label
class_names = np.array(
sorted(
[item.name for item in training_data_path.glob("*") if item.name != ".DS_Store"]
)
)
num_classes = len(
[item for item in training_data_path.glob("*") if item.name != ".DS_Store"]
)
print("The number of classes is: {}".format(num_classes))
val_ds = val_list_ds.map(test_val_process_path, num_parallel_calls=tf.data.AUTOTUNE)
test_ds = test_list_ds.map(test_val_process_path, num_parallel_calls=tf.data.AUTOTUNE)
# **Configure dataset for performance**
# To train a model with this dataset you will want the data:
# - To be well shuffled.
# - To be batched.
# - Batches to be available as soon as possible.
# These features can be added using the tf.data API. For more details:
# **Source:** Configure dataset for performance/
# `.cache()` keeps the images in memory after they're loaded off disk during the first epoch.
# This will ensure the dataset does not become a bottleneck while training your model.
# If your dataset is too large to fit into memory, you can also use this method to create a performant on-disk cache.
# `.prefetch()` overlaps data preprocessing and model execution while training.
#
# **Source:** Configure dataset for performance/
def configure_for_performance(ds):
ds = ds.cache()
ds = ds.shuffle(buffer_size=1024)
ds = ds.batch(64)
ds = ds.prefetch(buffer_size=tf.data.AUTOTUNE)
return ds
val_ds = configure_for_performance(val_ds)
test_ds = configure_for_performance(test_ds)
test_ds.take(1)
# **Train on the oversampled data**
resampled_model = tf.keras.models.Sequential()
resampled_model.add(
tf.keras.layers.Conv2D(
filters=32,
kernel_size=(5, 5),
strides=(1, 1),
activation="relu",
input_shape=(224, 224, 3),
)
)
resampled_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
resampled_model.add(
tf.keras.layers.Conv2D(
filters=64, kernel_size=(3, 3), strides=(1, 1), activation="relu"
)
)
resampled_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
resampled_model.add(
tf.keras.layers.Conv2D(
filters=128, kernel_size=(3, 3), strides=(1, 1), activation="relu"
)
)
resampled_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
resampled_model.add(
tf.keras.layers.Conv2D(
filters=256, kernel_size=(3, 3), strides=(1, 1), activation="relu"
)
)
resampled_model.add(tf.keras.layers.MaxPool2D(pool_size=(2, 2)))
resampled_model.add(tf.keras.layers.Flatten())
resampled_model.add(
tf.keras.layers.Dense(256, activation="relu", kernel_initializer="he_normal")
)
resampled_model.add(tf.keras.layers.BatchNormalization())
resampled_model.add(tf.keras.layers.Dropout(0.4))
resampled_model.add(tf.keras.layers.Dense(1, activation="sigmoid"))
resampled_model.compile(
optimizer=tf.keras.optimizers.RMSprop(learning_rate=1e-3),
loss=tf.keras.losses.BinaryCrossentropy(),
metrics=METRICS,
)
resampled_history = resampled_model.fit(
resampled_ds,
epochs=50,
steps_per_epoch=resampled_steps_per_epoch,
validation_data=test_ds,
)
resampled_train_acc = resampled_history.history["accuracy"]
resampled_val_acc = resampled_history.history["val_accuracy"]
plt.plot(resampled_history.epoch, resampled_train_acc, label="Training Accuracy")
plt.plot(resampled_history.epoch, resampled_val_acc, label="Validation Accuracy")
plt.grid(True)
plt.legend()
resampled_train_acc = resampled_history.history["F1_score"]
resampled_val_acc = resampled_history.history["val_F1_score"]
plt.plot(resampled_history.epoch, resampled_train_acc, label="Training F1_score")
plt.plot(resampled_history.epoch, resampled_val_acc, label="Validation F1_score")
plt.grid(True)
plt.legend()
resampled_train_acc = resampled_history.history["prc"]
resampled_val_acc = resampled_history.history["val_prc"]
plt.plot(resampled_history.epoch, resampled_train_acc, label="Training prc")
plt.plot(resampled_history.epoch, resampled_val_acc, label="Validation prc")
plt.grid(True)
plt.legend()
| false | 0 | 11,606 | 1 | 12,083 | 11,606 |
||
69534330
|
# Still a **WIP**.
# Recently, I was lucky to get accepted into the [TRC](https://sites.research.google/trc/) program (short for TPU Research Cloud). I have also participated a bit in the HuggingFace TPU program (even though I wasn't very active due to personal things).
# In a nutshell, this is a free (more details later) TPU program. To apply, here is the link.
# The only requirement is that you follow "ethical" AI practices (as defined here) and share as much as possible with the team: write a blog post, a research paper, i.e. contribute something back.
# The requirements don't seem that hard to achieve.
# To contribute my own share, I am writing this notebook detailing my journey. I hope it is good enough. :D
# Alright, once you have access to the program, you can start using TPUs.
# You might ask: but how, given I have 0 experience with TPUs?
# Don't panic, I will try to make the process as smooth as possible. The documentation is good enough but there are some rough edges and I have tried to take notes of the harder bits.
# Let's go!
# Before that, we will make a short detour to explain what **TPUs** are and why they might be useful for
# some of your use cases.
# # What are TPUs?
# **[TPU](https://en.wikipedia.org/wiki/Tensor_Processing_Unit)s**, short for **Tensor Processing Units**, are custom hardware developed by Google around 2015 (probably even before), announced at the 2016 Google I/O event and made available to the public around 2018.
# Similar to GPUs, which are custom hardware adapted to graphics processing and later on to deep learning and
# other data processing tasks, TPUs have been designed from the get-go to work and scale with machine learning workloads. They are what is called an **ASIC**: an application-specific integrated circuit.
# So far, there are mainly two versions (check the table below) that you can use:
# - v2-8
# - v3-8
# There are slight variations of each major version, check it in this table:
# 
# Next question is: how to access these TPUs?
# # TPU VMs
# Before we start, notice that there are many other methods, but I will only focus on the
# most convenient and scalable one I have found.
# # TODO: Finish adapting
# As an aside, instead of setting up your own TPU pod/instance, you can also take advantage of the TPU offered by Colab and the one offered by Kaggle.
# Here is how to do it in Colab: https://colab.research.google.com/notebooks/tpu.ipynb
# And here is how to do it in Kaggle: https://www.kaggle.com/docs/tpu
# Indeed, TPUs are available using at least these methods:
# - Kaggle of course
# - TPU pods
# - Colab
# - TPU VMs
# The last method is the one we will focus on. This is probably the easiest way to get started and have your environment set up with everything needed.
# In fact, since you have access to a full VM, whatever you do on it will be persisted.
# Notice that this is still an early feature (as of July 2021), so be careful (don't use it in
# production) and help the developers by reporting bugs.
# # What is XLA?
# TensorFlow ships with the plumbing needed to understand the low-level TPU details; in order to use
# something different, PyTorch needs its own way of handling these details.
# For that, there is [XLA](https://www.tensorflow.org/xla?hl=en).
# Here is an extract from the [XLA Github](https://github.com/pytorch/xla) repo:
#
# > PyTorch/XLA is a Python package that uses the XLA deep learning compiler to connect the PyTorch deep learning framework and Cloud TPUs. You can try it right now, for free, on a single Cloud TPU with Google Colab, and use it in production and on Cloud TPU Pods with Google Cloud.
# Under the hood, it uses the TF XLA [compiler](https://www.tensorflow.org/xla).
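# To make this concrete, here is a minimal, illustrative PyTorch/XLA snippet (my addition, not
# part of the original notebook). It asks XLA for the TPU device, runs a small tensor op on it,
# and then forces the lazily recorded graph to execute. It assumes the `torch_xla` package is
# available, as it is on TPU VMs and on the Colab/Kaggle TPU runtimes.
import torch
import torch_xla.core.xla_model as xm

xla_device = xm.xla_device()  # the TPU core exposed through XLA
a = torch.randn(3, 3, device=xla_device)
b = (a @ a.t()).sum()  # recorded lazily as part of an XLA graph
xm.mark_step()  # compile and run the pending graph on the TPU
print(b.item())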
# # Application
# Alright, enough with exposition and theory. Time for some application.
# For that, we will use the Kaggle TPU and PyTorch thanks to XLA.
# We will also use the CommonLit readability [dataset](https://www.kaggle.com/c/commonlitreadabilityprize).
# ## Dataset
# We start as usual with any PyTorch dataset.
# TODO: Finish adapting this dataset.
from torch.utils.data import Dataset
import torch
class DatasetRetriever(Dataset):
    def __init__(self, data, tokenizer, max_len, is_test=False):
        self.data = data
        self.excerpts = self.data.excerpt.values.tolist()
        # the CommonLit train split carries a "target" column; the test split does not
        self.targets = self.data.target.values.tolist() if not is_test else None
        self.tokenizer = tokenizer
        self.is_test = is_test
        self.max_len = max_len
def __len__(self):
return len(self.data)
def __getitem__(self, item):
if not self.is_test:
excerpt, label = self.excerpts[item], self.targets[item]
features = convert_examples_to_features(
excerpt, self.tokenizer, self.max_len, self.is_test
)
return {
"input_ids": torch.tensor(features["input_ids"], dtype=torch.long),
"token_type_ids": torch.tensor(
features["token_type_ids"], dtype=torch.long
),
"attention_mask": torch.tensor(
features["attention_mask"], dtype=torch.long
),
"label": torch.tensor(label, dtype=torch.double),
}
else:
excerpt = self.excerpts[item]
features = convert_examples_to_features(
excerpt, self.tokenizer, self.max_len, self.is_test
)
return {
"input_ids": torch.tensor(features["input_ids"], dtype=torch.long),
"token_type_ids": torch.tensor(
features["token_type_ids"], dtype=torch.long
),
"attention_mask": torch.tensor(
features["attention_mask"], dtype=torch.long
),
}
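# The class above calls a `convert_examples_to_features` helper that this notebook never defines
# (it comes from the notebook this code was adapted from). Below is a minimal sketch of what it
# presumably does with a Hugging Face tokenizer; treat it as an illustrative stand-in rather than
# the original implementation.
def convert_examples_to_features(text, tokenizer, max_len, is_test=False):
    # is_test is accepted only to match the call sites above; tokenization is the same either way
    return tokenizer(
        text,
        max_length=max_len,
        padding="max_length",
        truncation=True,
        return_token_type_ids=True,
    )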
# ## Model
from torch import nn
from transformers import RobertaModel
# TODO: Adapt this to Pytorch Lightning.
class CommonLitModel(nn.Module):
def __init__(
self, model_name, config, multisample_dropout=False, output_hidden_states=False
):
super(CommonLitModel, self).__init__()
self.config = config
self.roberta = RobertaModel.from_pretrained(
model_name, output_hidden_states=output_hidden_states
)
self.layer_norm = nn.LayerNorm(config.hidden_size)
if multisample_dropout:
self.dropouts = nn.ModuleList([nn.Dropout(0.5) for _ in range(5)])
else:
self.dropouts = nn.ModuleList([nn.Dropout(0.3)])
self.regressor = nn.Linear(config.hidden_size, 1)
self._init_weights(self.layer_norm)
self._init_weights(self.regressor)
def _init_weights(self, module):
if isinstance(module, nn.Linear):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.Embedding):
module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
if module.padding_idx is not None:
module.weight.data[module.padding_idx].zero_()
elif isinstance(module, nn.LayerNorm):
module.bias.data.zero_()
module.weight.data.fill_(1.0)
def forward(
self, input_ids=None, attention_mask=None, token_type_ids=None, labels=None
):
outputs = self.roberta(
input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
)
        sequence_output = outputs[1]  # pooled output built from the first token's hidden state
sequence_output = self.layer_norm(sequence_output)
# multi-sample dropout
for i, dropout in enumerate(self.dropouts):
if i == 0:
logits = self.regressor(dropout(sequence_output))
else:
logits += self.regressor(dropout(sequence_output))
logits /= len(self.dropouts)
# calculate loss
loss = None
if labels is not None:
loss_fn = torch.nn.MSELoss()
logits = logits.view(-1).to(labels.dtype)
loss = torch.sqrt(loss_fn(logits, labels.view(-1)))
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
# TODO: Integrate with the TPU model.
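# Since the notebook stops before wiring the model to the TPU, here is a minimal, hypothetical
# sketch (my own, not the author's) of what a single-core XLA training epoch could look like.
# It assumes `train_dataset` is a DatasetRetriever built from the CommonLit train split, that a
# tokenizer and a model config have already been created, and that torch_xla is available.
import torch
import torch_xla.core.xla_model as xm
from torch.utils.data import DataLoader


def train_one_epoch_on_tpu(model, train_dataset, batch_size=16, lr=2e-5):
    device = xm.xla_device()  # single TPU core exposed through XLA
    model = model.to(device)
    model.train()
    loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, drop_last=True)
    optimizer = torch.optim.AdamW(model.parameters(), lr=lr)
    for batch in loader:
        optimizer.zero_grad()
        # the model returns (loss, logits, ...) when labels are provided
        loss = model(
            input_ids=batch["input_ids"].to(device),
            attention_mask=batch["attention_mask"].to(device),
            token_type_ids=batch["token_type_ids"].to(device),
            labels=batch["label"].to(device),
        )[0]
        loss.backward()
        # steps the optimizer and forces the pending XLA graph to execute (barrier=True)
        xm.optimizer_step(optimizer, barrier=True)
    return model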
# That's it for today, I hope you found something useful here (or in the different links).
# Stay tuned for the upcoming notebook: JAX meets TPUs.
# TODO: What about JAX? Maybe not.
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/534/69534330.ipynb
| null | null |
[{"Id": 69534330, "ScriptId": 17685538, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 172860, "CreationDate": "08/01/2021 08:08:17", "VersionNumber": 128.0, "Title": "PyTorch, XLA, and TPUs 101", "EvaluationDate": "08/01/2021", "IsChange": false, "TotalLines": 259.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 259.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 2,223 | 0 | 2,223 | 2,223 |
||
69534707
|
<jupyter_start><jupyter_text>seti-tfrec-dataset-train-2
Kaggle dataset identifier: setitfrecdatasettrain2
<jupyter_script># Install and import package
# !pip install tensorflow_datasets
import os
import seaborn as sns
import pandas as pd
import tensorflow as tf
import tensorflow_addons as tfa
import tensorflow_datasets as tfds
import numpy as np
from tensorflow.keras.layers.experimental import preprocessing
from kaggle_datasets import KaggleDatasets
from kaggle_secrets import UserSecretsClient
from tensorflow.keras import layers
# Turn on tpu
# Detect TPU, return appropriate distribution strategy
strategy = tf.distribute.get_strategy()
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
print("Running on TPU ", tpu.master())
except ValueError:
tpu = None
if tpu:
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
else:
strategy = tf.distribute.get_strategy()
print("REPLICAS: ", strategy.num_replicas_in_sync)
# If you use private dataset, uncomment it
# user_secrets = UserSecretsClient()
# user_credential = user_secrets.get_gcloud_credential()
# user_secrets.set_tensorflow_credential(user_credential)
ds_name = [
"setitfrecdatasettest02",
"setitfrecdatasettest34",
"setitfrecdatasettrain0",
"setitfrecdatasettrain1",
"setitfrecdatasettrain2",
]
ds_path = list(map(lambda name: KaggleDatasets().get_gcs_path(name), ds_name))
train_filenames = tf.io.gfile.glob(
list(map(lambda path: path + "/train*.tfrecords", ds_path))
)
# val_filenames = tf.io.gfile.glob(GCS_PATH + '/val/*.tfrec')
test_filenames = tf.io.gfile.glob(
list(map(lambda path: path + "/test*.tfrecords", ds_path))
)
# List dir with real regex
# [x for x in os.listdir('.') if re.match('index_[0-9]*.csv', x)]
# Read train data
train_tfrec = tf.data.TFRecordDataset(train_filenames)
# Read val data
# val_tfrec = tf.data.TFRecordDataset(val_filenames)
# Read test dataset
test_tfrec = tf.data.TFRecordDataset(test_filenames)
# Parse a training example to get the feature_description
for raw_record in train_tfrec.take(1):
example = tf.train.Example()
example.ParseFromString(raw_record.numpy())
# print(example)
def split_dataset(dataset: tf.data.Dataset, validation_data_percent: int):
"""
    Splits a dataset of type tf.data.Dataset into a training and a validation dataset using the given
    percentage of validation data (rounded to whole elements out of every 100).
    @param dataset: the input dataset to split.
    @param validation_data_percent: the share of validation data as an integer between 0 and 100.
@return: a tuple of two tf.data.Datasets as (training, validation)
"""
if not (0 <= validation_data_percent <= 100):
raise ValueError("validation data percent must be ∈ [0,100]")
dataset = dataset.enumerate()
train_dataset = dataset.filter(lambda f, data: f % 100 > validation_data_percent)
validation_dataset = dataset.filter(
lambda f, data: f % 100 <= validation_data_percent
)
# remove enumeration
train_dataset = train_dataset.map(lambda f, data: data)
validation_dataset = validation_dataset.map(lambda f, data: data)
return train_dataset, validation_dataset
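# Quick sanity check of the helper on a toy dataset (illustrative only): asking for 20 percent
# validation data sends 21 of every 100 elements to the validation split and the rest to training.
toy_train, toy_val = split_dataset(tf.data.Dataset.range(100), 20)
print(
    "toy split ->",
    sum(1 for _ in toy_train), "train /",
    sum(1 for _ in toy_val), "validation",
)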
# parse tfrecord to get feature and label
feature_description = {
"image": tf.io.FixedLenFeature([], tf.string, default_value=""),
"image_id": tf.io.FixedLenFeature([], tf.string, default_value=""),
"target": tf.io.FixedLenFeature([], tf.int64, default_value=0),
}
def parse_labeled_data(example_proto):
# Parse the input `tf.train.Example` proto using the dictionary above.
parsed = tf.io.parse_single_example(example_proto, feature_description)
image = tf.io.decode_raw(parsed["image"], tf.float16)
image = tf.reshape(image, [6, 273, 256])
# image = tf.transpose(image, [1, 0, 2])
image = tf.reshape(image, (273 * 6, 256))
image = tf.expand_dims(image, axis=2) # shape(273*6, 256, 1)
return image, parsed["target"]
def parse_unlabeled_data(example_proto):
# Parse the input `tf.train.Example` proto using the dictionary above.
parsed = tf.io.parse_single_example(example_proto, feature_description)
image = tf.io.decode_raw(parsed["image"], tf.float16)
image = tf.reshape(image, [6, 273, 256])
# image = tf.transpose(image, [1, 0, 2])
image = tf.reshape(image, (273 * 6, 256))
image = tf.expand_dims(image, axis=2) # shape(273*6, 256, 1)
return image, parsed["image_id"]
dataset = train_tfrec.map(parse_labeled_data, num_parallel_calls=10)
train_dataset, val_dataset = split_dataset(dataset, 20)
train_dataset = train_dataset.shuffle(60000).batch(128)
train_dataset = train_dataset.prefetch(10)
val_dataset = val_dataset.shuffle(60000).batch(128).cache()
val_dataset = val_dataset.prefetch(10)
test_dataset = test_tfrec.map(parse_unlabeled_data).batch(32)
test_dataset = test_dataset.cache()
test_dataset = test_dataset.prefetch(10)
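# Optional sanity check (my addition, not in the original notebook): peek at one training batch
# to confirm that each example comes out as a (1638, 256, 1) image, i.e. the six 273x256
# cadence snippets stacked vertically with a single channel, together with a scalar target.
for sample_images, sample_labels in train_dataset.take(1):
    print("images:", sample_images.shape, "labels:", sample_labels.shape)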
# Create model
with strategy.scope():
pretrained_model = tf.keras.applications.efficientnet.EfficientNetB2(
include_top=False, weights="imagenet"
)
# pretrained_model.trainable = False
model = tf.keras.Sequential(
[
layers.Conv2D(3, (1, 1), input_shape=(273 * 6, 256, 1)),
pretrained_model,
layers.GlobalAveragePooling2D(),
layers.Dense(768, activation="relu"),
layers.Dropout(0.2),
layers.Dense(768, activation="relu"),
layers.Dense(1, activation="sigmoid"),
]
)
    model.compile(
        optimizer=tf.optimizers.Adam(learning_rate=2e-5),
        # the final Dense layer already applies a sigmoid, so the loss expects probabilities, not logits
        loss=tf.keras.losses.BinaryCrossentropy(from_logits=False),
        metrics=[tf.keras.metrics.AUC()],
    )
model.summary()
# Train model
checkpoint_filepath = "best_checkpoint"
options = tf.train.CheckpointOptions(experimental_io_device="/job:localhost")
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
filepath=checkpoint_filepath,
save_weights_only=True,
monitor="val_auc",
mode="max",
save_best_only=True,
options=options,
)
model.fit(
train_dataset,
epochs=30,
validation_data=val_dataset,
callbacks=[model_checkpoint_callback],
)
# save_locally = tf.saved_model.SaveOptions(experimental_io_device='/job:localhost')
# model.save('saved-model', options=save_locally)
# Load model, predict and write submission file
model.load_weights(checkpoint_filepath, options=options)
test_images_ds = test_dataset.map(lambda image, idnum: image)
predictions = model.predict(test_images_ds)
print(predictions)
print("Generating submission.csv file...")
test_ids_ds = test_dataset.map(lambda image, idnum: idnum).unbatch()
test_ids = (
next(iter(test_ids_ds.batch(np.size(predictions)))).numpy().astype("U")
) # all in one batch
data = {
"id": test_ids,
}
submission = pd.DataFrame(data)
submission = submission.assign(target=predictions)
submission.to_csv("submission.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/534/69534707.ipynb
|
setitfrecdatasettrain2
|
vungocbinh
|
[{"Id": 69534707, "ScriptId": 18610122, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6875167, "CreationDate": "08/01/2021 08:13:55", "VersionNumber": 21.0, "Title": "seti-tensorflow-tfrec", "EvaluationDate": "08/01/2021", "IsChange": true, "TotalLines": 209.0, "LinesInsertedFromPrevious": 2.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 207.0, "LinesInsertedFromFork": 95.0, "LinesDeletedFromFork": 52.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 114.0, "TotalVotes": 0}]
|
[{"Id": 92831167, "KernelVersionId": 69534707, "SourceDatasetVersionId": 2429644}, {"Id": 92831169, "KernelVersionId": 69534707, "SourceDatasetVersionId": 2429648}, {"Id": 92831170, "KernelVersionId": 69534707, "SourceDatasetVersionId": 2429650}, {"Id": 92831171, "KernelVersionId": 69534707, "SourceDatasetVersionId": 2429659}, {"Id": 92831168, "KernelVersionId": 69534707, "SourceDatasetVersionId": 2429646}]
|
[{"Id": 2429644, "DatasetId": 1470276, "DatasourceVersionId": 2471872, "CreatorUserId": 6875167, "LicenseName": "Unknown", "CreationDate": "07/16/2021 02:58:12", "VersionNumber": 1.0, "Title": "seti-tfrec-dataset-train-2", "Slug": "setitfrecdatasettrain2", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1470276, "CreatorUserId": 6875167, "OwnerUserId": 6875167.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2429644.0, "CurrentDatasourceVersionId": 2471872.0, "ForumId": 1489916, "Type": 2, "CreationDate": "07/16/2021 02:58:12", "LastActivityDate": "07/16/2021", "TotalViews": 638, "TotalDownloads": 1, "TotalVotes": 1, "TotalKernels": 1}]
|
[{"Id": 6875167, "UserName": "vungocbinh", "DisplayName": "Vu Ngoc Binh", "RegisterDate": "03/06/2021", "PerformanceTier": 1}]
|
| false | 0 | 2,101 | 0 | 2,131 | 2,101 |
||
69207698
|
<jupyter_start><jupyter_text>Latest Covid-19 India Statewise Data
This dataset contains the latest Covid-19 India state-wise data as of **July 25, 2021**. This dataset can be used to analyze the Covid situation in India. This dataset is great for **Exploratory Data Analysis**
**If you find this dataset useful, please consider upvoting** ❤️
Kaggle dataset identifier: latest-covid19-india-statewise-data
<jupyter_code>import pandas as pd
df = pd.read_csv('latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 36 entries, 0 to 35
Data columns (total 9 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 State/UTs 36 non-null object
1 Total Cases 36 non-null int64
2 Active 36 non-null int64
3 Discharged 36 non-null int64
4 Deaths 36 non-null int64
5 Active Ratio 36 non-null float64
6 Discharge Ratio 36 non-null float64
7 Death Ratio 36 non-null float64
8 Population 36 non-null int64
dtypes: float64(3), int64(5), object(1)
memory usage: 2.7+ KB
<jupyter_text>Examples:
{
"State/UTs": "Andaman and Nicobar",
"Total Cases": 10766,
"Active": 0,
"Discharged": 10637,
"Deaths": 129,
"Active Ratio": 0,
"Discharge Ratio": 98.8,
"Death Ratio": 1.2,
"Population": 100896618
}
{
"State/UTs": "Andhra Pradesh",
"Total Cases": 2340676,
"Active": 0,
"Discharged": 2325943,
"Deaths": 14733,
"Active Ratio": 0,
"Discharge Ratio": 99.37,
"Death Ratio": 0.63,
"Population": 128500364
}
{
"State/UTs": "Arunachal Pradesh",
"Total Cases": 67049,
"Active": 0,
"Discharged": 66753,
"Deaths": 296,
"Active Ratio": 0,
"Discharge Ratio": 99.56,
"Death Ratio": 0.44,
"Population": 658019
}
{
"State/UTs": "Assam",
"Total Cases": 746159,
"Active": 5,
"Discharged": 738119,
"Deaths": 8035,
"Active Ratio": 0,
"Discharge Ratio": 98.92,
"Death Ratio": 1.08,
"Population": 290492
}
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Reading the csv file as a pandas data frame and showing the data frame
covid_data = pd.read_csv(
"../input/latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv"
)
covid_data
# # Finding out the total number of cases in India
Total_cases = sum(covid_data["Total Cases"])
Total_cases
filtered_case = covid_data[covid_data["Total Cases"] >= 10000000]
filtered_case
# # The total seems implausibly high, which points to an error in the data
# # Finding the state with the maximum cases in India; this value in particular looks incorrect
max_case = covid_data[covid_data["Total Cases"] == max(covid_data["Total Cases"])]
max_case
# # Finding the state with the minimum cases in India
min_case = covid_data[covid_data["Total Cases"] == min(covid_data["Total Cases"])]
min_case
# # The incorrect entry for Bihar inflates the overall total, which would make any further analysis unreliable
# Let's replace it with the correct value
covid_data["Total Cases"] = covid_data["Total Cases"].replace(72439075, 724471)
covid_data
Total_cases_corrected = sum(covid_data["Total Cases"])
Total_cases_corrected
# This seems nearly correct
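# One possible extra sanity check (an added illustration): for each state, Total Cases should
# equal Active + Discharged + Deaths, so any nonzero difference flags a suspicious row.
case_balance = covid_data["Total Cases"] - (
    covid_data["Active"] + covid_data["Discharged"] + covid_data["Deaths"]
)
print(covid_data.loc[case_balance != 0, ["State/UTs", "Total Cases"]])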
# # Let's find out the most affected state
new_max = covid_data[covid_data["Total Cases"] == max(covid_data["Total Cases"])]
new_max
max_death = covid_data[covid_data["Deaths"] == max(covid_data["Deaths"])]
max_death
# # Let's check the death ratio
x = covid_data["State/UTs"]
y = covid_data["Death Ratio"]
plt.bar(x, y)
plt.xticks(rotation=90)
plt.show()
# # Let's check the active ratio
x = covid_data["State/UTs"]
y = covid_data["Active Ratio"]
plt.bar(x, y)
plt.xticks(rotation=90)
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/207/69207698.ipynb
|
latest-covid19-india-statewise-data
|
anandhuh
|
[{"Id": 69207698, "ScriptId": 18866396, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7478497, "CreationDate": "07/28/2021 05:46:50", "VersionNumber": 1.0, "Title": "notebookaa1b563d65", "EvaluationDate": "07/28/2021", "IsChange": true, "TotalLines": 87.0, "LinesInsertedFromPrevious": 87.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
|
[{"Id": 92098013, "KernelVersionId": 69207698, "SourceDatasetVersionId": 2460211}]
|
[{"Id": 2460211, "DatasetId": 1390187, "DatasourceVersionId": 2502627, "CreatorUserId": 6096594, "LicenseName": "Data files \u00a9 Original Authors", "CreationDate": "07/25/2021 05:49:57", "VersionNumber": 6.0, "Title": "Latest Covid-19 India Statewise Data", "Slug": "latest-covid19-india-statewise-data", "Subtitle": "Covid-19 India Statewise Data as on January 17, 2023", "Description": "This dataset contains latest Covid-19 India state-wise data as on **July 25, 2021**. This dataset can be used to analyze covid condition in India. This dataset is great for **Exploratory Data Analysis** \n\n**If you find this dataset useful, please consider upvotting** \u2764\ufe0f", "VersionNotes": "As on July 25,2021", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1390187, "CreatorUserId": 6096594, "OwnerUserId": 6096594.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4863173.0, "CurrentDatasourceVersionId": 4930054.0, "ForumId": 1409416, "Type": 2, "CreationDate": "06/05/2021 09:55:51", "LastActivityDate": "06/05/2021", "TotalViews": 119547, "TotalDownloads": 25461, "TotalVotes": 756, "TotalKernels": 125}]
|
[{"Id": 6096594, "UserName": "anandhuh", "DisplayName": "Anandhu H", "RegisterDate": "11/04/2020", "PerformanceTier": 4}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Reading the csv file as a pandas data frame and displaying it
covid_data = pd.read_csv(
"../input/latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv"
)
covid_data
# # Finding the total number of cases in India
Total_cases = sum(covid_data["Total Cases"])
Total_cases
filtered_case = covid_data[covid_data["Total Cases"] >= 10000000]
filtered_case
# # The total seems implausibly high, which points to an error in the data
# # Finding the state with the maximum cases in India; this value in particular looks incorrect
max_case = covid_data[covid_data["Total Cases"] == max(covid_data["Total Cases"])]
max_case
# # Finding the state with the minimum cases in India
min_case = covid_data[covid_data["Total Cases"] == min(covid_data["Total Cases"])]
min_case
# # The incorrect entry for Bihar inflates the overall total, which would make any further analysis unreliable
# Let's replace it with the correct value
covid_data["Total Cases"] = covid_data["Total Cases"].replace(72439075, 724471)
covid_data
Total_cases_corrected = sum(covid_data["Total Cases"])
Total_cases_corrected
# This seems nearly correct
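# One possible extra sanity check (an added illustration): for each state, Total Cases should
# equal Active + Discharged + Deaths, so any nonzero difference flags a suspicious row.
case_balance = covid_data["Total Cases"] - (
    covid_data["Active"] + covid_data["Discharged"] + covid_data["Deaths"]
)
print(covid_data.loc[case_balance != 0, ["State/UTs", "Total Cases"]])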
# # Let's find out the most affected state
new_max = covid_data[covid_data["Total Cases"] == max(covid_data["Total Cases"])]
new_max
max_death = covid_data[covid_data["Deaths"] == max(covid_data["Deaths"])]
max_death
# # Let's check the death ratio
x = covid_data["State/UTs"]
y = covid_data["Death Ratio"]
plt.bar(x, y)
plt.xticks(rotation=90)
plt.show()
# # Let's check the active ratio
x = covid_data["State/UTs"]
y = covid_data["Active Ratio"]
plt.bar(x, y)
plt.xticks(rotation=90)
plt.show()
|
[{"latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv": {"column_names": "[\"State/UTs\", \"Total Cases\", \"Active\", \"Discharged\", \"Deaths\", \"Active Ratio\", \"Discharge Ratio\", \"Death Ratio\", \"Population\"]", "column_data_types": "{\"State/UTs\": \"object\", \"Total Cases\": \"int64\", \"Active\": \"int64\", \"Discharged\": \"int64\", \"Deaths\": \"int64\", \"Active Ratio\": \"float64\", \"Discharge Ratio\": \"float64\", \"Death Ratio\": \"float64\", \"Population\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 36 entries, 0 to 35\nData columns (total 9 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 State/UTs 36 non-null object \n 1 Total Cases 36 non-null int64 \n 2 Active 36 non-null int64 \n 3 Discharged 36 non-null int64 \n 4 Deaths 36 non-null int64 \n 5 Active Ratio 36 non-null float64\n 6 Discharge Ratio 36 non-null float64\n 7 Death Ratio 36 non-null float64\n 8 Population 36 non-null int64 \ndtypes: float64(3), int64(5), object(1)\nmemory usage: 2.7+ KB\n", "summary": "{\"Total Cases\": {\"count\": 36.0, \"mean\": 1249974.5833333333, \"std\": 1846037.7549725461, \"min\": 10766.0, \"25%\": 106543.0, \"50%\": 614091.0, \"75%\": 1331951.0, \"max\": 8171048.0}, \"Active\": {\"count\": 36.0, \"mean\": 50.333333333333336, \"std\": 207.11804777538273, \"min\": 0.0, \"25%\": 0.0, \"50%\": 1.5, \"75%\": 9.0, \"max\": 1233.0}, \"Discharged\": {\"count\": 36.0, \"mean\": 1235145.7222222222, \"std\": 1820328.2106248958, \"min\": 10637.0, \"25%\": 105539.5, \"50%\": 607675.0, \"75%\": 1322329.0, \"max\": 8022276.0}, \"Deaths\": {\"count\": 36.0, \"mean\": 14778.527777777777, \"std\": 27221.14063579063, \"min\": 4.0, \"25%\": 1124.25, \"50%\": 6551.0, \"75%\": 14325.75, \"max\": 148558.0}, \"Active Ratio\": {\"count\": 36.0, \"mean\": 0.005000000000000001, \"std\": 0.026672618383439064, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 0.16}, \"Discharge Ratio\": {\"count\": 36.0, \"mean\": 98.89722222222221, \"std\": 0.5068536622346096, \"min\": 97.41, \"25%\": 98.69, \"50%\": 98.935, \"75%\": 99.16, \"max\": 99.97}, \"Death Ratio\": {\"count\": 36.0, \"mean\": 1.0977777777777777, \"std\": 0.4947906403498128, \"min\": 0.03, \"25%\": 0.84, \"50%\": 1.065, \"75%\": 1.31, \"max\": 2.44}, \"Population\": {\"count\": 36.0, \"mean\": 39718607.777777776, \"std\": 50509132.95972144, \"min\": 66001.0, \"25%\": 1695472.75, \"50%\": 24100881.5, \"75%\": 69799859.75, \"max\": 231502578.0}}", "examples": "{\"State\\/UTs\":{\"0\":\"Andaman and Nicobar\",\"1\":\"Andhra Pradesh\",\"2\":\"Arunachal Pradesh\",\"3\":\"Assam\"},\"Total Cases\":{\"0\":10766,\"1\":2340676,\"2\":67049,\"3\":746159},\"Active\":{\"0\":0,\"1\":0,\"2\":0,\"3\":5},\"Discharged\":{\"0\":10637,\"1\":2325943,\"2\":66753,\"3\":738119},\"Deaths\":{\"0\":129,\"1\":14733,\"2\":296,\"3\":8035},\"Active Ratio\":{\"0\":0.0,\"1\":0.0,\"2\":0.0,\"3\":0.0},\"Discharge Ratio\":{\"0\":98.8,\"1\":99.37,\"2\":99.56,\"3\":98.92},\"Death Ratio\":{\"0\":1.2,\"1\":0.63,\"2\":0.44,\"3\":1.08},\"Population\":{\"0\":100896618,\"1\":128500364,\"2\":658019,\"3\":290492}}"}}]
| true | 1 |
<start_data_description><data_path>latest-covid19-india-statewise-data/Latest Covid-19 India Status.csv:
<column_names>
['State/UTs', 'Total Cases', 'Active', 'Discharged', 'Deaths', 'Active Ratio', 'Discharge Ratio', 'Death Ratio', 'Population']
<column_types>
{'State/UTs': 'object', 'Total Cases': 'int64', 'Active': 'int64', 'Discharged': 'int64', 'Deaths': 'int64', 'Active Ratio': 'float64', 'Discharge Ratio': 'float64', 'Death Ratio': 'float64', 'Population': 'int64'}
<dataframe_Summary>
{'Total Cases': {'count': 36.0, 'mean': 1249974.5833333333, 'std': 1846037.7549725461, 'min': 10766.0, '25%': 106543.0, '50%': 614091.0, '75%': 1331951.0, 'max': 8171048.0}, 'Active': {'count': 36.0, 'mean': 50.333333333333336, 'std': 207.11804777538273, 'min': 0.0, '25%': 0.0, '50%': 1.5, '75%': 9.0, 'max': 1233.0}, 'Discharged': {'count': 36.0, 'mean': 1235145.7222222222, 'std': 1820328.2106248958, 'min': 10637.0, '25%': 105539.5, '50%': 607675.0, '75%': 1322329.0, 'max': 8022276.0}, 'Deaths': {'count': 36.0, 'mean': 14778.527777777777, 'std': 27221.14063579063, 'min': 4.0, '25%': 1124.25, '50%': 6551.0, '75%': 14325.75, 'max': 148558.0}, 'Active Ratio': {'count': 36.0, 'mean': 0.005000000000000001, 'std': 0.026672618383439064, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 0.16}, 'Discharge Ratio': {'count': 36.0, 'mean': 98.89722222222221, 'std': 0.5068536622346096, 'min': 97.41, '25%': 98.69, '50%': 98.935, '75%': 99.16, 'max': 99.97}, 'Death Ratio': {'count': 36.0, 'mean': 1.0977777777777777, 'std': 0.4947906403498128, 'min': 0.03, '25%': 0.84, '50%': 1.065, '75%': 1.31, 'max': 2.44}, 'Population': {'count': 36.0, 'mean': 39718607.777777776, 'std': 50509132.95972144, 'min': 66001.0, '25%': 1695472.75, '50%': 24100881.5, '75%': 69799859.75, 'max': 231502578.0}}
<dataframe_info>
RangeIndex: 36 entries, 0 to 35
Data columns (total 9 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 State/UTs 36 non-null object
1 Total Cases 36 non-null int64
2 Active 36 non-null int64
3 Discharged 36 non-null int64
4 Deaths 36 non-null int64
5 Active Ratio 36 non-null float64
6 Discharge Ratio 36 non-null float64
7 Death Ratio 36 non-null float64
8 Population 36 non-null int64
dtypes: float64(3), int64(5), object(1)
memory usage: 2.7+ KB
<some_examples>
{'State/UTs': {'0': 'Andaman and Nicobar', '1': 'Andhra Pradesh', '2': 'Arunachal Pradesh', '3': 'Assam'}, 'Total Cases': {'0': 10766, '1': 2340676, '2': 67049, '3': 746159}, 'Active': {'0': 0, '1': 0, '2': 0, '3': 5}, 'Discharged': {'0': 10637, '1': 2325943, '2': 66753, '3': 738119}, 'Deaths': {'0': 129, '1': 14733, '2': 296, '3': 8035}, 'Active Ratio': {'0': 0.0, '1': 0.0, '2': 0.0, '3': 0.0}, 'Discharge Ratio': {'0': 98.8, '1': 99.37, '2': 99.56, '3': 98.92}, 'Death Ratio': {'0': 1.2, '1': 0.63, '2': 0.44, '3': 1.08}, 'Population': {'0': 100896618, '1': 128500364, '2': 658019, '3': 290492}}
<end_description>
| 748 | 1 | 1,588 | 748 |
69207193
|
from zipfile import ZipFile as zf
train_zip = zf("/kaggle/input/dogs-vs-cats/train.zip", "r")
train_zip.extractall()
train_zip.close()
test_zip = zf("/kaggle/input/dogs-vs-cats/test1.zip", "r")
test_zip.extractall()
test_zip.close()
import os
os.mkdir("./train1")
os.mkdir("./train1/cats")
os.mkdir("./train1/dogs")
import shutil
for file_name in os.listdir("./train"):
if file_name.split(".")[0] == "cat":
shutil.copy(
os.path.join("./train/", file_name),
os.path.join("./train1/cats", file_name),
)
elif file_name.split(".")[0] == "dog":
shutil.copy(
os.path.join("./train/", file_name),
os.path.join("./train1/dogs", file_name),
)
os.mkdir("./training_set")
os.mkdir("./training_set/cats")
os.mkdir("./training_set/dogs")
os.mkdir("./val_set")
os.mkdir("./val_set/cats")
os.mkdir("./val_set/dogs")
import random
traincats = os.listdir("./train1/cats")
random.shuffle(traincats)
traindogs = os.listdir("./train1/dogs")
random.shuffle(traindogs)
for file_name in traincats[:10000]:
shutil.copy(
os.path.join("./train1/cats", file_name),
os.path.join("./training_set/cats", file_name),
)
for file_name in traincats[10000:]:
shutil.copy(
os.path.join("./train1/cats", file_name),
os.path.join("./val_set/cats", file_name),
)
for file_name in traindogs[:10000]:
shutil.copy(
os.path.join("./train1/dogs", file_name),
os.path.join("./training_set/dogs", file_name),
)
for file_name in traindogs[10000:]:
shutil.copy(
os.path.join("./train1/dogs", file_name),
os.path.join("./val_set/dogs", file_name),
)
print(len(os.listdir("./training_set/cats/")))
print(len(os.listdir("./training_set/dogs/")))
print(len(os.listdir("./val_set/cats/")))
print(len(os.listdir("./val_set/dogs/")))
import sys
from matplotlib import pyplot
from keras.utils import to_categorical
from keras.applications.vgg16 import VGG16
from keras.models import Model
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Dropout
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
# define cnn model
def define_model():
# load model
model = VGG16(include_top=False, input_shape=(224, 224, 3))
# mark loaded layers as not trainable
for layer in model.layers:
layer.trainable = False
# add new classifier layers
flat1 = Flatten()(model.layers[-1].output)
class1 = Dense(128, activation="relu", kernel_initializer="he_uniform")(flat1)
dropout = Dropout(0.5)(class1)
output = Dense(1, activation="sigmoid")(dropout)
# define new model
model = Model(inputs=model.inputs, outputs=output)
model.compile(
optimizer=Adam(learning_rate=0.001),
loss="binary_crossentropy",
metrics=["binary_accuracy"],
)
# compile model
return model
# plot diagnostic learning curves
def summarize_diagnostics(history):
# plot loss
pyplot.subplot(211)
pyplot.title("Cross Entropy Loss")
pyplot.plot(history.history["loss"], color="blue", label="train")
pyplot.plot(history.history["val_loss"], color="orange", label="test")
# plot accuracy
pyplot.subplot(212)
pyplot.title("Classification Accuracy")
pyplot.plot(history.history["binary_accuracy"], color="blue", label="train")
pyplot.plot(history.history["val_binary_accuracy"], color="orange", label="test")
# save plot to file
filename = sys.argv[0].split("/")[-1]
pyplot.savefig(filename + "_plot.png")
pyplot.show()
# run the test harness for evaluating a model
def run_test_harness():
model = define_model()
# create data generator
datagen = ImageDataGenerator(featurewise_center=True)
# specify imagenet mean values for centering
datagen.mean = [123.68, 116.779, 103.939]
# prepare iterator
train_it = datagen.flow_from_directory(
"./training_set", class_mode="binary", batch_size=64, target_size=(224, 224)
)
test_it = datagen.flow_from_directory(
"./val_set/", class_mode="binary", batch_size=64, target_size=(224, 224)
)
# fit model
history = model.fit(
train_it,
steps_per_epoch=len(train_it),
validation_data=test_it,
validation_steps=len(test_it),
epochs=10,
verbose=1,
)
_, acc = model.evaluate(test_it, steps=len(test_it), verbose=0)
print("> %.3f" % (acc * 100.0))
print(history.history)
# learning curves
summarize_diagnostics(history)
model.save("final_model.h5")
run_test_harness()
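# A possible follow-up (a sketch only, not part of the training harness above): use the saved
# model to score a few of the unlabeled images extracted from test1.zip. The sample size, the
# reuse of the ImageNet means, and the 0.5 threshold are assumptions made just for illustration.
from keras.models import load_model
from keras.preprocessing.image import load_img, img_to_array
import numpy as np
saved_model = load_model("final_model.h5")
for name in sorted(os.listdir("./test1"))[:5]:
    img = img_to_array(load_img(os.path.join("./test1", name), target_size=(224, 224)))
    img = img - [123.68, 116.779, 103.939]  # same ImageNet mean centering as in training
    prob = saved_model.predict(img[np.newaxis, ...])[0][0]
    # flow_from_directory assigned cats=0 and dogs=1 (alphabetical), so prob approximates P(dog)
    print(name, "dog" if prob > 0.5 else "cat", round(float(prob), 3))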
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/207/69207193.ipynb
| null | null |
[{"Id": 69207193, "ScriptId": 18880412, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7782660, "CreationDate": "07/28/2021 05:37:55", "VersionNumber": 3.0, "Title": "notebook904a3662aa", "EvaluationDate": "07/28/2021", "IsChange": true, "TotalLines": 115.0, "LinesInsertedFromPrevious": 7.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 108.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
from zipfile import ZipFile as zf
train_zip = zf("/kaggle/input/dogs-vs-cats/train.zip", "r")
train_zip.extractall()
train_zip.close()
test_zip = zf("/kaggle/input/dogs-vs-cats/test1.zip", "r")
test_zip.extractall()
test_zip.close()
import os
os.mkdir("./train1")
os.mkdir("./train1/cats")
os.mkdir("./train1/dogs")
import shutil
for file_name in os.listdir("./train"):
if file_name.split(".")[0] == "cat":
shutil.copy(
os.path.join("./train/", file_name),
os.path.join("./train1/cats", file_name),
)
elif file_name.split(".")[0] == "dog":
shutil.copy(
os.path.join("./train/", file_name),
os.path.join("./train1/dogs", file_name),
)
os.mkdir("./training_set")
os.mkdir("./training_set/cats")
os.mkdir("./training_set/dogs")
os.mkdir("./val_set")
os.mkdir("./val_set/cats")
os.mkdir("./val_set/dogs")
import random
traincats = os.listdir("./train1/cats")
random.shuffle(traincats)
traindogs = os.listdir("./train1/dogs")
random.shuffle(traindogs)
for file_name in traincats[:10000]:
shutil.copy(
os.path.join("./train1/cats", file_name),
os.path.join("./training_set/cats", file_name),
)
for file_name in traincats[10000:]:
shutil.copy(
os.path.join("./train1/cats", file_name),
os.path.join("./val_set/cats", file_name),
)
for file_name in traindogs[:10000]:
shutil.copy(
os.path.join("./train1/dogs", file_name),
os.path.join("./training_set/dogs", file_name),
)
for file_name in traindogs[10000:]:
shutil.copy(
os.path.join("./train1/dogs", file_name),
os.path.join("./val_set/dogs", file_name),
)
print(len(os.listdir("./training_set/cats/")))
print(len(os.listdir("./training_set/dogs/")))
print(len(os.listdir("./val_set/cats/")))
print(len(os.listdir("./val_set/dogs/")))
import sys
from matplotlib import pyplot
from keras.utils import to_categorical
from keras.applications.vgg16 import VGG16
from keras.models import Model
from keras.layers import Dense
from keras.layers import Flatten
from keras.layers import Dropout
from keras.optimizers import Adam
from keras.preprocessing.image import ImageDataGenerator
# define cnn model
def define_model():
# load model
model = VGG16(include_top=False, input_shape=(224, 224, 3))
# mark loaded layers as not trainable
for layer in model.layers:
layer.trainable = False
# add new classifier layers
flat1 = Flatten()(model.layers[-1].output)
class1 = Dense(128, activation="relu", kernel_initializer="he_uniform")(flat1)
dropout = Dropout(0.5)(class1)
output = Dense(1, activation="sigmoid")(dropout)
# define new model
model = Model(inputs=model.inputs, outputs=output)
model.compile(
optimizer=Adam(learning_rate=0.001),
loss="binary_crossentropy",
metrics=["binary_accuracy"],
)
# compile model
return model
# plot diagnostic learning curves
def summarize_diagnostics(history):
# plot loss
pyplot.subplot(211)
pyplot.title("Cross Entropy Loss")
pyplot.plot(history.history["loss"], color="blue", label="train")
pyplot.plot(history.history["val_loss"], color="orange", label="test")
# plot accuracy
pyplot.subplot(212)
pyplot.title("Classification Accuracy")
pyplot.plot(history.history["binary_accuracy"], color="blue", label="train")
pyplot.plot(history.history["val_binary_accuracy"], color="orange", label="test")
# save plot to file
filename = sys.argv[0].split("/")[-1]
pyplot.savefig(filename + "_plot.png")
pyplot.show()
# run the test harness for evaluating a model
def run_test_harness():
model = define_model()
# create data generator
datagen = ImageDataGenerator(featurewise_center=True)
# specify imagenet mean values for centering
datagen.mean = [123.68, 116.779, 103.939]
# prepare iterator
train_it = datagen.flow_from_directory(
"./training_set", class_mode="binary", batch_size=64, target_size=(224, 224)
)
test_it = datagen.flow_from_directory(
"./val_set/", class_mode="binary", batch_size=64, target_size=(224, 224)
)
# fit model
history = model.fit(
train_it,
steps_per_epoch=len(train_it),
validation_data=test_it,
validation_steps=len(test_it),
epochs=10,
verbose=1,
)
_, acc = model.evaluate(test_it, steps=len(test_it), verbose=0)
print("> %.3f" % (acc * 100.0))
print(history.history)
# learning curves
summarize_diagnostics(history)
model.save("final_model.h5")
run_test_harness()
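# A possible follow-up (a sketch only, not part of the training harness above): use the saved
# model to score a few of the unlabeled images extracted from test1.zip. The sample size, the
# reuse of the ImageNet means, and the 0.5 threshold are assumptions made just for illustration.
from keras.models import load_model
from keras.preprocessing.image import load_img, img_to_array
import numpy as np
saved_model = load_model("final_model.h5")
for name in sorted(os.listdir("./test1"))[:5]:
    img = img_to_array(load_img(os.path.join("./test1", name), target_size=(224, 224)))
    img = img - [123.68, 116.779, 103.939]  # same ImageNet mean centering as in training
    prob = saved_model.predict(img[np.newaxis, ...])[0][0]
    # flow_from_directory assigned cats=0 and dogs=1 (alphabetical), so prob approximates P(dog)
    print(name, "dog" if prob > 0.5 else "cat", round(float(prob), 3))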
| false | 0 | 1,467 | 0 | 1,467 | 1,467 |
||
69207978
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # **Classification of the MNIST dataset**
# ### This notebook will demonstrate how to train a neural network to recognize images of handwritten digits using TensorFlow. While there are many [MNIST](http://yann.lecun.com/exdb/mnist/) computer vision tutorials that cover the basics, this guide will focus on developing a self-contained model that handles its own preprocessing.
# ### The contents of this notebook include:
# * Data exploration
# * Data preprocessing
# * Building a Convolutional Neural Network
# * Model training
# * Evaluation of results
# * Predicting outcomes with test data
# # Import libraries and verify the versions of Python and TensorFlow
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from platform import python_version
print("Tensorflow Version:", tf.__version__)
print("Python Version:", python_version())
# # Load the training .csv file and explore the data
path = "../input/digit-recognizer/train.csv"
df = pd.read_csv(path)
print(df.shape)
df.head()
# ### Isolate the label column from the pixel data.
y_labels = df.pop("label")
x_pixels = df
# ### Review the labels to confirm the number of unique values.
unique_labels = np.unique(y_labels)
NUM_LABELS = len(unique_labels)
print("Labels: ", unique_labels)
print("No. Labels: ", NUM_LABELS)
# ### The training data has 42,000 images. Let's examine one of them.
plt.figure()
plt.imshow(
np.array(x_pixels.iloc[0]).reshape(28, 28)
) # Reshape the array to 28x28 dimensions
plt.colorbar()
plt.show()
# ### The colorbar shows the pixel values are within the grayscale range of 0 to 255. These values will need to be scaled to a range of 0 to 1 which will be covered later in this guide.
# ### Let's visualize some more images, this time matched with their labels.
plt.figure()
for i in range(8):
ax = plt.subplot(2, 4, i + 1)
plt.imshow(np.array(x_pixels.iloc[i]).reshape(28, 28))
plt.title(y_labels[i])
plt.axis("off")
# ### With the data verified, it's time to create Datasets for training and validation.
x_train, x_val, y_train, y_val = train_test_split(
x_pixels, y_labels, test_size=0.1, random_state=9
)
# ### For this split 90% of the images will be used for training, and 10% for validation.
print(x_train.shape)
print(x_val.shape)
print(y_train.shape)
print(y_val.shape)
# # Create Datasets from the training and validation data
# ### The Dataset object is a Python iterable that can be configured for improved performance. Datasets are used to build efficient input pipelines in TensorFlow.
train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train))
val_ds = tf.data.Dataset.from_tensor_slices((x_val, y_val))
# ### The following code shows 4 different Dataset methods (a small toy example follows this list):
# * *Cache()*: Keeps the Dataset elements in memory to be used in later iterations.
# * *Shuffle()*: Randomly shuffles the elements of the Dataset so each batch can reflect the overall distribution. This reduces overfitting and variance when training.
# * *Batch()*: Packs the elements of the Dataset into batches. This determines the number of samples processed during an iteration.
# * *Prefetch()*: Reduces time by overlapping data preprocessing and model execution of a training step.
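# A tiny toy illustration of how these methods chain together (separate from the MNIST data,
# shown only to make the pipeline behaviour concrete):
demo_ds = tf.data.Dataset.range(10).shuffle(10).batch(4).prefetch(1)
for demo_batch in demo_ds:
    print(demo_batch.numpy())  # batches of up to 4 shuffled integers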
AUTOTUNE = tf.data.experimental.AUTOTUNE
BUFFER_SIZE = 1000 # Shuffle buffer
BATCH_SIZE = 128
def configure_dataset(dataset, shuffle=False, test=False):
# Configure the tf dataset for cache, shuffle, batch, and prefetch
if shuffle:
dataset = (
dataset.cache()
.shuffle(BUFFER_SIZE)
.batch(BATCH_SIZE, drop_remainder=True)
.prefetch(AUTOTUNE)
)
elif test:
dataset = (
dataset.cache().batch(BATCH_SIZE, drop_remainder=False).prefetch(AUTOTUNE)
)
else:
dataset = (
dataset.cache().batch(BATCH_SIZE, drop_remainder=True).prefetch(AUTOTUNE)
)
return dataset
train_ds = configure_dataset(train_ds, shuffle=True)
val_ds = configure_dataset(val_ds)
# ### Observe the shape of the training and validation Datasets. This is the batch size (128) and number of pixel columns (784).
print(train_ds.element_spec)
print(val_ds.element_spec)
# # Build a sequential CNN with preprocessing layers
# ### The reshaping and rescaling tasks are included in the model. These preprocessing layers can then be saved and exported with the CNN. This is the preferred approach only if you're training on a GPU.
# ### A CNN requires image dimensions and a color channel to function. Here inputs are reshaped to include the grayscale color channel.
# ### Inputs need to be normalized to a range of 0 to 1. Divide the pixel values by 255 to achieve this scale. Insert this layer after reshaping.
# ### In this example two convolutional layers are used, each with max pooling and a dropout layer is inserted before the final classification dense layer.
# Include the decimal point when dividing to output floats
rescale_layer = layers.experimental.preprocessing.Rescaling(scale=1.0 / 255)
reshape_layer = layers.Reshape((28, 28, 1)) # Reshape to (height, width, color channel)
model = Sequential(
[
layers.InputLayer(input_shape=[28, 28]),
reshape_layer,
rescale_layer,
layers.Conv2D(64, kernel_size=3, activation="relu"),
layers.MaxPooling2D(2, 2),
layers.Conv2D(128, kernel_size=3, activation="relu"),
layers.MaxPooling2D(2, 2),
layers.Flatten(),
layers.Dropout(0.5), # Reduce overfitting
layers.Dense(NUM_LABELS, activation="softmax"),
]
)
model.summary()
# # Train the CNN model
# ### Sparse categorical crossentropy is used as the loss function with Adam as the optimizer.
model.compile(
loss="sparse_categorical_crossentropy",
    optimizer=Adam(learning_rate=1e-3),
metrics=["accuracy"],
)
EPOCHS = 20
history = model.fit(train_ds, epochs=EPOCHS, validation_data=val_ds)
# # Visualize and evaluate the training and validation results
# Assign the loss and accuracy metrics
train_loss = history.history["loss"]
val_loss = history.history["val_loss"]
train_acc = history.history["accuracy"]
val_acc = history.history["val_accuracy"]
# Create a function to plot the metrics
def plot_history(train_history, val_history, label):
plt.plot(train_history, label=f"Training {label}")
plt.plot(val_history, label=f"Validation {label}")
plt.xlabel("Epochs")
plt.legend()
return plt.show()
# ### The training and validation plots should converge for optimal results.
plot_history(train_loss, val_loss, "Loss")
plot_history(train_acc, val_acc, "Accuracy")
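# If the two curves were to diverge, one common remedy (shown here only as a sketch; it is not
# applied in this run) is to stop training once the validation loss stops improving:
early_stop = tf.keras.callbacks.EarlyStopping(
    monitor="val_loss", patience=3, restore_best_weights=True
)
# e.g. model.fit(train_ds, epochs=EPOCHS, validation_data=val_ds, callbacks=[early_stop])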
# # Load the test .csv file
path_test = "../input/digit-recognizer/test.csv"
df_test = pd.read_csv(path_test)
print(df_test.shape)
df_test.head()
# # Create a Dataset from the test data
test_ds = tf.data.Dataset.from_tensor_slices((df_test))
test_ds = configure_dataset(
test_ds, test=True
) # Set test argument to True so that remainder samples are not dropped
test_ds.element_spec
# # Predict label outcomes from the test Dataset
y_pred = np.argmax(
model.predict(test_ds), axis=-1
) # Returns the highest probability label for each image
y_pred = pd.DataFrame(y_pred)
y_pred.columns = ["Label"]
print(y_pred.shape)
y_pred.head()
image_id = pd.DataFrame(y_pred.index + 1)
image_id.columns = ["ImageId"]
image_id.head()
submission = pd.concat([image_id, y_pred], axis=1)
submission.to_csv("submission.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/207/69207978.ipynb
| null | null |
[{"Id": 69207978, "ScriptId": 18672863, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4365934, "CreationDate": "07/28/2021 05:51:35", "VersionNumber": 21.0, "Title": "Recognize Digits with TensorFlow", "EvaluationDate": "07/28/2021", "IsChange": true, "TotalLines": 222.0, "LinesInsertedFromPrevious": 6.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 216.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # **Classification of the MNIST dataset**
# ### This notebook will demonstrate how to train a neural network to recognize images of handwritten digits using TensorFlow. While there are many [MNIST](http://yann.lecun.com/exdb/mnist/) computer vision tutorials that cover the basics, this guide will focus on developing a self-contained model that handles its own preprocessing.
# ### The contents of this notebook include:
# * Data exploration
# * Data preprocessing
# * Building a Convolutional Neural Network
# * Model training
# * Evaluation of results
# * Predicting outcomes with test data
# # Import libraries and verify the versions of Python and TensorFlow
import pandas as pd
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
from tensorflow.keras.optimizers import Adam
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from platform import python_version
print("Tensorflow Version:", tf.__version__)
print("Python Version:", python_version())
# # Load the training .csv file and explore the data
path = "../input/digit-recognizer/train.csv"
df = pd.read_csv(path)
print(df.shape)
df.head()
# ### Isolate the label column from the pixel data.
y_labels = df.pop("label")
x_pixels = df
# ### Review the labels to confirm the number of unique values.
unique_labels = np.unique(y_labels)
NUM_LABELS = len(unique_labels)
print("Labels: ", unique_labels)
print("No. Labels: ", NUM_LABELS)
# ### The training data has 42,000 images. Let's examine one of them.
plt.figure()
plt.imshow(
np.array(x_pixels.iloc[0]).reshape(28, 28)
) # Reshape the array to 28x28 dimensions
plt.colorbar()
plt.show()
# ### The colorbar shows the pixel values are within the grayscale range of 0 to 255. These values will need to be scaled to a range of 0 to 1 which will be covered later in this guide.
# ### Let's visualize some more images, this time matched with their labels.
plt.figure()
for i in range(8):
ax = plt.subplot(2, 4, i + 1)
plt.imshow(np.array(x_pixels.iloc[i]).reshape(28, 28))
plt.title(y_labels[i])
plt.axis("off")
# ### With the data verified, it's time to create Datasets for training and validation.
x_train, x_val, y_train, y_val = train_test_split(
x_pixels, y_labels, test_size=0.1, random_state=9
)
# ### For this split 90% of the images will be used for training, and 10% for validation.
print(x_train.shape)
print(x_val.shape)
print(y_train.shape)
print(y_val.shape)
# # Create Datasets from the training and validation data
# ### The Dataset object is a Python iterable that can be configured for improved performance. Datasets are used to build efficient input pipelines in TensorFlow.
train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train))
val_ds = tf.data.Dataset.from_tensor_slices((x_val, y_val))
# ### The following code shows 4 different Dataset methods (a small toy example follows this list):
# * *Cache()*: Keeps the Dataset elements in memory to be used in later iterations.
# * *Shuffle()*: Randomly shuffles the elements of the Dataset so each batch can reflect the overall distribution. This reduces overfitting and variance when training.
# * *Batch()*: Packs the elements of the Dataset into batches. This determines the number of samples processed during an iteration.
# * *Prefetch()*: Reduces time by overlapping data preprocessing and model execution of a training step.
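# A tiny toy illustration of how these methods chain together (separate from the MNIST data,
# shown only to make the pipeline behaviour concrete):
demo_ds = tf.data.Dataset.range(10).shuffle(10).batch(4).prefetch(1)
for demo_batch in demo_ds:
    print(demo_batch.numpy())  # batches of up to 4 shuffled integers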
AUTOTUNE = tf.data.experimental.AUTOTUNE
BUFFER_SIZE = 1000 # Shuffle buffer
BATCH_SIZE = 128
def configure_dataset(dataset, shuffle=False, test=False):
# Configure the tf dataset for cache, shuffle, batch, and prefetch
if shuffle:
dataset = (
dataset.cache()
.shuffle(BUFFER_SIZE)
.batch(BATCH_SIZE, drop_remainder=True)
.prefetch(AUTOTUNE)
)
elif test:
dataset = (
dataset.cache().batch(BATCH_SIZE, drop_remainder=False).prefetch(AUTOTUNE)
)
else:
dataset = (
dataset.cache().batch(BATCH_SIZE, drop_remainder=True).prefetch(AUTOTUNE)
)
return dataset
train_ds = configure_dataset(train_ds, shuffle=True)
val_ds = configure_dataset(val_ds)
# ### Observe the shape of the training and validation Datasets. This is the batch size (128) and number of pixel columns (784).
print(train_ds.element_spec)
print(val_ds.element_spec)
# # Build a sequential CNN with preprocessing layers
# ### The reshaping and rescaling tasks are included in the model. These preprocessing layers can then be saved and exported with the CNN. This is the preferred approach only if you're training on a GPU.
# ### A CNN requires image dimensions and a color channel to function. Here inputs are reshaped to include the grayscale color channel.
# ### Inputs need to be normalized to a range of 0 to 1. Divide the pixel values by 255 to achieve this scale. Insert this layer after reshaping.
# ### In this example two convolutional layers are used, each with max pooling and a dropout layer is inserted before the final classification dense layer.
# Include the decimal point when dividing to output floats
rescale_layer = layers.experimental.preprocessing.Rescaling(scale=1.0 / 255)
reshape_layer = layers.Reshape((28, 28, 1)) # Reshape to (height, width, color channel)
model = Sequential(
[
layers.InputLayer(input_shape=[28, 28]),
reshape_layer,
rescale_layer,
layers.Conv2D(64, kernel_size=3, activation="relu"),
layers.MaxPooling2D(2, 2),
layers.Conv2D(128, kernel_size=3, activation="relu"),
layers.MaxPooling2D(2, 2),
layers.Flatten(),
layers.Dropout(0.5), # Reduce overfitting
layers.Dense(NUM_LABELS, activation="softmax"),
]
)
model.summary()
# # Train the CNN model
# ### Sparse categorical crossentropy is used as the loss function with Adam as the optimizer.
model.compile(
loss="sparse_categorical_crossentropy",
    optimizer=Adam(learning_rate=1e-3),
metrics=["accuracy"],
)
EPOCHS = 20
history = model.fit(train_ds, epochs=EPOCHS, validation_data=val_ds)
# # Visualize and evaluate the training and validation results
# Assign the loss and accuracy metrics
train_loss = history.history["loss"]
val_loss = history.history["val_loss"]
train_acc = history.history["accuracy"]
val_acc = history.history["val_accuracy"]
# Create a function to plot the metrics
def plot_history(train_history, val_history, label):
plt.plot(train_history, label=f"Training {label}")
plt.plot(val_history, label=f"Validation {label}")
plt.xlabel("Epochs")
plt.legend()
return plt.show()
# ### The training and validation plots should converge for optimal results.
plot_history(train_loss, val_loss, "Loss")
plot_history(train_acc, val_acc, "Accuracy")
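# If the two curves were to diverge, one common remedy (shown here only as a sketch; it is not
# applied in this run) is to stop training once the validation loss stops improving:
early_stop = tf.keras.callbacks.EarlyStopping(
    monitor="val_loss", patience=3, restore_best_weights=True
)
# e.g. model.fit(train_ds, epochs=EPOCHS, validation_data=val_ds, callbacks=[early_stop])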
# # Load the test .csv file
path_test = "../input/digit-recognizer/test.csv"
df_test = pd.read_csv(path_test)
print(df_test.shape)
df_test.head()
# # Create a Dataset from the test data
test_ds = tf.data.Dataset.from_tensor_slices((df_test))
test_ds = configure_dataset(
test_ds, test=True
) # Set test argument to True so that remainder samples are not dropped
test_ds.element_spec
# # Predict label outcomes from the test Dataset
y_pred = np.argmax(
model.predict(test_ds), axis=-1
) # Returns the highest probability label for each image
y_pred = pd.DataFrame(y_pred)
y_pred.columns = ["Label"]
print(y_pred.shape)
y_pred.head()
image_id = pd.DataFrame(y_pred.index + 1)
image_id.columns = ["ImageId"]
image_id.head()
submission = pd.concat([image_id, y_pred], axis=1)
submission.to_csv("submission.csv", index=False)
| false | 0 | 2,359 | 0 | 2,359 | 2,359 |
||
69207628
|
# MSSubClass: Identifies the type of dwelling involved in the sale.
# 20 1-STORY 1946 & NEWER ALL STYLES
# 30 1-STORY 1945 & OLDER
# 40 1-STORY W/FINISHED ATTIC ALL AGES
# 45 1-1/2 STORY - UNFINISHED ALL AGES
# 50 1-1/2 STORY FINISHED ALL AGES
# 60 2-STORY 1946 & NEWER
# 70 2-STORY 1945 & OLDER
# 75 2-1/2 STORY ALL AGES
# 80 SPLIT OR MULTI-LEVEL
# 85 SPLIT FOYER
# 90 DUPLEX - ALL STYLES AND AGES
# 120 1-STORY PUD (Planned Unit Development) - 1946 & NEWER
# 150 1-1/2 STORY PUD - ALL AGES
# 160 2-STORY PUD - 1946 & NEWER
# 180 PUD - MULTILEVEL - INCL SPLIT LEV/FOYER
# 190 2 FAMILY CONVERSION - ALL STYLES AND AGES
# MSZoning object General zoning classification of the sale
# A Agriculture
#
# C Commercial
#
# FV Floating Village Residential
#
# I Industrial
#
# RH Residential High Density
#
# RL Residential Low Density
#
# RP Residential Low Density Park
#
# RM Residential Medium Density
#
#
# Alley object Type of alley access (gravel or paved)
#
# Grvl Gravel
#
# Pave Paved
#
# NA No alley access
# LotShape object General shape of the lot
#
# Reg Regular
#
# IR1 Slightly irregular
#
# IR2 Moderately Irregular
#
# IR3 Irregular
#
# LandContour object Flatness of the lot
#
# Lvl Near Flat/Level
#
# Bnk Banked - Quick and significant rise from street grade to building
#
# HLS Hillside - Significant slope from side to side
#
# Low Depression
# Utilities object Type of utilities available
#
# AllPub All public Utilities (E,G,W,& S)
#
# NoSewr Electricity, Gas, and Water (Septic Tank)
#
# NoSeWa Electricity and Gas Only
#
# ELO Electricity only
# LotConfig object Lot configuration
#
# Inside Inside lot
#
# Corner Corner lot
#
# CulDSac Cul-de-sac
#
# FR2 Frontage on 2 sides of property
#
# FR3 Frontage on 3 sides of property
# LandSlope object Slope of the lot
#
# Gtl Gentle slope
#
# Mod Moderate Slope
#
# Sev Severe Slope
# Neighborhood object Physical location / neighborhood
# Condition1 object Proximity to nearby facilities
# Condition2 object Proximity to nearby facilities (if more than one is present)
# BldgType object Type of dwelling
#
# 1Fam Single-family Detached
#
# 2FmCon Two-family Conversion; originally built as one-family dwelling
#
# Duplx Duplex
#
# TwnhsE Townhouse End Unit
#
# TwnhsI Townhouse Inside Unit
# HouseStyle object Number of stories and degree of finish
# Street object Type of road access (gravel or paved)
# Alley object Type of alley access (gravel or paved)
# RoofStyle object Type of roof
# RoofMatl object Roof material
# Exterior1st object Exterior covering material
# Exterior2nd object Exterior covering material (if more than one)
# MasVnrType object Masonry veneer type
# ExterQual object Quality of the exterior material
# ExterCond object Present condition of the exterior material
# Foundation object Type of foundation
# BsmtQual object Height of the basement
# BsmtCond object General condition of the basement
# BsmtExposure object How much of the basement is exposed above ground
# BsmtFinType1 object Quality of the finished basement area
# BsmtFinType2 object Quality of the finished basement area (if a second type exists)
# Heating object Type of heating
# HeatingQC object Heating quality
# CentralAir object Whether the air conditioning is central
# Electrical object Electrical system
# KitchenQual object Kitchen quality
# Functional object Home functionality
# FireplaceQu object Fireplace quality
# GarageType object Garage location
# GarageFinish object Interior finish of the garage
# GarageQual object Garage quality
# GarageCond object Garage condition
# PavedDrive object Whether the driveway is paved
# PoolQC object Pool quality
# Fence object Fence material
# MiscFeature object Miscellaneous features
# SaleType object Type of sale / payment method
# SaleCondition object Condition of sale
# Id int64 -
# MSSubClass int64 Building class
# LotFrontage float64 Street frontage (distance of street connected to the lot)
# LotArea int64 Lot size (area)
# OverallQual int64 Overall material and finish quality
# 10 Very Excellent
#
# 9 Excellent
#
# 8 Very Good
#
# 7 Good
#
# 6 Above Average
#
# 5 Average
#
# 4 Below Average
#
# 3 Fair
#
# 2 Poor
#
# 1 Very Poor
# OverallCond int64 Overall condition of the house
# YearBuilt int64 Year of construction
# YearRemodAdd int64 Year of remodeling
# MasVnrArea float64 Masonry veneer area
# BsmtFinSF1 int64 Finished basement area (type 1)
# BsmtFinSF2 int64 Finished basement area (type 2)
# BsmtUnfSF int64 Unfinished basement area
# TotalBsmtSF int64 Total basement floor area
# 1stFlrSF int64 First-floor area
# 2ndFlrSF int64 Second-floor area
# LowQualFinSF int64 Low-quality finished area
# GrLivArea int64 Above-grade living area
# BsmtFullBath int64 Number of full bathrooms in the basement
# BsmtHalfBath int64 Number of half bathrooms in the basement
# FullBath int64 Full bathrooms above grade
# HalfBath int64 Half bathrooms above grade
# BedroomAbvGr int64 Number of bedrooms above grade
# KitchenAbvGr int64 Number of kitchens above grade
# TotRmsAbvGrd int64 Total rooms above grade
# Fireplaces int64 Number of fireplaces
# GarageYrBlt float64 Year the garage was built
# GarageCars int64 Garage size in car capacity
# GarageArea int64 Garage area
# WoodDeckSF int64 Wood deck area
# OpenPorchSF int64 Open (front) porch area
# EnclosedPorch int64 Enclosed porch area
# 3SsnPorch int64 Three-season porch area
# ScreenPorch int64 Screened porch area
# PoolArea int64 Pool area
# MiscVal int64 Value of miscellaneous features
# MoSold int64 Month sold
# YrSold int64 Year sold
# SalePrice int64 Sale price (target variable)
#
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Import modules
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
pd.set_option("display.max_columns", 100)
pd.set_option("display.max_rows", 100)
# # Load the data
train_df = pd.read_csv(
"../input/house-prices-advanced-regression-techniques/train.csv", sep=","
)
test_df = pd.read_csv(
"../input/house-prices-advanced-regression-techniques/test.csv", sep=","
)
train_df.head(100)
# # Take a look at the data
# Use pandas utilities
# df.info, df.columns, df.unique...etc
# ### rows = 1460
# ### columns = 81
print(train_df.info())
print("rows = ", len(train_df))
print("columns = ", len(train_df.columns))
print(train_df.shape)
print(test_df.info())
print("rows = ", len(test_df))
print("columns = ", len(test_df.columns))
print(test_df.shape)
# # What we can see
# Some columns contain NaN values (any column with fewer than 1460 non-null entries)
# # Specifying data types
# Try to keep the data (memory) size as small as possible
# By default every numeric column takes 8 bytes (int64/float64), so cast down to int8/uint8 (1 byte), int16/uint16/float16 (2 bytes), or int32/uint32/float32 (4 bytes) where the values fit (a small sketch follows the describe() output below)
# # Handling NA values
# Columns with NA are treated as float, so for integer columns fill or drop the NA values first and then cast with astype
# ## → First, let's look at the data
train_df.describe()
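# A minimal sketch of the downcasting idea described above (illustrative only; the rest of this
# notebook keeps the default dtypes, and the chosen columns and the 0 used as an NA placeholder
# are assumptions made just for this example):
downcast_demo = train_df[["MSSubClass", "OverallQual", "LotFrontage"]].copy()
downcast_demo["MSSubClass"] = downcast_demo["MSSubClass"].astype("int16")
downcast_demo["OverallQual"] = downcast_demo["OverallQual"].astype("int8")
downcast_demo["LotFrontage"] = downcast_demo["LotFrontage"].fillna(0).astype("float32")
print(downcast_demo.dtypes)
print("memory:", downcast_demo.memory_usage(deep=True).sum(), "bytes")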
# # Approach 1
# ## (1) Split the columns into qualitative (non-numeric) and quantitative (numeric) variables
# ## (2) Do not use columns that have very few non-missing values
# train_: split into qualitative and quantitative variables -----------------------------------------------------------------------------------
Qual_train_df = train_df[
[
"MSZoning",
"Street",
"Alley",
"LotShape",
"LandContour",
"Utilities",
"LotConfig",
"LandSlope",
"Neighborhood",
"Condition1",
"Condition2",
"BldgType",
"HouseStyle",
"RoofStyle",
"RoofMatl",
"Exterior1st",
"Exterior2nd",
"MasVnrType",
"ExterQual",
"ExterCond",
"Foundation",
"BsmtQual",
"BsmtCond",
"BsmtExposure",
"BsmtFinType1",
"BsmtFinType2",
"Heating",
"HeatingQC",
"CentralAir",
"Electrical",
"KitchenQual",
"Functional",
"FireplaceQu",
"GarageType",
"GarageQual",
"GarageFinish",
"GarageCond",
"PavedDrive",
"PoolQC",
"Fence",
"MiscFeature",
"SaleType",
"SaleCondition",
]
]
Qant_train_df = train_df.drop(
columns=[
"MSZoning",
"Street",
"Alley",
"LotShape",
"LotShape",
"LotShape",
"LandContour",
"Utilities",
"LotConfig",
"LandSlope",
"Neighborhood",
"Condition1",
"Condition2",
"BldgType",
"HouseStyle",
"RoofStyle",
"RoofMatl",
"Exterior1st",
"Exterior2nd",
"MasVnrType",
"ExterQual",
"ExterCond",
"Foundation",
"BsmtQual",
"BsmtCond",
"BsmtExposure",
"BsmtFinType1",
"BsmtFinType2",
"Heating",
"HeatingQC",
"CentralAir",
"Electrical",
"KitchenQual",
"Functional",
"FireplaceQu",
"GarageType",
"GarageQual",
"GarageFinish",
"GarageCond",
"PavedDrive",
"PoolQC",
"Fence",
"MiscFeature",
"SaleType",
"SaleCondition",
],
axis=1,
)
# ------------------------------------------------------------------------------------------------------------------------------
# test_: split into qualitative and quantitative variables ------------------------------------------------------------------------------------
Qual_test_df = test_df[
[
"MSZoning",
"Street",
"Alley",
"LotShape",
"LandContour",
"Utilities",
"LotConfig",
"LandSlope",
"Neighborhood",
"Condition1",
"Condition2",
"BldgType",
"HouseStyle",
"RoofStyle",
"RoofMatl",
"Exterior1st",
"Exterior2nd",
"MasVnrType",
"ExterQual",
"ExterCond",
"Foundation",
"BsmtQual",
"BsmtCond",
"BsmtExposure",
"BsmtFinType1",
"BsmtFinType2",
"Heating",
"HeatingQC",
"CentralAir",
"Electrical",
"KitchenQual",
"Functional",
"FireplaceQu",
"GarageType",
"GarageQual",
"GarageFinish",
"GarageCond",
"PavedDrive",
"PoolQC",
"Fence",
"MiscFeature",
"SaleType",
"SaleCondition",
]
]
Qant_test_df = test_df.drop(
columns=[
"MSZoning",
"Street",
"Alley",
"LotShape",
"LotShape",
"LotShape",
"LandContour",
"Utilities",
"LotConfig",
"LandSlope",
"Neighborhood",
"Condition1",
"Condition2",
"BldgType",
"HouseStyle",
"RoofStyle",
"RoofMatl",
"Exterior1st",
"Exterior2nd",
"MasVnrType",
"ExterQual",
"ExterCond",
"Foundation",
"BsmtQual",
"BsmtCond",
"BsmtExposure",
"BsmtFinType1",
"BsmtFinType2",
"Heating",
"HeatingQC",
"CentralAir",
"Electrical",
"KitchenQual",
"Functional",
"FireplaceQu",
"GarageType",
"GarageQual",
"GarageFinish",
"GarageCond",
"PavedDrive",
"PoolQC",
"Fence",
"MiscFeature",
"SaleType",
"SaleCondition",
],
axis=1,
)
# --------------------------------------------------------------------------------------------------------------------------------
# Drop variables that have very few non-missing values
Qual_train_df = Qual_train_df.drop(
columns=["Alley", "PoolQC", "Fence", "MiscFeature"], axis=1
)
Qual_test_df = Qual_test_df.drop(
columns=["Alley", "PoolQC", "Fence", "MiscFeature"], axis=1
)
# # Impute the NaN values
# # Approach 2
# (1) Fill qualitative variables with the mode
# (2) Handle quantitative variables case by case
# → i.e. decide whether to fill with the mode, the mean, or the median.
# # (1) Fill qualitative variables with the mode
# ## → First, look at the contents of each variable
print(Qual_train_df["MSZoning"].value_counts()) # 欠損値あり
# print(Qual_train_df["Street"].value_counts())
# print(Qual_train_df["LotShape"].value_counts())
# print(Qual_train_df["LandContour"].value_counts())
print(Qual_train_df["Utilities"].value_counts()) # 欠損値あり
# print(Qual_train_df["LotConfig"].value_counts())
# print(Qual_train_df["LandSlope"].value_counts())
# print(Qual_train_df["Neighborhood"].value_counts())
# print(Qual_train_df["Condition1"].value_counts())
# print(Qual_train_df["Condition2"].value_counts())
# print(Qual_train_df["HouseStyle"].value_counts())
# print(Qual_train_df["RoofStyle"].value_counts())
# print(Qual_train_df["RoofMatl"].value_counts())
print(Qual_train_df["Exterior1st"].value_counts()) # 欠損値あり
print(Qual_train_df["Exterior2nd"].value_counts()) # 欠損値あり
print(Qual_train_df["MasVnrType"].value_counts()) # 欠損値あり
# print(Qual_train_df["ExterQual"].value_counts())
# print(Qual_train_df["ExterCond"].value_counts())
# print(Qual_train_df["Foundation"].value_counts())
print(Qual_train_df["BsmtQual"].value_counts()) # 欠損値あり
print(Qual_train_df["BsmtCond"].value_counts()) # 欠損値あり
print(Qual_train_df["BsmtExposure"].value_counts()) # 欠損値あり
print(Qual_train_df["BsmtFinType1"].value_counts()) # 欠損値あり
print(Qual_train_df["BsmtFinType2"].value_counts()) # 欠損値あり
# print(Qual_train_df["Heating"].value_counts())
# print(Qual_train_df["HeatingQC"].value_counts())
# print(Qual_train_df["CentralAir"].value_counts())
# print(Qual_train_df["Electrical"].value_counts())
print(Qual_train_df["KitchenQual"].value_counts()) # 欠損値あり
print(Qual_train_df["Functional"].value_counts()) # 欠損値あり
print(Qual_train_df["FireplaceQu"].value_counts()) # 欠損値あり
print(Qual_train_df["GarageType"].value_counts()) # 欠損値あり
print(Qual_train_df["GarageFinish"].value_counts()) # 欠損値あり
print(Qual_train_df["GarageQual"].value_counts()) # 欠損値あり
print(Qual_train_df["GarageCond"].value_counts()) # 欠損値あり
print(Qual_train_df["SaleType"].value_counts()) # 欠損値あり
# print(Qual_train_df["SaleCondition"].value_counts())
Qual_train_df["MSZoning"] = Qual_train_df["MSZoning"].fillna("RL")
Qual_test_df["MSZoning"] = Qual_test_df["MSZoning"].fillna("RL")
print("nullnum is ", Qual_train_df["MSZoning"].isnull().sum())
# 1459 of the values are "AllPub", so this variable may carry almost no information
Qual_train_df["Utilities"] = Qual_train_df["Utilities"].fillna("AllPub")
Qual_test_df["Utilities"] = Qual_test_df["Utilities"].fillna("AllPub")
print("nullnum is ", Qual_train_df["Utilities"].isnull().sum())
Qual_train_df["Exterior1st"] = Qual_train_df["Exterior1st"].fillna("VinylSd")
Qual_test_df["Exterior1st"] = Qual_test_df["Exterior1st"].fillna("VinylSd")
print("nullnum is ", Qual_train_df["Exterior1st"].isnull().sum())
Qual_train_df["Exterior2nd"] = Qual_train_df["Exterior2nd"].fillna("VinylSd")
Qual_test_df["Exterior2nd"] = Qual_test_df["Exterior2nd"].fillna("VinylSd")
print("nullnum is ", Qual_train_df["Exterior2nd"].isnull().sum())
Qual_train_df["MasVnrType"] = Qual_train_df["MasVnrType"].fillna("None")
Qual_test_df["MasVnrType"] = Qual_test_df["MasVnrType"].fillna("None")
print("nullnum is ", Qual_train_df["MasVnrType"].isnull().sum())
Qual_train_df["BsmtQual"] = Qual_train_df["BsmtQual"].fillna("TA")
Qual_test_df["BsmtQual"] = Qual_test_df["BsmtQual"].fillna("TA")
print("nullnum is ", Qual_train_df["BsmtQual"].isnull().sum())
Qual_train_df["BsmtCond"] = Qual_train_df["BsmtCond"].fillna("TA")
Qual_test_df["BsmtCond"] = Qual_test_df["BsmtCond"].fillna("TA")
print("nullnum is ", Qual_train_df["BsmtCond"].isnull().sum())
Qual_train_df["BsmtExposure"] = Qual_train_df["BsmtExposure"].fillna("No")
Qual_test_df["BsmtExposure"] = Qual_test_df["BsmtExposure"].fillna("No")
print("nullnum is ", Qual_train_df["BsmtExposure"].isnull().sum())
Qual_train_df["BsmtFinType1"] = Qual_train_df["BsmtFinType1"].fillna("Unf")
Qual_test_df["BsmtFinType1"] = Qual_test_df["BsmtFinType1"].fillna("Unf")
print("nullnum is ", Qual_train_df["BsmtFinType1"].isnull().sum())
Qual_train_df["BsmtFinType2"] = Qual_train_df["BsmtFinType2"].fillna("Unf")
Qual_test_df["BsmtFinType2"] = Qual_test_df["BsmtFinType2"].fillna("Unf")
print("nullnum is ", Qual_train_df["BsmtFinType2"].isnull().sum())
Qual_train_df["KitchenQual"] = Qual_train_df["KitchenQual"].fillna("TA")
Qual_test_df["KitchenQual"] = Qual_test_df["KitchenQual"].fillna("TA")
print("nullnum is ", Qual_train_df["KitchenQual"].isnull().sum())
Qual_train_df["Functional"] = Qual_train_df["Functional"].fillna("Typ")
Qual_test_df["Functional"] = Qual_test_df["Functional"].fillna("Typ")
print("nullnum is ", Qual_train_df["Functional"].isnull().sum())
Qual_train_df["FireplaceQu"] = Qual_train_df["FireplaceQu"].fillna("Gd")
Qual_test_df["FireplaceQu"] = Qual_test_df["FireplaceQu"].fillna("Gd")
print("nullnum is ", Qual_train_df["FireplaceQu"].isnull().sum())
Qual_train_df["GarageType"] = Qual_train_df["GarageType"].fillna("Attchd")
Qual_test_df["GarageType"] = Qual_test_df["GarageType"].fillna("Attchd")
print("nullnum is ", Qual_train_df["GarageType"].isnull().sum())
Qual_train_df["GarageFinish"] = Qual_train_df["GarageFinish"].fillna("Unf")
Qual_test_df["GarageFinish"] = Qual_test_df["GarageFinish"].fillna("Unf")
print("nullnum is ", Qual_train_df["GarageFinish"].isnull().sum())
Qual_train_df["GarageQual"] = Qual_train_df["GarageQual"].fillna("TA")
Qual_test_df["GarageQual"] = Qual_test_df["GarageQual"].fillna("TA")
print("nullnum is ", Qual_train_df["GarageQual"].isnull().sum())
Qual_train_df["GarageCond"] = Qual_train_df["GarageCond"].fillna("TA")
Qual_test_df["GarageCond"] = Qual_test_df["GarageCond"].fillna("TA")
print("nullnum is ", Qual_train_df["GarageCond"].isnull().sum())
Qual_train_df["SaleType"] = Qual_train_df["SaleType"].fillna("WD")
Qual_test_df["SaleType"] = Qual_test_df["SaleType"].fillna("WD")
print("nullnum is ", Qual_train_df["SaleType"].isnull().sum())
# # (2) Handle quantitative variables case by case → decide whether to fill with the mode, the mean, or the median.
print(Qant_train_df.isnull().sum())
print(Qant_test_df.isnull().sum())
# ### → Impute "LotFrontage", "MasVnrArea", "BsmtFinSF1", "BsmtFinSF2", "BsmtUnfSF", "TotalBsmtSF",
# ### "BsmtFullBath", "BsmtHalfBath", "GarageYrBlt", "GarageCars", and "GarageArea"
Qant_train_df.describe()
fig = plt.figure(figsize=(40, 10))
ax1 = fig.add_subplot(2, 12, 1)
ax1.set_xlabel("LotFrontage")
ax1.set_ylabel("SalePrice")
plt.scatter(Qant_train_df["LotFrontage"], Qant_train_df["SalePrice"])
ax2 = fig.add_subplot(2, 12, 2)
ax2.set_xlabel("MasVnrArea")
plt.scatter(Qant_train_df["MasVnrArea"], Qant_train_df["SalePrice"])
ax3 = fig.add_subplot(2, 12, 3)
ax3.set_xlabel("BsmtFinSF1")
plt.scatter(Qant_train_df["BsmtFinSF1"], Qant_train_df["SalePrice"])
ax4 = fig.add_subplot(2, 12, 4)
ax4.set_xlabel("BsmtFinSF2")
plt.scatter(Qant_train_df["BsmtFinSF2"], Qant_train_df["SalePrice"])
ax5 = fig.add_subplot(2, 12, 5)
ax5.set_xlabel("BsmtUnfSF")
plt.scatter(Qant_train_df["BsmtUnfSF"], Qant_train_df["SalePrice"])
ax6 = fig.add_subplot(2, 12, 6)
ax6.set_xlabel("TotalBsmtSF")
plt.scatter(Qant_train_df["TotalBsmtSF"], Qant_train_df["SalePrice"])
ax7 = fig.add_subplot(2, 12, 7)
ax7.set_xlabel("BsmtFullBath")
plt.scatter(Qant_train_df["BsmtFullBath"], Qant_train_df["SalePrice"])
ax8 = fig.add_subplot(2, 12, 8)
ax8.set_xlabel("BsmtHalfBath")
plt.scatter(Qant_train_df["BsmtHalfBath"], Qant_train_df["SalePrice"])
ax9 = fig.add_subplot(2, 12, 9)
ax9.set_xlabel("GarageYrBlt")
plt.scatter(Qant_train_df["GarageYrBlt"], Qant_train_df["SalePrice"])
ax10 = fig.add_subplot(2, 12, 10)
ax10.set_xlabel("GarageCars")
plt.scatter(Qant_train_df["GarageCars"], Qant_train_df["SalePrice"])
ax11 = fig.add_subplot(2, 12, 11)
ax11.set_xlabel("GarageArea")
plt.scatter(Qant_train_df["GarageArea"], Qant_train_df["SalePrice"])
# Fill with the mean
Qant_train_df["LotFrontage"] = Qant_train_df["LotFrontage"].fillna(
Qant_train_df["LotFrontage"].mean()
)
Qant_test_df["LotFrontage"] = Qant_test_df["LotFrontage"].fillna(
Qant_test_df["LotFrontage"].mean()
)
print("nullnum is ", Qant_train_df["LotFrontage"].isnull().sum())
# Fill with the median
Qant_train_df["MasVnrArea"] = Qant_train_df["MasVnrArea"].fillna(
Qant_train_df["MasVnrArea"].median()
)
Qant_test_df["MasVnrArea"] = Qant_test_df["MasVnrArea"].fillna(
Qant_test_df["MasVnrArea"].median()
)
print("nullnum is ", Qant_train_df["MasVnrArea"].isnull().sum())
# Fill with the median
Qant_train_df["BsmtFinSF1"] = Qant_train_df["BsmtFinSF1"].fillna(
Qant_train_df["BsmtFinSF1"].median()
)
Qant_test_df["BsmtFinSF1"] = Qant_test_df["BsmtFinSF1"].fillna(
Qant_test_df["BsmtFinSF1"].median()
)
print("nullnum is ", Qant_train_df["BsmtFinSF1"].isnull().sum())
# Fill with the median
Qant_train_df["BsmtFinSF2"] = Qant_train_df["BsmtFinSF2"].fillna(
    Qant_train_df["BsmtFinSF2"].median()
)
Qant_test_df["BsmtFinSF2"] = Qant_test_df["BsmtFinSF2"].fillna(
    Qant_test_df["BsmtFinSF2"].median()
)
print("nullnum is ", Qant_train_df["BsmtFinSF2"].isnull().sum())
# Fill with the mean
Qant_train_df["TotalBsmtSF"] = Qant_train_df["TotalBsmtSF"].fillna(
Qant_train_df["TotalBsmtSF"].mean()
)
Qant_test_df["TotalBsmtSF"] = Qant_test_df["TotalBsmtSF"].fillna(
Qant_test_df["TotalBsmtSF"].mean()
)
print("nullnum is ", Qant_train_df["TotalBsmtSF"].isnull().sum())
# Fill with the mean
Qant_train_df["BsmtUnfSF"] = Qant_train_df["BsmtUnfSF"].fillna(
Qant_train_df["BsmtUnfSF"].mean()
)
Qant_test_df["BsmtUnfSF"] = Qant_test_df["BsmtUnfSF"].fillna(
Qant_test_df["BsmtUnfSF"].mean()
)
print("nullnum is ", Qant_train_df["BsmtUnfSF"].isnull().sum())
# Fill with the mean
Qant_train_df["BsmtFullBath"] = Qant_train_df["BsmtFullBath"].fillna(
    Qant_train_df["BsmtFullBath"].mean()
)
Qant_test_df["BsmtFullBath"] = Qant_test_df["BsmtFullBath"].fillna(
    Qant_test_df["BsmtFullBath"].mean()
)
print("nullnum is ", Qant_train_df["BsmtFullBath"].isnull().sum())
# Fill with the mean
Qant_train_df["BsmtHalfBath"] = Qant_train_df["BsmtHalfBath"].fillna(
Qant_train_df["BsmtHalfBath"].mean()
)
Qant_test_df["BsmtHalfBath"] = Qant_test_df["BsmtHalfBath"].fillna(
Qant_test_df["BsmtHalfBath"].mean()
)
print("nullnum is ", Qant_train_df["BsmtHalfBath"].isnull().sum())
# Subtract 1900 (offset the year), then fill with the mean
Qant_train_df["GarageYrBlt"] = Qant_train_df["GarageYrBlt"] - 1900
Qant_test_df["GarageYrBlt"] = Qant_test_df["GarageYrBlt"] - 1900
Qant_train_df["GarageYrBlt"] = Qant_train_df["GarageYrBlt"].fillna(
Qant_train_df["GarageYrBlt"].mean()
)
Qant_test_df["GarageYrBlt"] = Qant_test_df["GarageYrBlt"].fillna(
Qant_train_df["GarageYrBlt"].mean()
)
print("nullnum is ", Qant_train_df["GarageYrBlt"].isnull().sum())
# Impute with the median
Qant_train_df["GarageArea"] = Qant_train_df["GarageArea"].fillna(
Qant_train_df["GarageArea"].median()
)
Qant_test_df["GarageArea"] = Qant_test_df["GarageArea"].fillna(
Qant_test_df["GarageArea"].median()
)
print("nullnum is ", Qant_train_df["GarageArea"].isnull().sum())
# Impute with the mean (of GarageCars itself, not LotFrontage)
Qant_train_df["GarageCars"] = Qant_train_df["GarageCars"].fillna(
    Qant_train_df["GarageCars"].mean()
)
Qant_test_df["GarageCars"] = Qant_test_df["GarageCars"].fillna(
    Qant_test_df["GarageCars"].mean()
)
print("nullnum is ", Qant_train_df["GarageCars"].isnull().sum())
Qant_train_df
# Look at the correlations
import seaborn as sns
# sns.scatterplot(x=Qant_train_df["LotFrontage"], y=Qant_train_df["SalePrice"])
sns.pairplot(
data=Qant_train_df,
vars=[
"LotFrontage",
"MasVnrArea",
"BsmtFinSF1",
"BsmtFinSF2",
"BsmtUnfSF",
"TotalBsmtSF",
"BsmtFullBath",
"BsmtHalfBath",
"GarageYrBlt",
"GarageCars",
"GarageArea",
"SalePrice",
],
)
sns.pairplot(Qant_train_df)
import seaborn as sns
Qual_train_df_pivot = pd.pivot_table(
data=Qual_train_df,
values="SalePrice",
columns="Neighborhood",
index="SaleCondition",
)
sns.heatmap(Qual_train_df_pivot)
pairplot1 = train_df_nonnan[
[
"MSSubClass",
"LotArea",
"OverallQual",
"OverallCond",
"YearBuilt",
"YearRemodAdd",
"BsmtFinSF1",
"TotalBsmtSF",
]
]
sns.pairplot(pairplot1)
train_df_nan.describe()
# # Converting data types
# # Plan for handling NaN values
# Alley: "Grvl"
# MasVnrType: "BrkFace" → mode
# MasVnrArea: mean
# BsmtQual: TA
# BsmtCond: TA
# BsmtExposure: No → mode
# BsmtFinType1: Unf → mode
# BsmtFinType2: Unf → mode
# Electrical: FuseA
# FireplaceQu: TA
# GarageType: Attchd → mode
# GarageYrBlt - 1900: mean
# GarageFinish: Unf
# GarageQual: TA
# GarageCond: TA
# PoolQC: TA
# Fence: MnPrv → mode
# MiscFeature: Shed
#
print(train_df_nan["Alley"].value_counts())
print(train_df_nan["MasVnrType"].value_counts())
print(train_df_nan["BsmtQual"].value_counts())
print(train_df_nan["BsmtCond"].value_counts())
print(train_df_nan["BsmtExposure"].value_counts())
print(train_df_nan["BsmtFinType1"].value_counts())
print(train_df_nan["BsmtFinType2"].value_counts())
print(train_df_nan["Electrical"].value_counts())
print(train_df_nan["FireplaceQu"].value_counts())
print(train_df_nan["GarageType"].value_counts())
print(train_df_nan["GarageFinish"].value_counts())
print(train_df_nan["GarageCond"].value_counts())
print(train_df_nan["PoolQC"].value_counts())
print(train_df_nan["Fence"].value_counts())
print(train_df_nan["MiscFeature"].value_counts())
print(train_df_nan["LotFrontage"].value_counts())
train_df_nan["Alley"] = train_df_nan["Alley"].fillna("Grvl")
test_df_nan["Alley"] = test_df_nan["Alley"].fillna("Grvl")
print(train_df_nan["Alley"].isnull().sum())
train_df_nan["MasVnrType"] = train_df_nan["MasVnrType"].fillna("BrkFace")
test_df_nan["MasVnrType"] = test_df_nan["MasVnrType"].fillna("BrkFace")
print(train_df_nan["MasVnrType"].isnull().sum())
train_df_nan["MasVnrArea"] = train_df_nan["MasVnrArea"].fillna(
train_df_nan["MasVnrArea"].mean()
)
test_df_nan["MasVnrArea"] = test_df_nan["MasVnrArea"].fillna(
train_df_nan["MasVnrArea"].mean()
)
print(train_df_nan["MasVnrArea"].isnull().sum())
train_df_nan["BsmtQual"] = train_df_nan["BsmtQual"].fillna("TA")
test_df_nan["BsmtQual"] = test_df_nan["BsmtQual"].fillna("TA")
print(train_df_nan["BsmtQual"].isnull().sum())
train_df_nan["BsmtCond"] = train_df_nan["BsmtCond"].fillna("TA")
test_df_nan["BsmtCond"] = test_df_nan["BsmtCond"].fillna("TA")
print(train_df_nan["BsmtCond"].isnull().sum())
train_df_nan["BsmtExposure"] = train_df_nan["BsmtExposure"].fillna("TA")
test_df_nan["BsmtExposure"] = test_df_nan["BsmtExposure"].fillna("TA")
print(train_df_nan["BsmtExposure"].isnull().sum())
train_df_nan["BsmtFinType1"] = train_df_nan["BsmtFinType1"].fillna("Unf")
test_df_nan["BsmtExposure"] = test_df_nan["BsmtExposure"].fillna("TA")
print(train_df_nan["BsmtFinType1"].isnull().sum())
train_df_nan["BsmtFinType2"] = train_df_nan["BsmtFinType2"].fillna("Unf")
test_df_nan["BsmtFinType2"] = test_df_nan["BsmtFinType2"].fillna("Unf")
print(train_df_nan["BsmtFinType2"].isnull().sum())
train_df_nan["Electrical"] = train_df_nan["Electrical"].fillna("FuseA")
test_df_nan["Electrical"] = test_df_nan["Electrical"].fillna("FuseA")
print(train_df_nan["Electrical"].isnull().sum())
train_df_nan["FireplaceQu"] = train_df_nan["FireplaceQu"].fillna("TA")
test_df_nan["FireplaceQu"] = test_df_nan["FireplaceQu"].fillna("TA")
print(train_df_nan["FireplaceQu"].isnull().sum())
train_df_nan["GarageType"] = train_df_nan["GarageType"].fillna("Attchd")
test_df_nan["GarageType"] = test_df_nan["GarageType"].fillna("Attchd")
print(train_df_nan["GarageType"].isnull().sum())
train_df_nan["GarageFinish"] = train_df_nan["GarageFinish"].fillna("Attchd")
test_df_nan["GarageFinish"] = test_df_nan["GarageFinish"].fillna("Attchd")
print(train_df_nan["GarageFinish"].isnull().sum())
train_df_nan["GarageQual"] = train_df_nan["GarageQual"].fillna("TA")
test_df_nan["GarageQual"] = test_df_nan["GarageQual"].fillna("TA")
print(train_df_nan["GarageQual"].isnull().sum())
train_df_nan["GarageCond"] = train_df_nan["GarageCond"].fillna("TA")
test_df_nan["GarageCond"] = test_df_nan["GarageCond"].fillna("TA")
print(train_df_nan["GarageCond"].isnull().sum())
train_df_nan["PoolQC"] = train_df_nan["PoolQC"].fillna("TA")
test_df_nan["PoolQC"] = test_df_nan["PoolQC"].fillna("TA")
print(train_df_nan["PoolQC"].isnull().sum())
train_df_nan["Fence"] = train_df_nan["Fence"].fillna("MnPrv")
test_df_nan["Fence"] = test_df_nan["Fence"].fillna("MnPrv")
print(train_df_nan["Fence"].isnull().sum())
train_df_nan["MiscFeature"] = train_df_nan["MiscFeature"].fillna("Shed")
test_df_nan["MiscFeature"] = test_df_nan["MiscFeature"].fillna("Shed")
print(train_df_nan["MiscFeature"].isnull().sum())
train_df_nan["LotFrontage"] = train_df_nan["LotFrontage"].fillna(
train_df_nan["LotFrontage"].mean()
)
test_df_nan["LotFrontage"] = test_df_nan["LotFrontage"].fillna(
train_df_nan["LotFrontage"].mean()
)
print(train_df_nan["LotFrontage"].isnull().sum())
# train_df_nan["GarageYrBlt"].head()
train_df_nan["GarageYrBlt"] = train_df_nan["GarageYrBlt"] - 1900
train_df_nan["GarageYrBlt"] = train_df_nan["GarageYrBlt"].fillna(
train_df_nan["GarageYrBlt"].mean()
)
test_df_nan["GarageYrBlt"] = test_df_nan["GarageYrBlt"] - 1900
test_df_nan["GarageYrBlt"] = test_df_nan["GarageYrBlt"].fillna(
train_df_nan["GarageYrBlt"].mean()
)
print(train_df_nan["GarageYrBlt"])
train_df = pd.concat([train_df_nan, train_df_nonnan], axis=1)
test_df = pd.concat([test_df_nan, test_df_nonnan], axis=1)
print(train_df.isnull().sum())
train_df.head(50)
test_df.head(50)
from copy import deepcopy  # make sure deepcopy is available (harmless if already imported)

train_df_dummy = deepcopy(train_df)
train_df_dummy = pd.get_dummies(
train_df_dummy,
columns=[
"Alley",
"MasVnrType",
"BsmtQual",
"BsmtCond",
"BsmtExposure",
"BsmtFinType1",
"BsmtFinType2",
"Electrical",
"FireplaceQu",
"GarageType",
"GarageFinish",
"GarageQual",
"GarageCond",
"PoolQC",
"Fence",
"MiscFeature",
"MSZoning",
"Street",
"LotShape",
"LandContour",
"Utilities",
"LotConfig",
"LandSlope",
"Neighborhood",
"Condition1",
"Condition2",
"BldgType",
"HouseStyle",
"RoofStyle",
"RoofMatl",
"Exterior1st",
"Exterior2nd",
"ExterQual",
"ExterCond",
"Foundation",
"Heating",
"HeatingQC",
"CentralAir",
"KitchenQual",
"Functional",
"PavedDrive",
"SaleType",
"SaleCondition",
],
)
test_df_dummy = deepcopy(test_df)
test_df_dummy = pd.get_dummies(
test_df_dummy,
columns=[
"Alley",
"MasVnrType",
"BsmtQual",
"BsmtCond",
"BsmtExposure",
"BsmtFinType1",
"BsmtFinType2",
"Electrical",
"FireplaceQu",
"GarageType",
"GarageFinish",
"GarageQual",
"GarageCond",
"PoolQC",
"Fence",
"MiscFeature",
"MSZoning",
"Street",
"LotShape",
"LandContour",
"Utilities",
"LotConfig",
"LandSlope",
"Neighborhood",
"Condition1",
"Condition2",
"BldgType",
"HouseStyle",
"RoofStyle",
"RoofMatl",
"Exterior1st",
"Exterior2nd",
"ExterQual",
"ExterCond",
"Foundation",
"Heating",
"HeatingQC",
"CentralAir",
"KitchenQual",
"Functional",
"PavedDrive",
"SaleType",
"SaleCondition",
],
)
train_df_dummy.head(300)
test_df_dummy.astype({"MasVnrArea": "float32", "GarageYrBlt": "float32"}).dtypes
# test_df_dummy.info(100)
x_train = train_df_dummy.drop("SalePrice", axis=1)
x_train = x_train.values
y_train = train_df_dummy["SalePrice"]
y_train = y_train.values
x_test = test_df_dummy.values
# Note: Perceptron is a classifier, so treating raw SalePrice values as class labels
# is only a rough sanity check, not a sensible model for this regression task.
from sklearn.linear_model import Perceptron
perceptron = Perceptron()
perceptron.fit(x_train, y_train)
Y_pred = perceptron.predict(x_test)
acc_perceptron = round(perceptron.score(x_train, y_train) * 100, 2)
print(acc_perceptron)
from sklearn.linear_model import LinearRegression
# LinearRegression
logreg = LinearRegression()
logreg = logreg.fit(x_train, y_train)
acc_log = round(logreg.score(x_train, y_train) * 100, 2)
# MSSubClass: Identifies the type of dwelling involved in the sale.
# 20 1-STORY 1946 & NEWER ALL STYLES
# 30 1-STORY 1945 & OLDER
# 40 1-STORY W/FINISHED ATTIC ALL AGES
# 45 1-1/2 STORY - UNFINISHED ALL AGES
# 50 1-1/2 STORY FINISHED ALL AGES
# 60 2-STORY 1946 & NEWER
# 70 2-STORY 1945 & OLDER
# 75 2-1/2 STORY ALL AGES
# 80 SPLIT OR MULTI-LEVEL
# 85 SPLIT FOYER
# 90 DUPLEX - ALL STYLES AND AGES
# 120 1-STORY PUD (Planned Unit Development) - 1946 & NEWER
# 150 1-1/2 STORY PUD - ALL AGES
# 160 2-STORY PUD - 1946 & NEWER
# 180 PUD - MULTILEVEL - INCL SPLIT LEV/FOYER
# 190 2 FAMILY CONVERSION - ALL STYLES AND AGES
# MSZoning object — general zoning classification of the sale
# A Agriculture
#
# C Commercial
#
# FV Floating Village Residential
#
# I Industrial
#
# RH Residential High Density
#
# RL Residential Low Density
#
# RP Residential Low Density Park
#
# RM Residential Medium Density
#
#
# Alley object — type of alley access (paved or gravel)
#
# Grvl Gravel
#
# Pave Paved
#
# NA No alley access
# LotShape object — general shape of the property
#
# Reg Regular
#
# IR1 Slightly irregular
#
# IR2 Moderately Irregular
#
# IR3 Irregular
#
# LandContour object — flatness of the property
#
# Lvl Near Flat/Level
#
# Bnk Banked - Quick and significant rise from street grade to building
#
# HLS Hillside - Significant slope from side to side
#
# Low Depression
# Utilities object — type of utilities available
#
# AllPub All public Utilities (E,G,W,& S)
#
# NoSewr Electricity, Gas, and Water (Septic Tank)
#
# NoSeWa Electricity and Gas Only
#
# ELO Electricity only
# LotConfig object — lot configuration
#
# Inside Inside lot
#
# Corner Corner lot
#
# CulDSac Cul-de-sac
#
# FR2 Frontage on 2 sides of property
#
# FR3 Frontage on 3 sides of property
# LandSlope object — slope of the property
#
# Gtl Gentle slope
#
# Mod Moderate Slope
#
# Sev Severe Slope
# Neighborhood object — physical location / neighborhood
# Condition1 object — proximity to various nearby conditions
# Condition2 object — proximity to a second condition (if more than one is present)
# BldgType object — type of dwelling
#
# 1Fam Single-family Detached
#
# 2FmCon Two-family Conversion; originally built as one-family dwelling
#
# Duplx Duplex
#
# TwnhsE Townhouse End Unit
#
# TwnhsI Townhouse Inside Unit
# HouseStyle object — style of dwelling (number of stories and degree of finish)
# Street object — type of road access (paved or gravel)
# Alley object — type of alley access (paved or gravel)
# RoofStyle object — type of roof
# RoofMatl object — roof material
# Exterior1st object — exterior covering of the house
# Exterior2nd object — exterior covering (if more than one material)
# MasVnrType object — masonry veneer type
# ExterQual object — quality of the exterior material
# ExterCond object — present condition of the exterior material
# Foundation object — type of foundation
# BsmtQual object — height of the basement
# BsmtCond object — general condition of the basement
# BsmtExposure object — how much of the basement is exposed above ground
# BsmtFinType1 object — quality of the finished basement area
# BsmtFinType2 object — quality of a second finished basement area (if there are two types)
# Heating object — type of heating
# HeatingQC object — heating quality and condition
# CentralAir object — whether the air conditioning is central
# Electrical object — electrical system
# KitchenQual object — kitchen quality
# Functional object — home functionality
# FireplaceQu object — fireplace quality
# GarageType object — garage location
# GarageFinish object — interior finish of the garage
# GarageQual object — garage quality
# GarageCond object — garage condition
# PavedDrive object — whether the driveway from the garage to the street is paved
# PoolQC object — pool quality
# Fence object — fence material
# MiscFeature object — miscellaneous features
# SaleType object — type of sale / payment
# SaleCondition object — condition of sale
# Id int64 -
# MSSubClass int64 — building class
# LotFrontage float64 — linear feet of street connected to the property
# LotArea int64 — lot size
# OverallQual int64 — quality of the building materials and finish
# 10 Very Excellent
#
# 9 Excellent
#
# 8 Very Good
#
# 7 Good
#
# 6 Above Average
#
# 5 Average
#
# 4 Below Average
#
# 3 Fair
#
# 2 Poor
#
# 1 Very Poor
# OverallCond int64 — overall condition of the house
# YearBuilt int64 — original construction year
# YearRemodAdd int64 — remodel year
# MasVnrArea float64 — masonry veneer area
# BsmtFinSF1 int64 — finished basement area (type 1)
# BsmtFinSF2 int64 — finished basement area (type 2)
# BsmtUnfSF int64 — unfinished basement area
# TotalBsmtSF int64 — total basement floor area
# 1stFlrSF int64 — first-floor area
# 2ndFlrSF int64 — second-floor area
# LowQualFinSF int64 — low-quality finished area
# GrLivArea int64 — above-grade living area
# BsmtFullBath int64 — number of basement full bathrooms
# BsmtHalfBath int64 — number of basement half bathrooms
# FullBath int64 — full bathrooms above grade
# HalfBath int64 — half bathrooms above grade
# BedroomAbvGr int64 — bedrooms above grade
# KitchenAbvGr int64 — kitchens above grade
# TotRmsAbvGrd int64 — total rooms above grade
# Fireplaces int64 — number of fireplaces
# GarageYrBlt float64 — year the garage was built
# GarageCars int64 — garage size (car capacity)
# GarageArea int64 — garage area
# WoodDeckSF int64 — wood deck area
# OpenPorchSF int64 — open (front) porch area
# EnclosedPorch int64 — enclosed porch area
# 3SsnPorch int64 — three-season porch area
# ScreenPorch int64 — screened porch area
# PoolArea int64 — pool area
# MiscVal int64 — value of miscellaneous features
# MoSold int64 — month sold
# YrSold int64 — year sold
# SalePrice int64
#
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Load modules
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
pd.set_option("display.max_columns", 100)
pd.set_option("display.max_rows", 100)
# # Load the data
train_df = pd.read_csv(
"../input/house-prices-advanced-regression-techniques/train.csv", sep=","
)
test_df = pd.read_csv(
"../input/house-prices-advanced-regression-techniques/test.csv", sep=","
)
train_df.head(100)
# # A first look at the data
# Use pandas: df.info, df.columns, df.unique, etc.
# ### Rows = 1460
# ### Columns = 81
print(train_df.info())
print("rows = ", len(train_df))
print("columns = ", len(train_df.columns))
print(train_df.shape)
print(test_df.info())
print("rows = ", len(test_df))
print("columns = ", len(test_df.columns))
print(test_df.shape)
# # What we can see
# Some columns contain NaN values (anything other than 1460 non-null)
# # Specifying data types
# Try to keep the data (memory) footprint as small as possible
# By default every numeric column takes 8 bytes (float64/int64), so cast down to int8/uint8 (1 byte), int16/uint16/float16 (2 bytes) or int32/uint32/float32 (4 bytes) where the values allow it
# # Handling NA
# NaN forces a column to float, so integer columns need fillna or dropna first and can then be cast with astype
# ## → Take a look at the data first
train_df.describe()
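# A minimal sketch of the downcasting idea described above (it is not used further
# below; assumes the train_df just loaded). pd.to_numeric picks the smallest
# sufficient dtype automatically.
def downcast_numeric(df):
    out = df.copy()
    for col in out.select_dtypes(include="integer").columns:
        out[col] = pd.to_numeric(out[col], downcast="integer")
    for col in out.select_dtypes(include="float").columns:
        out[col] = pd.to_numeric(out[col], downcast="float")
    return out


print(
    "memory:",
    train_df.memory_usage(deep=True).sum(),
    "->",
    downcast_numeric(train_df).memory_usage(deep=True).sum(),
    "bytes",
)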
# # Approach 1
# ## (1) Split the columns into qualitative (non-numeric) and quantitative (numeric) features
# ## (2) Do not use features with extremely few observations
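# A small sketch of an alternative to hand-listing the columns below: the same
# qualitative/quantitative split can be derived directly from the dtypes.
qual_cols_auto = train_df.select_dtypes(include="object").columns.tolist()
qant_cols_auto = train_df.select_dtypes(exclude="object").columns.tolist()
print(len(qual_cols_auto), "qualitative /", len(qant_cols_auto), "quantitative columns")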
# train: split into qualitative and quantitative features -------------------------------------------------------------
Qual_train_df = train_df[
[
"MSZoning",
"Street",
"Alley",
"LotShape",
"LandContour",
"Utilities",
"LotConfig",
"LandSlope",
"Neighborhood",
"Condition1",
"Condition2",
"BldgType",
"HouseStyle",
"RoofStyle",
"RoofMatl",
"Exterior1st",
"Exterior2nd",
"MasVnrType",
"ExterQual",
"ExterCond",
"Foundation",
"BsmtQual",
"BsmtCond",
"BsmtExposure",
"BsmtFinType1",
"BsmtFinType2",
"Heating",
"HeatingQC",
"CentralAir",
"Electrical",
"KitchenQual",
"Functional",
"FireplaceQu",
"GarageType",
"GarageQual",
"GarageFinish",
"GarageCond",
"PavedDrive",
"PoolQC",
"Fence",
"MiscFeature",
"SaleType",
"SaleCondition",
]
]
Qant_train_df = train_df.drop(
columns=[
"MSZoning",
"Street",
"Alley",
"LotShape",
"LotShape",
"LotShape",
"LandContour",
"Utilities",
"LotConfig",
"LandSlope",
"Neighborhood",
"Condition1",
"Condition2",
"BldgType",
"HouseStyle",
"RoofStyle",
"RoofMatl",
"Exterior1st",
"Exterior2nd",
"MasVnrType",
"ExterQual",
"ExterCond",
"Foundation",
"BsmtQual",
"BsmtCond",
"BsmtExposure",
"BsmtFinType1",
"BsmtFinType2",
"Heating",
"HeatingQC",
"CentralAir",
"Electrical",
"KitchenQual",
"Functional",
"FireplaceQu",
"GarageType",
"GarageQual",
"GarageFinish",
"GarageCond",
"PavedDrive",
"PoolQC",
"Fence",
"MiscFeature",
"SaleType",
"SaleCondition",
],
axis=1,
)
# ------------------------------------------------------------------------------------------------------------------------------
# test: split into qualitative and quantitative features --------------------------------------------------------------
Qual_test_df = test_df[
[
"MSZoning",
"Street",
"Alley",
"LotShape",
"LandContour",
"Utilities",
"LotConfig",
"LandSlope",
"Neighborhood",
"Condition1",
"Condition2",
"BldgType",
"HouseStyle",
"RoofStyle",
"RoofMatl",
"Exterior1st",
"Exterior2nd",
"MasVnrType",
"ExterQual",
"ExterCond",
"Foundation",
"BsmtQual",
"BsmtCond",
"BsmtExposure",
"BsmtFinType1",
"BsmtFinType2",
"Heating",
"HeatingQC",
"CentralAir",
"Electrical",
"KitchenQual",
"Functional",
"FireplaceQu",
"GarageType",
"GarageQual",
"GarageFinish",
"GarageCond",
"PavedDrive",
"PoolQC",
"Fence",
"MiscFeature",
"SaleType",
"SaleCondition",
]
]
Qant_test_df = test_df.drop(
columns=[
"MSZoning",
"Street",
"Alley",
"LotShape",
"LotShape",
"LotShape",
"LandContour",
"Utilities",
"LotConfig",
"LandSlope",
"Neighborhood",
"Condition1",
"Condition2",
"BldgType",
"HouseStyle",
"RoofStyle",
"RoofMatl",
"Exterior1st",
"Exterior2nd",
"MasVnrType",
"ExterQual",
"ExterCond",
"Foundation",
"BsmtQual",
"BsmtCond",
"BsmtExposure",
"BsmtFinType1",
"BsmtFinType2",
"Heating",
"HeatingQC",
"CentralAir",
"Electrical",
"KitchenQual",
"Functional",
"FireplaceQu",
"GarageType",
"GarageQual",
"GarageFinish",
"GarageCond",
"PavedDrive",
"PoolQC",
"Fence",
"MiscFeature",
"SaleType",
"SaleCondition",
],
axis=1,
)
# --------------------------------------------------------------------------------------------------------------------------------
# Drop features that have very few non-missing values
Qual_train_df = Qual_train_df.drop(
columns=["Alley", "PoolQC", "Fence", "MiscFeature"], axis=1
)
Qual_test_df = Qual_test_df.drop(
columns=["Alley", "PoolQC", "Fence", "MiscFeature"], axis=1
)
# # Impute the NaN values
# # Approach 2
# (1) Fill qualitative features with the mode
# (2) Handle quantitative features case by case
# → decide between the mode, the mean and the median for each column
# # (1) Fill qualitative features with the mode
# ## → First take a look at the value counts
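# A minimal sketch of the mode-filling rule stated above; it only reports the mode
# for each qualitative column with gaps (the notebook fills them one by one below).
for col in Qual_train_df.columns:
    if Qual_train_df[col].isnull().sum() > 0:
        print(col, "-> mode:", Qual_train_df[col].mode().iloc[0])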
print(Qual_train_df["MSZoning"].value_counts()) # 欠損値あり
# print(Qual_train_df["Street"].value_counts())
# print(Qual_train_df["LotShape"].value_counts())
# print(Qual_train_df["LandContour"].value_counts())
print(Qual_train_df["Utilities"].value_counts()) # 欠損値あり
# print(Qual_train_df["LotConfig"].value_counts())
# print(Qual_train_df["LandSlope"].value_counts())
# print(Qual_train_df["Neighborhood"].value_counts())
# print(Qual_train_df["Condition1"].value_counts())
# print(Qual_train_df["Condition2"].value_counts())
# print(Qual_train_df["HouseStyle"].value_counts())
# print(Qual_train_df["RoofStyle"].value_counts())
# print(Qual_train_df["RoofMatl"].value_counts())
print(Qual_train_df["Exterior1st"].value_counts()) # 欠損値あり
print(Qual_train_df["Exterior2nd"].value_counts()) # 欠損値あり
print(Qual_train_df["MasVnrType"].value_counts()) # 欠損値あり
# print(Qual_train_df["ExterQual"].value_counts())
# print(Qual_train_df["ExterCond"].value_counts())
# print(Qual_train_df["Foundation"].value_counts())
print(Qual_train_df["BsmtQual"].value_counts()) # 欠損値あり
print(Qual_train_df["BsmtCond"].value_counts()) # 欠損値あり
print(Qual_train_df["BsmtExposure"].value_counts()) # 欠損値あり
print(Qual_train_df["BsmtFinType1"].value_counts()) # 欠損値あり
print(Qual_train_df["BsmtFinType2"].value_counts()) # 欠損値あり
# print(Qual_train_df["Heating"].value_counts())
# print(Qual_train_df["HeatingQC"].value_counts())
# print(Qual_train_df["CentralAir"].value_counts())
# print(Qual_train_df["Electrical"].value_counts())
print(Qual_train_df["KitchenQual"].value_counts()) # 欠損値あり
print(Qual_train_df["Functional"].value_counts()) # 欠損値あり
print(Qual_train_df["FireplaceQu"].value_counts()) # 欠損値あり
print(Qual_train_df["GarageType"].value_counts()) # 欠損値あり
print(Qual_train_df["GarageFinish"].value_counts()) # 欠損値あり
print(Qual_train_df["GarageQual"].value_counts()) # 欠損値あり
print(Qual_train_df["GarageCond"].value_counts()) # 欠損値あり
print(Qual_train_df["SaleType"].value_counts()) # 欠損値あり
# print(Qual_train_df["SaleCondition"].value_counts())
Qual_train_df["MSZoning"] = Qual_train_df["MSZoning"].fillna("RL")
Qual_test_df["MSZoning"] = Qual_test_df["MSZoning"].fillna("RL")
print("nullnum is ", Qual_train_df["MSZoning"].isnull().sum())
# 1459 of the rows are "AllPub", so this feature may carry almost no information
Qual_train_df["Utilities"] = Qual_train_df["Utilities"].fillna("AllPub")
Qual_test_df["Utilities"] = Qual_test_df["Utilities"].fillna("AllPub")
print("nullnum is ", Qual_train_df["Utilities"].isnull().sum())
Qual_train_df["Exterior1st"] = Qual_train_df["Exterior1st"].fillna("VinylSd")
Qual_test_df["Exterior1st"] = Qual_test_df["Exterior1st"].fillna("VinylSd")
print("nullnum is ", Qual_train_df["Exterior1st"].isnull().sum())
Qual_train_df["Exterior2nd"] = Qual_train_df["Exterior2nd"].fillna("VinylSd")
Qual_test_df["Exterior2nd"] = Qual_test_df["Exterior2nd"].fillna("VinylSd")
print("nullnum is ", Qual_train_df["Exterior2nd"].isnull().sum())
Qual_train_df["MasVnrType"] = Qual_train_df["MasVnrType"].fillna("None")
Qual_test_df["MasVnrType"] = Qual_test_df["MasVnrType"].fillna("None")
print("nullnum is ", Qual_train_df["MasVnrType"].isnull().sum())
Qual_train_df["BsmtQual"] = Qual_train_df["BsmtQual"].fillna("TA")
Qual_test_df["BsmtQual"] = Qual_test_df["BsmtQual"].fillna("TA")
print("nullnum is ", Qual_train_df["BsmtQual"].isnull().sum())
Qual_train_df["BsmtCond"] = Qual_train_df["BsmtCond"].fillna("TA")
Qual_test_df["BsmtCond"] = Qual_test_df["BsmtCond"].fillna("TA")
print("nullnum is ", Qual_train_df["BsmtCond"].isnull().sum())
Qual_train_df["BsmtExposure"] = Qual_train_df["BsmtExposure"].fillna("No")
Qual_test_df["BsmtExposure"] = Qual_test_df["BsmtExposure"].fillna("No")
print("nullnum is ", Qual_train_df["BsmtExposure"].isnull().sum())
Qual_train_df["BsmtFinType1"] = Qual_train_df["BsmtFinType1"].fillna("Unf")
Qual_test_df["BsmtFinType1"] = Qual_test_df["BsmtFinType1"].fillna("Unf")
print("nullnum is ", Qual_train_df["BsmtFinType1"].isnull().sum())
Qual_train_df["BsmtFinType2"] = Qual_train_df["BsmtFinType2"].fillna("Unf")
Qual_test_df["BsmtFinType2"] = Qual_test_df["BsmtFinType2"].fillna("Unf")
print("nullnum is ", Qual_train_df["BsmtFinType2"].isnull().sum())
Qual_train_df["KitchenQual"] = Qual_train_df["KitchenQual"].fillna("TA")
Qual_test_df["KitchenQual"] = Qual_test_df["KitchenQual"].fillna("TA")
print("nullnum is ", Qual_train_df["KitchenQual"].isnull().sum())
Qual_train_df["Functional"] = Qual_train_df["Functional"].fillna("Typ")
Qual_test_df["Functional"] = Qual_test_df["Functional"].fillna("Typ")
print("nullnum is ", Qual_train_df["Functional"].isnull().sum())
Qual_train_df["FireplaceQu"] = Qual_train_df["FireplaceQu"].fillna("Gd")
Qual_test_df["FireplaceQu"] = Qual_test_df["FireplaceQu"].fillna("Gd")
print("nullnum is ", Qual_train_df["FireplaceQu"].isnull().sum())
Qual_train_df["GarageType"] = Qual_train_df["GarageType"].fillna("Attchd")
Qual_test_df["GarageType"] = Qual_test_df["GarageType"].fillna("Attchd")
print("nullnum is ", Qual_train_df["GarageType"].isnull().sum())
Qual_train_df["GarageFinish"] = Qual_train_df["GarageFinish"].fillna("Unf")
Qual_test_df["GarageFinish"] = Qual_test_df["GarageFinish"].fillna("Unf")
print("nullnum is ", Qual_train_df["GarageFinish"].isnull().sum())
Qual_train_df["GarageQual"] = Qual_train_df["GarageQual"].fillna("TA")
Qual_test_df["GarageQual"] = Qual_test_df["GarageQual"].fillna("TA")
print("nullnum is ", Qual_train_df["GarageQual"].isnull().sum())
Qual_train_df["GarageCond"] = Qual_train_df["GarageCond"].fillna("TA")
Qual_test_df["GarageCond"] = Qual_test_df["GarageCond"].fillna("TA")
print("nullnum is ", Qual_train_df["GarageCond"].isnull().sum())
Qual_train_df["SaleType"] = Qual_train_df["SaleType"].fillna("WD")
Qual_test_df["SaleType"] = Qual_test_df["SaleType"].fillna("WD")
print("nullnum is ", Qual_train_df["SaleType"].isnull().sum())
# # (2) Handle quantitative features case by case → choose between the mode, the mean and the median
print(Qant_train_df.isnull().sum())
print(Qant_test_df.isnull().sum())
# ### →"LotFrontage", "MasVnrArea", "BsmtFinSF1", "BsmtFinSF2", "BsmtUnfSF", "TotalBsmtSF",
# ### "BsmtFullBath", "BsmtHalfBath", "GarageYrBlt", "GarageCars", "GarageArea"を補完する
Qant_train_df.describe()
fig = plt.figure(figsize=(40, 10))
ax1 = fig.add_subplot(2, 12, 1)
ax1.set_xlabel("LotFrontage")
ax1.set_ylabel("SalePrice")
plt.scatter(Qant_train_df["LotFrontage"], Qant_train_df["SalePrice"])
ax2 = fig.add_subplot(2, 12, 2)
ax2.set_xlabel("MasVnrArea")
plt.scatter(Qant_train_df["MasVnrArea"], Qant_train_df["SalePrice"])
ax3 = fig.add_subplot(2, 12, 3)
ax3.set_xlabel("BsmtFinSF1")
plt.scatter(Qant_train_df["BsmtFinSF1"], Qant_train_df["SalePrice"])
ax4 = fig.add_subplot(2, 12, 4)
ax4.set_xlabel("BsmtFinSF2")
plt.scatter(Qant_train_df["BsmtFinSF2"], Qant_train_df["SalePrice"])
ax5 = fig.add_subplot(2, 12, 5)
ax5.set_xlabel("BsmtUnfSF")
plt.scatter(Qant_train_df["BsmtUnfSF"], Qant_train_df["SalePrice"])
ax6 = fig.add_subplot(2, 12, 6)
ax6.set_xlabel("TotalBsmtSF")
plt.scatter(Qant_train_df["TotalBsmtSF"], Qant_train_df["SalePrice"])
ax7 = fig.add_subplot(2, 12, 7)
ax7.set_xlabel("BsmtFullBath")
plt.scatter(Qant_train_df["BsmtFullBath"], Qant_train_df["SalePrice"])
ax8 = fig.add_subplot(2, 12, 8)
ax8.set_xlabel("BsmtHalfBath")
plt.scatter(Qant_train_df["BsmtHalfBath"], Qant_train_df["SalePrice"])
ax9 = fig.add_subplot(2, 12, 9)
ax9.set_xlabel("GarageYrBlt")
plt.scatter(Qant_train_df["GarageYrBlt"], Qant_train_df["SalePrice"])
ax10 = fig.add_subplot(2, 12, 10)
ax10.set_xlabel("GarageCars")
plt.scatter(Qant_train_df["GarageCars"], Qant_train_df["SalePrice"])
ax11 = fig.add_subplot(2, 12, 11)
ax11.set_xlabel("GarageArea")
plt.scatter(Qant_train_df["GarageArea"], Qant_train_df["SalePrice"])
# Impute with the mean
Qant_train_df["LotFrontage"] = Qant_train_df["LotFrontage"].fillna(
Qant_train_df["LotFrontage"].mean()
)
Qant_test_df["LotFrontage"] = Qant_test_df["LotFrontage"].fillna(
Qant_test_df["LotFrontage"].mean()
)
print("nullnum is ", Qant_train_df["LotFrontage"].isnull().sum())
# Impute with the median
Qant_train_df["MasVnrArea"] = Qant_train_df["MasVnrArea"].fillna(
Qant_train_df["MasVnrArea"].median()
)
Qant_test_df["MasVnrArea"] = Qant_test_df["MasVnrArea"].fillna(
Qant_test_df["MasVnrArea"].median()
)
print("nullnum is ", Qant_train_df["MasVnrArea"].isnull().sum())
# Impute with the median
Qant_train_df["BsmtFinSF1"] = Qant_train_df["BsmtFinSF1"].fillna(
Qant_train_df["BsmtFinSF1"].median()
)
Qant_test_df["BsmtFinSF1"] = Qant_test_df["BsmtFinSF1"].fillna(
Qant_test_df["BsmtFinSF1"].median()
)
print("nullnum is ", Qant_train_df["BsmtFinSF1"].isnull().sum())
# Impute with the median
Qant_train_df["BsmtFinSF2"] = Qant_train_df["BsmtFinSF2"].fillna(
    Qant_train_df["BsmtFinSF2"].median()
)
Qant_test_df["BsmtFinSF2"] = Qant_test_df["BsmtFinSF2"].fillna(
    Qant_test_df["BsmtFinSF2"].median()
)
print("nullnum is ", Qant_train_df["BsmtFinSF2"].isnull().sum())
# Impute with the mean
Qant_train_df["TotalBsmtSF"] = Qant_train_df["TotalBsmtSF"].fillna(
Qant_train_df["TotalBsmtSF"].mean()
)
Qant_test_df["TotalBsmtSF"] = Qant_test_df["TotalBsmtSF"].fillna(
Qant_test_df["TotalBsmtSF"].mean()
)
print("nullnum is ", Qant_train_df["TotalBsmtSF"].isnull().sum())
# Impute with the mean
Qant_train_df["BsmtUnfSF"] = Qant_train_df["BsmtUnfSF"].fillna(
Qant_train_df["BsmtUnfSF"].mean()
)
Qant_test_df["BsmtUnfSF"] = Qant_test_df["BsmtUnfSF"].fillna(
Qant_test_df["BsmtUnfSF"].mean()
)
print("nullnum is ", Qant_train_df["BsmtUnfSF"].isnull().sum())
# Impute with the mean
Qant_train_df["BsmtFullBath"] = Qant_train_df["BsmtFullBath"].fillna(
    Qant_train_df["BsmtFullBath"].mean()
)
Qant_test_df["BsmtFullBath"] = Qant_test_df["BsmtFullBath"].fillna(
    Qant_test_df["BsmtFullBath"].mean()
)
print("nullnum is ", Qant_train_df["BsmtFullBath"].isnull().sum())
# Impute with the mean
Qant_train_df["BsmtHalfBath"] = Qant_train_df["BsmtHalfBath"].fillna(
Qant_train_df["BsmtHalfBath"].mean()
)
Qant_test_df["BsmtHalfBath"] = Qant_test_df["BsmtHalfBath"].fillna(
Qant_test_df["BsmtHalfBath"].mean()
)
print("nullnum is ", Qant_train_df["BsmtHalfBath"].isnull().sum())
# Impute with the mean
Qant_train_df["GarageYrBlt"] = Qant_train_df["GarageYrBlt"] - 1900
Qant_test_df["GarageYrBlt"] = Qant_test_df["GarageYrBlt"] - 1900
Qant_train_df["GarageYrBlt"] = Qant_train_df["GarageYrBlt"].fillna(
Qant_train_df["GarageYrBlt"].mean()
)
Qant_test_df["GarageYrBlt"] = Qant_test_df["GarageYrBlt"].fillna(
Qant_train_df["GarageYrBlt"].mean()
)
print("nullnum is ", Qant_train_df["GarageYrBlt"].isnull().sum())
# Impute with the median
Qant_train_df["GarageArea"] = Qant_train_df["GarageArea"].fillna(
Qant_train_df["GarageArea"].median()
)
Qant_test_df["GarageArea"] = Qant_test_df["GarageArea"].fillna(
Qant_test_df["GarageArea"].median()
)
print("nullnum is ", Qant_train_df["GarageArea"].isnull().sum())
# Impute with the mean (of GarageCars itself, not LotFrontage)
Qant_train_df["GarageCars"] = Qant_train_df["GarageCars"].fillna(
    Qant_train_df["GarageCars"].mean()
)
Qant_test_df["GarageCars"] = Qant_test_df["GarageCars"].fillna(
    Qant_test_df["GarageCars"].mean()
)
print("nullnum is ", Qant_train_df["GarageCars"].isnull().sum())
Qant_train_df
# Look at the correlations
import seaborn as sns
# sns.scatterplot(x=Qant_train_df["LotFrontage"], y=Qant_train_df["SalePrice"])
sns.pairplot(
data=Qant_train_df,
vars=[
"LotFrontage",
"MasVnrArea",
"BsmtFinSF1",
"BsmtFinSF2",
"BsmtUnfSF",
"TotalBsmtSF",
"BsmtFullBath",
"BsmtHalfBath",
"GarageYrBlt",
"GarageCars",
"GarageArea",
"SalePrice",
],
)
sns.pairplot(Qant_train_df)
import seaborn as sns
Qual_train_df_pivot = pd.pivot_table(
data=Qual_train_df,
values="SalePrice",
columns="Neighborhood",
index="SaleCondition",
)
sns.heatmap(Qual_train_df_pivot)
pairplot1 = train_df_nonnan[
[
"MSSubClass",
"LotArea",
"OverallQual",
"OverallCond",
"YearBuilt",
"YearRemodAdd",
"BsmtFinSF1",
"TotalBsmtSF",
]
]
sns.pairplot(pairplot1)
train_df_nan.describe()
# # Converting data types
# # Plan for handling NaN values
# Alley: "Grvl"
# MasVnrType: "BrkFace" → mode
# MasVnrArea: mean
# BsmtQual: TA
# BsmtCond: TA
# BsmtExposure: No → mode
# BsmtFinType1: Unf → mode
# BsmtFinType2: Unf → mode
# Electrical: FuseA
# FireplaceQu: TA
# GarageType: Attchd → mode
# GarageYrBlt - 1900: mean
# GarageFinish: Unf
# GarageQual: TA
# GarageCond: TA
# PoolQC: TA
# Fence: MnPrv → mode
# MiscFeature: Shed
#
print(train_df_nan["Alley"].value_counts())
print(train_df_nan["MasVnrType"].value_counts())
print(train_df_nan["BsmtQual"].value_counts())
print(train_df_nan["BsmtCond"].value_counts())
print(train_df_nan["BsmtExposure"].value_counts())
print(train_df_nan["BsmtFinType1"].value_counts())
print(train_df_nan["BsmtFinType2"].value_counts())
print(train_df_nan["Electrical"].value_counts())
print(train_df_nan["FireplaceQu"].value_counts())
print(train_df_nan["GarageType"].value_counts())
print(train_df_nan["GarageFinish"].value_counts())
print(train_df_nan["GarageCond"].value_counts())
print(train_df_nan["PoolQC"].value_counts())
print(train_df_nan["Fence"].value_counts())
print(train_df_nan["MiscFeature"].value_counts())
print(train_df_nan["LotFrontage"].value_counts())
train_df_nan["Alley"] = train_df_nan["Alley"].fillna("Grvl")
test_df_nan["Alley"] = test_df_nan["Alley"].fillna("Grvl")
print(train_df_nan["Alley"].isnull().sum())
train_df_nan["MasVnrType"] = train_df_nan["MasVnrType"].fillna("BrkFace")
test_df_nan["MasVnrType"] = test_df_nan["MasVnrType"].fillna("BrkFace")
print(train_df_nan["MasVnrType"].isnull().sum())
train_df_nan["MasVnrArea"] = train_df_nan["MasVnrArea"].fillna(
train_df_nan["MasVnrArea"].mean()
)
test_df_nan["MasVnrArea"] = test_df_nan["MasVnrArea"].fillna(
train_df_nan["MasVnrArea"].mean()
)
print(train_df_nan["MasVnrArea"].isnull().sum())
train_df_nan["BsmtQual"] = train_df_nan["BsmtQual"].fillna("TA")
test_df_nan["BsmtQual"] = test_df_nan["BsmtQual"].fillna("TA")
print(train_df_nan["BsmtQual"].isnull().sum())
train_df_nan["BsmtCond"] = train_df_nan["BsmtCond"].fillna("TA")
test_df_nan["BsmtCond"] = test_df_nan["BsmtCond"].fillna("TA")
print(train_df_nan["BsmtCond"].isnull().sum())
train_df_nan["BsmtExposure"] = train_df_nan["BsmtExposure"].fillna("TA")
test_df_nan["BsmtExposure"] = test_df_nan["BsmtExposure"].fillna("TA")
print(train_df_nan["BsmtExposure"].isnull().sum())
train_df_nan["BsmtFinType1"] = train_df_nan["BsmtFinType1"].fillna("Unf")
test_df_nan["BsmtExposure"] = test_df_nan["BsmtExposure"].fillna("TA")
print(train_df_nan["BsmtFinType1"].isnull().sum())
train_df_nan["BsmtFinType2"] = train_df_nan["BsmtFinType2"].fillna("Unf")
test_df_nan["BsmtFinType2"] = test_df_nan["BsmtFinType2"].fillna("Unf")
print(train_df_nan["BsmtFinType2"].isnull().sum())
train_df_nan["Electrical"] = train_df_nan["Electrical"].fillna("FuseA")
test_df_nan["Electrical"] = test_df_nan["Electrical"].fillna("FuseA")
print(train_df_nan["Electrical"].isnull().sum())
train_df_nan["FireplaceQu"] = train_df_nan["FireplaceQu"].fillna("TA")
test_df_nan["FireplaceQu"] = test_df_nan["FireplaceQu"].fillna("TA")
print(train_df_nan["FireplaceQu"].isnull().sum())
train_df_nan["GarageType"] = train_df_nan["GarageType"].fillna("Attchd")
test_df_nan["GarageType"] = test_df_nan["GarageType"].fillna("Attchd")
print(train_df_nan["GarageType"].isnull().sum())
train_df_nan["GarageFinish"] = train_df_nan["GarageFinish"].fillna("Attchd")
test_df_nan["GarageFinish"] = test_df_nan["GarageFinish"].fillna("Attchd")
print(train_df_nan["GarageFinish"].isnull().sum())
train_df_nan["GarageQual"] = train_df_nan["GarageQual"].fillna("TA")
test_df_nan["GarageQual"] = test_df_nan["GarageQual"].fillna("TA")
print(train_df_nan["GarageQual"].isnull().sum())
train_df_nan["GarageCond"] = train_df_nan["GarageCond"].fillna("TA")
test_df_nan["GarageCond"] = test_df_nan["GarageCond"].fillna("TA")
print(train_df_nan["GarageCond"].isnull().sum())
train_df_nan["PoolQC"] = train_df_nan["PoolQC"].fillna("TA")
test_df_nan["PoolQC"] = test_df_nan["PoolQC"].fillna("TA")
print(train_df_nan["PoolQC"].isnull().sum())
train_df_nan["Fence"] = train_df_nan["Fence"].fillna("MnPrv")
test_df_nan["Fence"] = test_df_nan["Fence"].fillna("MnPrv")
print(train_df_nan["Fence"].isnull().sum())
train_df_nan["MiscFeature"] = train_df_nan["MiscFeature"].fillna("Shed")
test_df_nan["MiscFeature"] = test_df_nan["MiscFeature"].fillna("Shed")
print(train_df_nan["MiscFeature"].isnull().sum())
train_df_nan["LotFrontage"] = train_df_nan["LotFrontage"].fillna(
train_df_nan["LotFrontage"].mean()
)
test_df_nan["LotFrontage"] = test_df_nan["LotFrontage"].fillna(
train_df_nan["LotFrontage"].mean()
)
print(train_df_nan["LotFrontage"].isnull().sum())
# train_df_nan["GarageYrBlt"].head()
train_df_nan["GarageYrBlt"] = train_df_nan["GarageYrBlt"] - 1900
train_df_nan["GarageYrBlt"] = train_df_nan["GarageYrBlt"].fillna(
train_df_nan["GarageYrBlt"].mean()
)
test_df_nan["GarageYrBlt"] = test_df_nan["GarageYrBlt"] - 1900
test_df_nan["GarageYrBlt"] = test_df_nan["GarageYrBlt"].fillna(
train_df_nan["GarageYrBlt"].mean()
)
print(train_df_nan["GarageYrBlt"])
train_df = pd.concat([train_df_nan, train_df_nonnan], axis=1)
test_df = pd.concat([test_df_nan, test_df_nonnan], axis=1)
print(train_df.isnull().sum())
train_df.head(50)
test_df.head(50)
from copy import deepcopy  # deepcopy is not imported anywhere above in this notebook

train_df_dummy = deepcopy(train_df)
train_df_dummy = pd.get_dummies(
train_df_dummy,
columns=[
"Alley",
"MasVnrType",
"BsmtQual",
"BsmtCond",
"BsmtExposure",
"BsmtFinType1",
"BsmtFinType2",
"Electrical",
"FireplaceQu",
"GarageType",
"GarageFinish",
"GarageQual",
"GarageCond",
"PoolQC",
"Fence",
"MiscFeature",
"MSZoning",
"Street",
"LotShape",
"LandContour",
"Utilities",
"LotConfig",
"LandSlope",
"Neighborhood",
"Condition1",
"Condition2",
"BldgType",
"HouseStyle",
"RoofStyle",
"RoofMatl",
"Exterior1st",
"Exterior2nd",
"ExterQual",
"ExterCond",
"Foundation",
"Heating",
"HeatingQC",
"CentralAir",
"KitchenQual",
"Functional",
"PavedDrive",
"SaleType",
"SaleCondition",
],
)
test_df_dummy = deepcopy(test_df)
test_df_dummy = pd.get_dummies(
test_df_dummy,
columns=[
"Alley",
"MasVnrType",
"BsmtQual",
"BsmtCond",
"BsmtExposure",
"BsmtFinType1",
"BsmtFinType2",
"Electrical",
"FireplaceQu",
"GarageType",
"GarageFinish",
"GarageQual",
"GarageCond",
"PoolQC",
"Fence",
"MiscFeature",
"MSZoning",
"Street",
"LotShape",
"LandContour",
"Utilities",
"LotConfig",
"LandSlope",
"Neighborhood",
"Condition1",
"Condition2",
"BldgType",
"HouseStyle",
"RoofStyle",
"RoofMatl",
"Exterior1st",
"Exterior2nd",
"ExterQual",
"ExterCond",
"Foundation",
"Heating",
"HeatingQC",
"CentralAir",
"KitchenQual",
"Functional",
"PavedDrive",
"SaleType",
"SaleCondition",
],
)
train_df_dummy.head(300)
test_df_dummy.astype({"MasVnrArea": "float32", "GarageYrBlt": "float32"}).dtypes
# test_df_dummy.info(100)
x_train = train_df_dummy.drop("SalePrice", axis=1)
x_train = x_train.values
y_train = train_df_dummy["SalePrice"]
y_train = y_train.values
x_test = test_df_dummy.values
# Note: Perceptron is a classifier, so treating raw SalePrice values as class labels
# is only a rough sanity check, not a sensible model for this regression task.
from sklearn.linear_model import Perceptron
perceptron = Perceptron()
perceptron.fit(x_train, y_train)
Y_pred = perceptron.predict(x_test)
acc_perceptron = round(perceptron.score(x_train, y_train) * 100, 2)
print(acc_perceptron)
from sklearn.linear_model import LinearRegression
# LinearRegression
logreg = LinearRegression()
logreg = logreg.fit(x_train, y_train)
acc_log = round(logreg.score(x_train, y_train) * 100, 2)
# # Project — Credit Scoring
# A bank's first and most important goal is to make money. If the bank is too strict and refuses loans even to people who would have repaid them, it earns nothing in interest — declining a good borrower is a type I error. The opposite risk a lender faces is granting a loan to an unreliable borrower who may later default, so the bank loses money — a type II error.
#
# To keep this balance, banks use credit scoring, which predicts the probability that a loan will not be repaid. Loan applicants are sorted by this predicted probability (in descending order), which produces a scorecard — a ranking of clients from bad to good.
# Using the client's data, a bank employee can decide whether to grant the loan and, if so, at what interest rate.
# The algorithm used in credit scoring predicts a continuous value on the interval from 0 to 1; each bank chooses its own cut-off at which it declines a loan.
# The goal of this project is to choose the most effective model for assessing the quality of the bank's clients.
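# A toy illustration of the scorecard idea described above (synthetic numbers and a
# hypothetical 0.30 cut-off, not part of the project data): clients are ranked by
# predicted default probability and the bank approves those below its threshold.
import pandas as pd

toy = pd.DataFrame(
    {"client_id": [1, 2, 3, 4], "p_default": [0.82, 0.07, 0.35, 0.18]}
)
scorecard = toy.sort_values("p_default", ascending=False)
approved = scorecard[scorecard["p_default"] < 0.30]  # bank-chosen cut-off
print(scorecard)
print("approved:", approved["client_id"].tolist())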
import pandas as pd
import numpy as np
import pandas_profiling
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import (
LabelEncoder,
OneHotEncoder,
StandardScaler,
RobustScaler,
)
from sklearn.decomposition import PCA
from sklearn.feature_selection import f_classif, mutual_info_classif
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import ExtraTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import auc, roc_auc_score, roc_curve
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
from catboost import CatBoost, CatBoostClassifier, Pool
from catboost.utils import get_roc_curve
import lightgbm
from sklearn.model_selection import (
StratifiedKFold,
cross_val_score,
train_test_split,
GridSearchCV,
RandomizedSearchCV,
RepeatedStratifiedKFold,
)
from xgboost import XGBClassifier
# settings to display all columns
pd.set_option("display.max_columns", None)
RANDOM_SEED = 42
# from google.colab import drive
# drive.mount('/content/drive', force_remount=True)
# dir_data = './drive/MyDrive/Stud/SkillFactory/Unit_5/project_5/'
def get_boxplot(df, col):
fig, axes = plt.subplots(figsize=(14, 4))
sns.boxplot(x="default", y=col, data=df[df["sample"] == 1], ax=axes)
axes.set_title("Boxplot for " + col)
plt.show()
def age_to_cat(age):
if age <= 28:
cat_age = 0
return cat_age
if 28 < age <= 35:
cat_age = 1
return cat_age
if 35 < age <= 50:
cat_age = 2
return cat_age
if age > 50:
cat_age = 3
return cat_age
def show_metrics(y_test, y_pred, probs):
print("accuracy_score:", accuracy_score(y_test, y_pred))
print("precision_score:", precision_score(y_test, y_pred, zero_division=0))
print("recall_score:", recall_score(y_test, y_pred, zero_division=0))
print("f1_score:", f1_score(y_test, y_pred, zero_division=0))
print("roc_auc_score:", roc_auc_score(y_test, probs))
def show_basic_models(df):
train_df = df.query("sample == 1").drop(["sample", "client_id"], axis=1)
X = train_df.drop(["default"], axis=1).values
y = train_df["default"].values
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.20, random_state=RANDOM_SEED
)
# models with default settings
lr = LogisticRegression(max_iter=1000) # fix ConvergenceWarning
tree = DecisionTreeClassifier()
extra_tree = ExtraTreeClassifier()
ada = AdaBoostClassifier()
bagg = BaggingClassifier()
extra_ens = ExtraTreesClassifier()
gboost = GradientBoostingClassifier()
rforest = RandomForestClassifier()
catboo = CatBoostClassifier(silent=True) # silent=True to hide logs
models = [lr, tree, extra_tree, ada, bagg, extra_ens, gboost, rforest, catboo]
for model in models:
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
probs = model.predict_proba(X_test)
probs = probs[:, 1]
# zero_division=0 to fix zero division Warning
print("Results for:", model)
show_metrics(y_test, y_pred, probs)
print("---------")
print()
def compute_selected_model(model):
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
probs = model.predict_proba(X_test)
probs = probs[:, 1]
show_metrics(y_test, y_pred, probs)
return y_pred, probs
path = "/kaggle/input/sf-dst-scoring/"
train = pd.read_csv(path + "train.csv")
test = pd.read_csv(path + "test.csv")
sample = pd.read_csv(path + "sample_submission.csv")
# train = pd.read_csv('train.csv')
# test = pd.read_csv('test.csv')
# sample = pd.read_csv('sample_submission.csv')
# train = pd.read_csv(dir_data+'train.csv')
# test = pd.read_csv(dir_data+'test.csv')
# sample = pd.read_csv(dir_data+'sample_submission.csv')
# profile = train.profile_report()
# profile.to_file('data_report.html')
print(train.info())
print()
print("Train size: ", train.shape)
print()
train.head()
print(test.info())
print()
print("Test size: ", test.shape)
print()
test.head(5)
print(sample.info())
print()
print("Sample size: ", sample.shape)
print()
sample.head(5)
# ## Field descriptions
# * client_id - client identifier
# * education - education level
# * sex - borrower's sex
# * age - borrower's age
# * car - flag: owns a car
# * car_type - flag: the car is a foreign brand
# * decline_app_cnt - number of previously declined applications
# * good_work - flag: has a "good" job
# * bki_request_cnt - number of requests the client made to the credit bureau (BKI) about their own credit history
# * home_address - home address category
# * work_address - work address category
# * income - borrower's income
# * foreign_passport - has an international passport
# * sna - borrower's connections with the bank's clients
# * first_time - how long information about the borrower has been available
# * score_bki - score based on credit-bureau (BKI) data
# * region_rating - region rating
# * app_date - application date
# * default - loan default flag
# look at the target variable
train["default"].value_counts().plot(
kind="barh",
title='Distribution of "Default" feature',
ylabel="cases",
xlabel="default",
)
# Note: the sample is imbalanced.
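# A hedged sketch (not used below) of two common ways to handle the imbalance:
# undersampling the majority class, or passing class_weight="balanced" to the model.
from sklearn.utils import resample

majority = train[train["default"] == 0]
minority = train[train["default"] == 1]
majority_down = resample(
    majority, replace=False, n_samples=len(minority), random_state=RANDOM_SEED
)
print(pd.concat([majority_down, minority])["default"].value_counts())
# alternatively: LogisticRegression(class_weight="balanced", max_iter=1000)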
# concatenate the datasets to make preprocessing easier
train["sample"] = 1 # train
test["sample"] = 0 # test
test["default"] = -1
data = pd.concat([train, test], ignore_index=True)
# num of unique values, first 10 unique values, null values count, type
data_agg = (
data.agg({"nunique", lambda s: s.unique()[:10]})
.append(pd.Series(data.isnull().sum(), name="null"))
.append(pd.Series(data.dtypes, name="dtype"))
.transpose()
)
data_agg
print(data.info())
print()
print(data.shape)
# ## EDA
# * The data set contains 19 features
# * There are 110,148 clients in total
# * Missing values: 0.04%, only in the education feature
# * No duplicates
# * client_id has no repeated values, all values are unique
# * app_date has only 120 distinct values (0.1%). Most of the data covers February-April 2014
# * education contains 5 categories:
# 1. SCH (52%) - School;
# 2. GRD (31%) - Graduated (Master degree);
# 3. UGR (13%) - UnderGraduated (Bachelor degree);
# 4. PGR (1.7%) - PostGraduated;
# 5. ACD (0.3%) - Academic Degree.
# * sex has 2 values:
# 1. Female (56%);
# 2. Male (44%);
# * age takes integer values, concentrated at younger ages with a long right tail:
# -- Minimum 21
# -- Median 37
# -- Mean 39.2
# -- Maximum 72
# -- Interquartile range (IQR) 18
# * car is a binary feature, 67% of borrowers do not have a car
# * car_type is a binary flag for a foreign-brand car; 81% of borrowers drive a domestic car
# * decline_app_cnt takes integer values with a long right tail. Most values (83%) are zero and the vast majority lie between 0 and 6, so the feature could be converted to a categorical one
# -- Maximum 33
# * good_work: most borrowers do not have a "good" job (83%)
# * score_bki: 93% of the values are unique, the distribution looks normal and contains negative values - a scaler may already have been applied
# * bki_request_cnt: natural numbers ranging from 0 to 53 with a median of 1; most values lie between 0 and 8
# * region_rating ranges between 20 and 80. Categorical feature; the most frequent value is 50 (37%)
# * home_address, work_address are categorical features with 3 levels.
# * income has a very wide range, from 1,000 to 1,000,000; it could either be turned into a categorical feature or log-transformed (see the sketch after this list)
# * sna / first_time are categorical features with 4 levels
# * foreign_passport is a binary feature, 67% of borrowers have an international passport
# * default is the target. Binary, with the overwhelming majority repaying without problems. The sample is imbalanced, so undersampling should be tried during modelling.
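# Sketch for the income remark above: a log transform pulls in the heavy right tail
# (the notebook later normalizes income differently, so this is only an illustration).
print("income skew before:", data["income"].skew())
print("income skew after :", np.log1p(data["income"]).skew())
np.log1p(data["income"]).hist(bins=50)
plt.title("log1p(income)")
plt.show()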
# exclude client_id, app_date, default, sample
num_cols = ["age", "decline_app_cnt", "score_bki", "income", "bki_request_cnt"]
cat_cols = [
"education",
"work_address",
"home_address",
"region_rating",
"sna",
"first_time",
]
bin_cols = ["sex", "car", "car_type", "good_work", "foreign_passport"]
# ## Quantitative features
# look at the distributions of the quantitative features
for i in num_cols:
plt.figure()
sns.displot(data[i][data[i] > 0].dropna(), kde=False, rug=False)
plt.title(f"Distribution of {i}")
plt.show()
# look at outliers
# and how the target variable is distributed across the quantitative features
for col in num_cols:
get_boxplot(data, col)
# Brief conclusions:
# * default is more typical of younger borrowers
# * a high score_bki is associated with default
# * the region rating influences the target variable
# * frequent credit-bureau requests are typical of less confident clients and can signal a coming default
# * on average, a higher income means a lower probability of default
# ## Categorical features
# Clients with a lower education level default more often. At the same time, they take out loans more often.
# The lower the region rating, the more frequent the defaults — except for the two lowest-rated regions.
# Home and work addresses are distributed in a similar way, so there may be multicollinearity.
# sna categories: the lower the category, the fewer defaults. The same holds for first_time.
data.education.value_counts().plot(
kind="bar",
figsize=(8, 6),
color="r",
title="Distribution of clients by Education",
xlabel="education level",
ylabel="number of clients",
)
print("Пропущенные значения:", data.education.isna().sum())
print()
# заполним пропуски наиболее часто встречающимся значением
data.education = data.education.fillna("SCH")
# look at income by education level
plt.figure(figsize=(15, 8))
plt.title("Distribution of Income by Education level")
sns.boxplot(x="education", y="income", data=data, showfliers=False)
# look at how the region of residence relates to the education level
plt.figure(figsize=(15, 8))
plt.title("Distribution of Education level by Region")
sns.boxplot(x="education", y="region_rating", data=data, showfliers=False)
# People with a higher education level live in higher-rated regions, and vice versa.
# look at how defaults are distributed across the features
plt.figure(figsize=[20, 20])
i = 1
for k in cat_cols:
plt.subplot(4, 3, i)
sns.barplot(
x=k,
y="proportion",
hue="default",
data=data[[k, "default"]]
.value_counts(normalize=True)
.rename("proportion")
.reset_index(),
)
plt.title("Clients default distribution according to\n" + k, fontsize=15)
i += 1
plt.tight_layout()
plt.show()
# encode the categorical features
mappc = {}
label_encoder = LabelEncoder()
for col in cat_cols:
data[col] = label_encoder.fit_transform(data[col])
mappc[col] = dict(enumerate(label_encoder.classes_))
print(mappc)
# ## Binary features
# Women take out loans more often than men, while the relative default rate is almost the same.
# Borrowers who own a car are more reliable.
# Borrowers with a foreign-brand car are more reliable than those with a domestic one. Later we combine the two flags into a new categorical feature: no car / domestic car / foreign car.
# Borrowers with a good job and an international passport repay more often than the opposite groups.
# look at how defaults are distributed across the features
plt.figure(figsize=[20, 20])
i = 1
for k in bin_cols:
plt.subplot(4, 3, i)
sns.barplot(
x=k,
y="proportion",
hue="default",
data=data[[k, "default"]]
.value_counts(normalize=True)
.rename("proportion")
.reset_index(),
)
plt.title("Clients default distribution according to\n" + k, fontsize=15)
i += 1
plt.tight_layout()
plt.show()
# encode the binary features
mapp = {}
label_encoder = LabelEncoder()
for col in bin_cols:
data[col] = label_encoder.fit_transform(data[col])
mapp[col] = dict(enumerate(label_encoder.classes_))
print(mapp)
# ## Correlation analysis
# There is a substantial correlation between home address and work address; one of the two features could be dropped.
# car and car_type are strongly correlated: having no car automatically means the "foreign car" flag is absent. Below the two columns are combined into a single new categorical feature, which reduces the dimensionality of the data set without losing information.
# There is a fairly strong inverse relationship between sna and first_time. One way to read it: the longer a person has been a client of the bank, the more connections they have with other clients inside the bank (e.g. guarantors) and the lower the default risk.
# The most statistically significant features are score_bki and decline_app_cnt. Overall there is no multicollinearity, which is confirmed by the rank of the correlation matrix, and the non-zero determinant suggests the inverse matrix exists.
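# A small check backing the rank/determinant remark above: a full-rank correlation
# matrix with a non-zero determinant means there is no exact multicollinearity.
corr_num = data[num_cols].corr().values
print("rank:", np.linalg.matrix_rank(corr_num), "of", corr_num.shape[0])
print("determinant:", np.linalg.det(corr_num))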
plt.title("Correlation Matrix of dataset features")
plt.rcParams["figure.figsize"] = (15, 10)
sns.heatmap(data.corr(), vmin=-1, vmax=1, annot=True)
plt.title("Correlation Matrix of dataset features (numerical)")
plt.rcParams["figure.figsize"] = (15, 10)
sns.heatmap(data[num_cols].corr(), vmin=-1, vmax=1, annot=True)
plt.title("Correlation Matrix of dataset features (categorical)")
plt.rcParams["figure.figsize"] = (15, 10)
sns.heatmap(data[cat_cols].corr(), vmin=-1, vmax=1, annot=True)
plt.title("Correlation Matrix of dataset features (binary)")
plt.rcParams["figure.figsize"] = (15, 10)
sns.heatmap(data[bin_cols].corr(), vmin=-1, vmax=1, annot=True)
# ## Baseline models
# As expected, the simple classifiers (LogisticRegression, DecisionTreeClassifier, ExtraTreeClassifier) performed noticeably worse (ROC AUC around 0.54-0.55) than the ensembles and specialised models (ROC AUC around 0.63-0.73 for AdaBoostClassifier(), BaggingClassifier(), ExtraTreesClassifier(), GradientBoostingClassifier(), RandomForestClassifier(), CatBoostClassifier()).
# We return to model evaluation after generating new features and dropping features with high pairwise correlation.
# temporarily drop this column;
# we come back to it when creating new features
df = data.drop(["app_date"], axis=1)
show_basic_models(df)
# ## Feature engineering
# ### Address decomposition
# As established earlier, home_address and work_address are strongly correlated. We reduce the dimensionality of the feature matrix with sklearn.decomposition.
# number of days since the "epoch" (earliest application date) of the dataset
data["app_date"] = pd.to_datetime(data["app_date"], format="%d%b%Y")
data_min = min(data["app_date"])
data["days"] = (data["app_date"] - data_min).dt.days.astype("int")
data["day"] = data["app_date"].dt.day
data["month"] = data["app_date"].dt.month
data.drop(["app_date"], axis=1, inplace=True)
# mean income for each age
mean_income = data.groupby("age")["income"].mean().to_dict()
data["mean_income_age"] = data["age"].map(mean_income)
# maximum income for each age
max_income = data.groupby("age")["income"].max().to_dict()
data["max_income_age"] = data["age"].map(max_income)
# minimum income for each age
min_income = data.groupby("age")["income"].min().to_dict()
data["min_income_age"] = data["age"].map(min_income)
# normalise income
data["normalized_income"] = abs(
(data.income - data.mean_income_age) / data.max_income_age
)
data.drop(["mean_income_age", "max_income_age"], axis=1, inplace=True)
# mean number of credit-bureau requests for each age
mean_bki = data.groupby("age")["bki_request_cnt"].mean().to_dict()
data["mean_requests_age"] = data["age"].map(mean_bki)
# maximum number of credit-bureau requests for each age
max_bki = data.groupby("age")["bki_request_cnt"].max().to_dict()
data["max_requests_age"] = data["age"].map(max_bki)
# normalise the request counts
data["normalized_req"] = abs(
(data.bki_request_cnt - data.mean_requests_age) / data.max_requests_age
)
data.drop(["mean_requests_age", "max_requests_age"], axis=1, inplace=True)
# mean number of credit-bureau requests by income level
mean_bki_inc = data.groupby("income")["bki_request_cnt"].mean().to_dict()
data["mean_requests_income"] = data["income"].map(mean_bki_inc)
# mean income by region rating
mean_income_rat = data.groupby("region_rating")["income"].mean().to_dict()
data["mean_income_region"] = data["region_rating"].map(mean_income_rat)
data.drop(["income"], axis=1, inplace=True)
# reduce the dimensionality of the matrix without losing information
# 0 - no car, 1 - domestic car, 2 - foreign car
data["car_comb"] = data["car"] + data["car_type"]
data["car_comb"] = data["car_comb"].astype("category")
data.drop(["car", "car_type"], axis=1, inplace=True)
# split age into four categories
data["age_cat"] = 0  # create an empty column for the new feature
data["age_cat"] = data["age"].apply(lambda x: age_to_cat(x))
data.drop("age", axis=1, inplace=True)
label_encoder = LabelEncoder()
data["age_cat"] = label_encoder.fit_transform(data["age_cat"])
# Sort out decline_app_cnt and bki_request_cnt by groups:
data["decline_cat"] = data["decline_app_cnt"].apply(lambda x: 4 if x >= 4 else x)
data["bki_request_cat"] = data["bki_request_cnt"].apply(lambda x: 6 if x >= 6 else x)
data.drop(["decline_app_cnt", "bki_request_cnt"], axis=1, inplace=True)
# Address decomposition
# pull the two address columns out of the dataset
data_addresses = data[["work_address", "home_address"]].values
# create a scaler
scaler = StandardScaler()
scaled_data = scaler.fit_transform(data_addresses)
# We have two vectors; reduce them to a single component that keeps the most significant information.
pca = PCA(n_components=1)
pca.fit(scaled_data)
pca_data = pca.transform(scaled_data)
data["pca_address"] = pca_data
data["pca_address"] = data["pca_address"] + 5
data["pca_address"] = data["pca_address"].apply(lambda x: np.log(x) + 1)
# удалим ненужные столбцы
data.drop(["home_address", "work_address"], axis=1, inplace=True)
# fix 2 zeros in normalized_req
data = data.fillna(data.mean())
# обновим списки признаков в переменных для обработки
# exclude day, month (high correlation)
num_cols = [
"score_bki",
"days",
"min_income_age",
"normalized_income",
"normalized_req",
"mean_requests_income",
"mean_income_region",
]
cat_cols = [
"education",
"region_rating",
"sna",
"first_time",
"car_comb",
"age_cat",
"decline_cat",
"bki_request_cat",
"pca_address",
]
bin_cols = [
"sex",
"good_work",
"foreign_passport",
]
# ## Выбросы и стандартизация
for col in num_cols:
median = data[col].median()
IQR = data[col].quantile(0.75) - data[col].quantile(0.25)
perc25 = data[col].quantile(0.25)
perc75 = data[col].quantile(0.75)
print("Column: ", col)
print(
"25%: {},".format(perc25),
"75%: {},".format(perc75),
"IQR: {}, ".format(IQR),
"Borderline: [{f}, {l}].".format(f=perc25 - 1.5 * IQR, l=perc75 + 1.5 * IQR),
)
print()
# replace outliers with border-values
data[col].loc[data[col] < (perc25 - 1.5 * IQR)] = perc25 - 1.5 * IQR
data[col].loc[data[col] > (perc75 + 1.5 * IQR)] = perc75 + 1.5 * IQR
scaler = RobustScaler()
data[num_cols] = scaler.fit_transform(data[num_cols].values)
data.info()
data.head()
# ## Моделирование после добавления признаков
# Настройка моделей еще не проведена, использовались стандартные настройки.
# Результаты по ROC AUC для DecisionTreeClassifier, ExtraTreeClassifier остались на прежнем уровне в 0.54
# В то же время, метрика LogisticRegression увеличена с 0.55 до 0.739.
# Лучший результат после добавления новых признаков у GradientBoostingClassifier - 0.7418.
# Метрики AdaBoostClassifier(), BaggingClassifier(), ExtraTreesClassifier(), RandomForestClassifier(), CatBoostClassifier() - в диапазоне 0.65-0.73.
# Вернемся к тестированию качества моделей после One-Hot Encoding категориальных признаков и исключения попарно коррелирующихся признаков.
show_basic_models(data)
# ## One-Hot Encoding для категориальных признаков
data = pd.get_dummies(data, prefix=cat_cols, columns=cat_cols)
data.info()
# ### Корреляционный анализ после генерации новых признаков
plt.title("Correlation Matrix of dataset features")
plt.rcParams["figure.figsize"] = (30, 20)
sns.heatmap(data.corr(), vmin=-1, vmax=1, annot=False)
# show features with corr > 0.7
corr = data.corr().abs()
corr_table = corr.unstack()
corr_table_sorted = corr_table.sort_values(kind="quicksort", ascending=False)
corr_exclude_ones = corr_table_sorted[corr_table_sorted != 1]
high_corr = corr_exclude_ones[corr_exclude_ones >= 0.7]
high_corr
# exclude features due high corr
data.drop(
["day", "month", "region_rating_6", "decline_cat_1", "education_3"],
axis=1,
inplace=True,
)
# ## Моделирование после One-Hot Encoding
# Настройка моделей еще не проведена, использовались стандартные настройки.
# Результаты по ROC AUC для DecisionTreeClassifier, ExtraTreeClassifier остались на прежнем уровне в 0.53-0.54
# В то же время, метрика LogisticRegression увеличена с 0.739 до 0.746 (теперь это лучшая модель).
# Также, метрика выше 0.74 у GradientBoostingClassifier и AdaBoostClassifier.
# Метрики BaggingClassifier(), ExtraTreesClassifier(), RandomForestClassifier(), CatBoostClassifier() - в диапазоне 0.65-0.73.
# Вернемся к тестированию качества моделей после оценки статистической значимости признаков и исключения малозначимых фич.
show_basic_models(data)
# ## Статистическая значимость признаков
data_temp = data.loc[data["sample"] == 1].drop(["client_id", "sample"], axis=1)
imp_num = pd.Series(
f_classif(data_temp[num_cols], data_temp["default"])[0], index=num_cols
)
imp_num.sort_values(inplace=True)
imp_num.plot(kind="barh", color="pink", title="Numeric Features Importance")
imp_num
imp_bin = pd.Series(
mutual_info_classif(
data_temp[bin_cols], data_temp["default"], discrete_features=False
),
index=bin_cols,
)
imp_bin.sort_values(inplace=True)
imp_bin.plot(kind="barh", color="pink", title="Binary Features Importance")
imp_bin
# update cat_cols list of columns
cols = list(data.columns)
num_bin_cols = [col for col in cols if (col in bin_cols or col in num_cols)]
cat_cols = [col for col in cols if col not in num_bin_cols]
cat_cols.remove("client_id")
cat_cols.remove("default")
cat_cols.remove("sample")
imp_cat = pd.Series(
mutual_info_classif(
data_temp[cat_cols], data_temp["default"], discrete_features=False
),
index=cat_cols,
)
imp_cat.sort_values(inplace=True)
imp_cat.plot(kind="barh", color="pink", title="Categorical Features Importance")
imp_cat
# ## Моделирование после исключения малозначимых признаков
# После исключения малозначимых признаков по-прежнему лучшие метрики ROC AUC показывает LogisticRegression (0.746), далее следуют GradientBoostingClassifier (0.743) и AdaBoostClassifier (0.740). Остальные модели демонстрируют метрику ниже 0.74.
# За базовую возьмем LogisticRegression и будем настраивать гиперпараметры этого варианта классификатора.
cat_cols_to_drop = list(imp_cat[imp_cat == 0].index)
data.drop(cat_cols_to_drop, axis=1, inplace=True)
show_basic_models(data)
# ## Промежуточные выводы
# Бинарные признаки:
# * Car и car_type сильно взаимозависимы. Объединены в один признак car_comb с тремя характеристиками.
# * Количество должников среди мужчин и женщин примерно одинаковое, но женщины берут кредиты чаще.
# Категориальные признаки:
# * Люди со слабым образованием возвращают кредиты реже, чем люди с сильным образованием.
# * Чем больше отношений у клиента с другими клиентами в банке - тем лучше и меньше просроченных кредитов.
# * Люди с более крутым образованием живут в регионах с более высоким рейтингом.
# * Чем выше рейтинг региона, тем ниже риск дефолта.
# * Имеется корреляция между sna и first_time, Home и work addresses (признак заменен на объединенный посредством PCA).
# Numerical:
# * score_bki имеет распределение, близкое к нормальному
# * В данных есть выбросы. Устранены через преобразование признаков в категориальные, логарифмирование или с использованием Scaler
# * Между количественными признаками нет сильных корреляций
# * Наличие иномарки коррелирует с уровнем дохода
# * Количество связей с другими клиентами банка коррелирует с наличием заграничного паспорта
# Наиболее статистически значимые признаки:
# * sna
# * pca_address (home & work addresses)
# * first_time
# * score_bki
# * mean_income_region
# Поскольку мы имеем достаточно много неочевидных корреляций между признаками, стоит попробовать использовать logistic regressions (при этом descicion tree models показали довольно слабые результаты).
# ## Моделирование
# Сравним logisticRegression со стандартными настройками и настройкой class_weight='balanced'.
train_df = data.query("sample == 1").drop(["sample", "client_id"], axis=1)
test_df = data.query("sample == 0").drop(["sample", "client_id"], axis=1)
X = train_df.drop(["default"], axis=1).values
y = train_df["default"].values
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.20, random_state=RANDOM_SEED
)
# default model
lr = LogisticRegression(max_iter=500)
y_pred, probs = compute_selected_model(lr)
# 0.7466261107147693
# penalty=none is stronger than penalty=l2
lr = LogisticRegression(penalty="none", max_iter=1000)
y_pred, probs = compute_selected_model(lr)
# 0.7466358446849413
# multi_class=multinominal is weaker than auto or ovr (them equal)
lr = LogisticRegression(penalty="l2", max_iter=1000, multi_class="ovr")
y_pred, probs = compute_selected_model(lr)
# 0.7466261107147693
# higher max_iter - higher metrics (costs time)
lr = LogisticRegression(penalty="l2", max_iter=1500, solver="sag")
y_pred, probs = compute_selected_model(lr)
# 0.7466335593180313
# saga is weaker than sag with equal max_iter
# both sag and saga weaker than default solver
lr = LogisticRegression(penalty="l2", max_iter=1500, solver="saga")
y_pred, probs = compute_selected_model(lr)
# 0.7466332207451557
# balanced is weaker than default settings in roc_auc
# but f1_score is significant stronger
lr = LogisticRegression(class_weight="balanced", max_iter=500)
y_pred, probs = compute_selected_model(lr)
# 0.7461245996428649
# ### Краткие выводы по LogReg
# 1. Настройка сбалансированного веса классов ухудшает метрику ROC AUC по сравнению с настройками по умолчанию (f1_score, при этом, улучшается существенно).
# 2. Solvers sag, saga - слабее, чем solver по умолчанию (lbfgs).
# 3. На заданном наборе данных penalty=none эффективнее penalty=l2.
# 4. Мультикласс-настройка multinominal формирует более слабую модель. Ovr формирует идентичную модель с настройкой по умолчанию - auto.
# ## Оценка ROC AUC и других метрик
# best LogReg model from previous chapter
lr = LogisticRegression(penalty="none", max_iter=1000)
y_pred, probs = compute_selected_model(lr)
fpr, tpr, threshold = roc_curve(y_test, probs)
roc_auc = roc_auc_score(y_test, probs)
plt.figure()
plt.plot([0, 1], label="Baseline", linestyle="--")
plt.plot(fpr, tpr, label="Regression")
plt.title("Logistic Regression ROC AUC = %0.3f" % roc_auc)
plt.ylabel("True Positive Rate")
plt.xlabel("False Positive Rate")
plt.legend(loc="lower right")
plt.show()
cm = confusion_matrix(y_test, y_pred)
cmd = ConfusionMatrixDisplay(cm, display_labels=["non_default", "default"])
cmd.plot()
cmd.ax_.set(xlabel="Predicted", ylabel="True")
# LogisticRegression на стандартных настройках имеет большцю ROC AUC, чем с настройкой class_weight=balanced. В то же время, f1_score выше у class_weight=balanced.
# Были проведены эксперименты с различными классификаторами (XGBoost, GradientBoostingClassifier, AdaBoost - в ранних версиях каггл-ноутбуков), и, например, XGBoostClassifier с подбором параметров через RandomizedSearchCV давал более эффективные результаты по ROC AUC. тем не менее, в учебных целях и для чистоты эксперимента по сравниванию настроек по умолчанию и настроек, получаемых методами подбора гиперпараметров, осуществим эту работу для LogisticRegression.
# ## Подбор гиперпараметров
train_data = data.query("sample == 1").drop(["sample", "client_id"], axis=1)
test_data = data.query("sample == 0").drop(["sample", "client_id"], axis=1)
X_train = train_data.drop(["default"], axis=1)
y_train = train_data.default.values
X_test = test_data.drop(["default"], axis=1)
y_test = test_data.default.values
# Penalty - Used to specify the norm used in the penalization
# C - Inverse of regularization strength;
# must be a positive float.
# Like in support vector machines, smaller values specify stronger regularization.
# solver - Algorithm to use in the optimization problem.
params = {
"penalty": ["l1", "l2", "elasticnet"],
"C": np.logspace(-4, 4, 20),
"solver": ["newton-cg", "lbfgs", "liblinear", "sag", "saga"],
}
lr = LogisticRegression(max_iter=1000, class_weight="balanced")
folds = 3
param_comb = 5
skf = StratifiedKFold(n_splits=folds, shuffle=True, random_state=RANDOM_SEED)
random_search = RandomizedSearchCV(
lr,
param_distributions=params,
n_iter=param_comb,
scoring="roc_auc",
n_jobs=4,
cv=skf.split(X_train, y_train),
verbose=3,
random_state=RANDOM_SEED,
)
random_search.fit(X_train, y_train)
print("\n Best estimator:")
print(random_search.best_estimator_)
print("\n Best hyperparameters:")
print(random_search.best_params_)
y_pred = random_search.predict_proba(X_test)
results_df = pd.DataFrame(
data={"client_id": test["client_id"], "default": y_pred[:, 1]}
)
results_df.to_csv("submission.csv", index=False)
results_df
# #### GradientBoostingClassifier, ROC AUC = 0.73727
# gbc = GradientBoostingClassifier()
# parameters = {
# "n_estimators":[5,50,250,500],
# "max_depth":[1,3,5,7,9],
# "learning_rate":[0.01,0.1,1,10,100]
# }
# cv = GridSearchCV(gbc, parameters, cv=5)
# cv.fit(X_train, y_train)
# y_pred = cv.predict_proba(X_test)
# results_df = pd.DataFrame(data={'client_id':test['client_id'], 'default':y_pred[:,1]})
# results_df.to_csv('submission-gbc.csv', index=False)
# results_df
# #### XGBoostClassifier, ROC AUC = 0.74226
# params = {
# 'min_child_weight': [1, 5, 10],
# 'gamma': [0.5, 1, 1.5, 2, 5],
# 'subsample': [0.6, 0.8, 1.0],
# 'colsample_bytree': [0.6, 0.8, 1.0],
# 'max_depth': [3, 4, 5]
# }
# xgb = XGBClassifier(learning_rate=0.02,
# n_estimators=600,
# objective='binary:logistic',
# nthread=1,
# use_label_encoder=False,
# eval_metric='logloss')
# folds = 3
# param_comb = 5
# skf = StratifiedKFold(n_splits=folds,
# shuffle=True,
# random_state=RANDOM_SEED)
# random_search = RandomizedSearchCV(xgb,
# param_distributions=params,
# n_iter=param_comb,
# scoring='roc_auc',
# n_jobs=4,
# cv=skf.split(X_train, y_train),
# verbose=3,
# random_state=RANDOM_SEED)
# random_search.fit(X_train, y_train)
# y_pred = random_search.predict_proba(X_test)
# results_df = pd.DataFrame(data={'client_id':test['client_id'], 'default':y_pred[:,1]})
# results_df.to_csv('submission-xgb.csv', index=False)
# results_df
# #### AdaBoost, ROC AUC = 0.73713
# model = AdaBoostClassifier()
# define the grid of values to search
# grid = dict()
# grid['n_estimators'] = [10, 50, 100, 500]
# grid['learning_rate'] = [0.0001, 0.001, 0.01, 0.1, 1.0]
# define the evaluation procedure
# cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
# define the grid search procedure
# grid_search = GridSearchCV(estimator=model, param_grid=grid, n_jobs=-1, cv=cv, scoring='roc_auc')
# execute the grid search
# grid_search.fit(X_train, y_train)
# y_pred = grid_search.predict_proba(X_test)
# results_df = pd.DataFrame(data={'client_id':test['client_id'], 'default':y_pred[:,1]})
# results_df.to_csv('submission-adaboost.csv', index=False)
# results_df
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/207/69207362.ipynb
| null | null |
[{"Id": 69207362, "ScriptId": 18379876, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6918837, "CreationDate": "07/28/2021 05:40:59", "VersionNumber": 24.0, "Title": "project_5_credit_scoring", "EvaluationDate": "07/28/2021", "IsChange": true, "TotalLines": 989.0, "LinesInsertedFromPrevious": 48.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 941.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# # Проект - Credit Scoring
# Первая и самая важная цель банка — заработать деньги. Если банк будет вести слишком жесткую политику, не выдавая кредиты даже тем, кто вернул бы деньги, то он не заработает на процентах. Отказ в кредите хорошему заемщику — ошибка первого рода. Риск, с которым сталкивается кредитная организация — дать кредит неблагонадёжному заёмщику, который впоследствии может допустить дефолт, а банк потеряет деньги — ошибка второго рода.
#
# Чтобы соблюсти баланс, используется кредитный скоринг, который позволяет спрогнозировать вероятность невозврата кредита. Клиентов, обратившихся за кредитом, сортируют по этой предсказанной вероятности (по убыванию), и получается скоркарта — рейтинг клиентов от плохих к хорошим.
# Используя данные о клиенте, работник банка может решить, выдавать ли клиенту кредит, и если да, то под какой процент.
# При этом используемый в кредитном скоринге алгоритм позволяет предсказывать значения непрерывной зависимой переменной на интервале от до . Банки самостоятельно определяют для себя значения, при которых они принимают решение об отказе в кредите.
# Цель проекта - выбрать наиболее эффективную модель оценки качества клиентов банка.
import pandas as pd
import numpy as np
import pandas_profiling
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import (
LabelEncoder,
OneHotEncoder,
StandardScaler,
RobustScaler,
)
from sklearn.decomposition import PCA
from sklearn.feature_selection import f_classif, mutual_info_classif
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import ExtraTreeClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import auc, roc_auc_score, roc_curve
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
from catboost import CatBoost, CatBoostClassifier, Pool
from catboost.utils import get_roc_curve
import lightgbm
from sklearn.model_selection import (
StratifiedKFold,
cross_val_score,
train_test_split,
GridSearchCV,
RandomizedSearchCV,
RepeatedStratifiedKFold,
)
from xgboost import XGBClassifier
# settings to display all columns
pd.set_option("display.max_columns", None)
RANDOM_SEED = 42
# from google.colab import drive
# drive.mount('/content/drive', force_remount=True)
# dir_data = './drive/MyDrive/Stud/SkillFactory/Unit_5/project_5/'
def get_boxplot(df, col):
fig, axes = plt.subplots(figsize=(14, 4))
sns.boxplot(x="default", y=col, data=df[df["sample"] == 1], ax=axes)
axes.set_title("Boxplot for " + col)
plt.show()
def age_to_cat(age):
if age <= 28:
cat_age = 0
return cat_age
if 28 < age <= 35:
cat_age = 1
return cat_age
if 35 < age <= 50:
cat_age = 2
return cat_age
if age > 50:
cat_age = 3
return cat_age
def show_metrics(y_test, y_pred, probs):
print("accuracy_score:", accuracy_score(y_test, y_pred))
print("precision_score:", precision_score(y_test, y_pred, zero_division=0))
print("recall_score:", recall_score(y_test, y_pred, zero_division=0))
print("f1_score:", f1_score(y_test, y_pred, zero_division=0))
print("roc_auc_score:", roc_auc_score(y_test, probs))
def show_basic_models(df):
train_df = df.query("sample == 1").drop(["sample", "client_id"], axis=1)
X = train_df.drop(["default"], axis=1).values
y = train_df["default"].values
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.20, random_state=RANDOM_SEED
)
# models with default settings
lr = LogisticRegression(max_iter=1000) # fix ConvergenceWarning
tree = DecisionTreeClassifier()
extra_tree = ExtraTreeClassifier()
ada = AdaBoostClassifier()
bagg = BaggingClassifier()
extra_ens = ExtraTreesClassifier()
gboost = GradientBoostingClassifier()
rforest = RandomForestClassifier()
catboo = CatBoostClassifier(silent=True) # silent=True to hide logs
models = [lr, tree, extra_tree, ada, bagg, extra_ens, gboost, rforest, catboo]
for model in models:
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
probs = model.predict_proba(X_test)
probs = probs[:, 1]
# zero_division=0 to fix zero division Warning
print("Results for:", model)
show_metrics(y_test, y_pred, probs)
print("---------")
print()
def compute_selected_model(model):
model.fit(X_train, y_train)
y_pred = model.predict(X_test)
probs = model.predict_proba(X_test)
probs = probs[:, 1]
show_metrics(y_test, y_pred, probs)
return y_pred, probs
path = "/kaggle/input/sf-dst-scoring/"
train = pd.read_csv(path + "train.csv")
test = pd.read_csv(path + "test.csv")
sample = pd.read_csv(path + "sample_submission.csv")
# train = pd.read_csv('train.csv')
# test = pd.read_csv('test.csv')
# sample = pd.read_csv('sample_submission.csv')
# train = pd.read_csv(dir_data+'train.csv')
# test = pd.read_csv(dir_data+'test.csv')
# sample = pd.read_csv(dir_data+'sample_submission.csv')
# profile = train.profile_report()
# profile.to_file('data_report.html')
print(train.info())
print()
print("Train size: ", train.shape)
print()
train.head()
print(test.info())
print()
print("Test size: ", test.shape)
print()
test.head(5)
print(sample.info())
print()
print("Sample size: ", sample.shape)
print()
sample.head(5)
# ## Описания полей
# * client_id - идентификатор клиента
# * education - уровень образования
# * sex - пол заемщика
# * age - возраст заемщика
# * car - флаг наличия автомобиля
# * car_type - флаг автомобиля иномарки
# * decline_app_cnt - количество отказанных прошлых заявок
# * good_work - флаг наличия “хорошей” работы
# * bki_request_cnt - количество запросов клиента в БКИ о своей кредитной истории
# * home_address - категоризатор домашнего адреса
# * work_address - категоризатор рабочего адреса
# * income - доход заемщика
# * foreign_passport - наличие загранпаспорта
# * sna - связь заемщика с клиентами банка
# * first_time - давность наличия информации о заемщике
# * score_bki - скоринговый балл по данным из БКИ
# * region_rating - рейтинг региона
# * app_date - дата подачи заявки
# * default - флаг дефолта по кредиту
# посмотрим на целевую переменную
train["default"].value_counts().plot(
kind="barh",
title='Distribution of "Default" feature',
ylabel="cases",
xlabel="default",
)
# Note: Выборка несбалансированная.
# объединим датасеты для удобства предварительной обработки
train["sample"] = 1 # train
test["sample"] = 0 # test
test["default"] = -1
data = pd.concat([train, test], ignore_index=True)
# num of unique values, first 10 unique values, null values count, type
data_agg = (
data.agg({"nunique", lambda s: s.unique()[:10]})
.append(pd.Series(data.isnull().sum(), name="null"))
.append(pd.Series(data.dtypes, name="dtype"))
.transpose()
)
data_agg
print(data.info())
print()
print(data.shape)
# ## EDA
# * В наборе даных 19 признаков
# * Всего представлено 110 148 клиентоа
# * Количество пропусков 0.04%, только в признаке education
# * Дубликатов нет
# * client_id не имеет повторяющихся значений, все значения уникальные
# * app_date только 120 вариантов признака (0.1%). Большинство данных за период февраль-апрель 2014 года
# * education содержит 5 категорий:
# 1. SCH (52%) - School;
# 2. GRD (31%) - Graduated (Master degree);
# 3. UGR (13%) - UnderGraduated (Bachelor degree);
# 4. PGR (1.7%) - PostGraduated;
# 5. ACD (0.3%) - Academic Degree.
# * sex содержит 2 вариации признака:
# 1. Female (56%);
# 2. Male (44%);
# * age представлен конкретными значениями со смещением влево:
# -- Minimum 21
# -- median 37
# -- Mean 39.2
# -- Maximum 72
# -- Interquartile range (IQR) 18
# * car бинарный признак, 67% заемщиков не имеют автомобиля
# * car_type бинарный признак, показывающий отечественный или иностранный автомобиль у заемщика. 81% заемщиков имеют отечественный автомобиль
# decline_app_cnt модержит конкретные значения со смещением влево. Большинство значений (83%) нулевые. Преобладающее большинство наблюдений в промежутке от 0 до 6. Можно попробовать преобразовать признак в категориальный
# -- Maximum 33
# * good_work Mбольшинство заемщиков не имеют хорошей работы (83%)
# * score_bki 93% значений уникальны, распределение нормальное, присутствуют отрицательные значения - возможно, здесь уже применялся scaler
# * bki_request_cnt натуральыне числа, которые варбируются от 0 до 53 с медианой 1. Большинство значений в промежутке от 0 до 8
# * region_rating варбируются между 20 и 80. Категориальный признак. Самое часто встречающееся значение 50 (37%)
# * home_address, work_address категориальные признаки с 3 вариациями.
# * income большой разброс значений от 1000 до 1000000; можно попробовать либо превратить в категориальный признак, либо прологарифмировать
# * sna / first_time категориальные признаки с 4 вариациями значений
# * foreign_passport бинарный признак, 67% заемщиков имеют заграничный паспорт
# * default целевой признак. Бинарный признак с подавляющим большинством тех, кто возвращает кредит без проблем. Выборка несбалансированная, при моделировании нужно будет попробовать undersampling.
# exclude client_id, app_date, default, sample
num_cols = ["age", "decline_app_cnt", "score_bki", "income", "bki_request_cnt"]
cat_cols = [
"education",
"work_address",
"home_address",
"region_rating",
"sna",
"first_time",
]
bin_cols = ["sex", "car", "car_type", "good_work", "foreign_passport"]
# ## Количественные признаки
# посмотрим на распределение количественных признаков
for i in num_cols:
plt.figure()
sns.displot(data[i][data[i] > 0].dropna(), kde=False, rug=False)
plt.title(f"Distribution of {i}")
plt.show()
# посмотрим на выбросы
# и распределение целевой переменной между количественными признаками
for col in num_cols:
get_boxplot(data, col)
# Краткие выводы:
# * дефолт больше характерен для более молодых
# * высокий score_bki характерен для дефолта
# * рейтинг региона влияет на целевую переменную
# * частые запросы в БКИ характерны для менее уверенных клиентов и могут сигнализировать дефолтную ситуацию
# * в среднем, более высокий доход свидетельствует о меньшей вероятности дефолта
# ## Категориальные признаки
# Клиенты с более низким уровнем образования чаще подвержены дефолту. В то же время, они чаще берут кредиты.
# Чем ниже рейтинг региона, тем чаще возникает дефолт по кредитам. Но это неверно для двух наименее рейтинговых регионов.
# Домашний и рабочий адреса распределены схожим образом. Возможно, есть мультиколлинеарность.
# Категории sna: ниже категория - меньше дефолтов. First_time - аналогично.
data.education.value_counts().plot(
kind="bar",
figsize=(8, 6),
color="r",
title="Distribution of clients by Education",
xlabel="education level",
ylabel="number of clients",
)
print("Пропущенные значения:", data.education.isna().sum())
print()
# заполним пропуски наиболее часто встречающимся значением
data.education = data.education.fillna("SCH")
# оценим доход от уровня образования
plt.figure(figsize=(15, 8))
plt.title("Distribution of Income by Education level")
sns.boxplot(x="education", y="income", data=data, showfliers=False)
# оценим влияние региона проживания на уровень образования
plt.figure(figsize=(15, 8))
plt.title("Distribution of Education level by Region")
sns.boxplot(x="education", y="region_rating", data=data, showfliers=False)
# Люди с более высоким уровнем образования живут в регионах с более высоким рейтингом. И наоборот.
# посмотрим на распределение дефолтных состояний по различным признакам
plt.figure(figsize=[20, 20])
i = 1
for k in cat_cols:
plt.subplot(4, 3, i)
sns.barplot(
x=k,
y="proportion",
hue="default",
data=data[[k, "default"]]
.value_counts(normalize=True)
.rename("proportion")
.reset_index(),
)
plt.title("Clients default distribution according to\n" + k, fontsize=15)
i += 1
plt.tight_layout()
plt.show()
# закодируем категориальные признаки
mappc = {}
label_encoder = LabelEncoder()
for col in cat_cols:
data[col] = label_encoder.fit_transform(data[col])
mappc[col] = dict(enumerate(label_encoder.classes_))
print(mappc)
# ## Бинарные признаки
# Женщины чаще берут кредиты, чем мужчины. Относительное количество дефолтов при этом практически одинаковое.
# Заемщики, у которых есть машина, более надежны.
# Заемщики, у которых иностранный автомобиль, более надежны, чем заемщики с отечественными машинами. Далее попробуем объединить два признака и сделать новый категориальный - не имеет машины, имеет отечественную машину, имеет иностранную машину.
# Заемщики с хорошей работой и заграничным паспортом возвращают долг чаще, чем противоположные группы.
# посмотрим на распределение дефолтных состояний по различным признакам
plt.figure(figsize=[20, 20])
i = 1
for k in bin_cols:
plt.subplot(4, 3, i)
sns.barplot(
x=k,
y="proportion",
hue="default",
data=data[[k, "default"]]
.value_counts(normalize=True)
.rename("proportion")
.reset_index(),
)
plt.title("Clients default distribution according to\n" + k, fontsize=15)
i += 1
plt.tight_layout()
plt.show()
# закодируем бинарные признаки
mapp = {}
label_encoder = LabelEncoder()
for col in bin_cols:
data[col] = label_encoder.fit_transform(data[col])
mapp[col] = dict(enumerate(label_encoder.classes_))
print(mapp)
# ## Корреляционный анализ
# Существенная корреляция между домашним адресом и местом работы. Можно попробовать удалить один из признаков.
# Сильная корреляция между car, car_type. Отсутствие автомобиля и соответственное отсутствие признака, что автомобиль иностранный. Далее скомбинируем два столбца в один, преобразовав в новый категориальный признак. Так мы уменьшим размерность набора данных, но не потеряем информацию.
# Есть довольно сильная обратная зависимость между sna, first_time. Объяснить эти признаки можно попробовать так: чем дольше человек является клиентом банка, тем больше у него отношений с другими клиентами внутри банка (например, поручители) и тем ниже риск дефолта.
# Наиболее статистически значимые признаки - Score_bki, decline_app_cnt. Мультиколлинеарность в целом отсутствует, что подтверждается рангом матрицы. Значение определителя позволяет сделать предположение о наличиии обратной матрицы.
plt.title("Correlation Matrix of dataset features")
plt.rcParams["figure.figsize"] = (15, 10)
sns.heatmap(data.corr(), vmin=-1, vmax=1, annot=True)
plt.title("Correlation Matrix of dataset features (numerical)")
plt.rcParams["figure.figsize"] = (15, 10)
sns.heatmap(data[num_cols].corr(), vmin=-1, vmax=1, annot=True)
plt.title("Correlation Matrix of dataset features (categorical)")
plt.rcParams["figure.figsize"] = (15, 10)
sns.heatmap(data[cat_cols].corr(), vmin=-1, vmax=1, annot=True)
plt.title("Correlation Matrix of dataset features (binary)")
plt.rcParams["figure.figsize"] = (15, 10)
sns.heatmap(data[bin_cols].corr(), vmin=-1, vmax=1, annot=True)
# ## Наивная модель
# Ожидаемо, результаты простых классификаторов (LogisticRegression, DecisionTreeClassifier, ExtraTreeClassifier) оказалось значительно хуже (ROC AUC в пределах 0.54-0.55), чем результаты ансамблей и специализированных моделей (результаты ROC_AUC в пределах 0.63-0.73 для AdaBoostClassifier(), BaggingClassifier(), ExtraTreesClassifier(), GradientBoostingClassifier(), RandomForestClassifier(), CatBoostClassifier()).
# Вернемся к тестированию качества моделей после генерации новых признаков и исключения признаков, обладающих высокой парной корреляцией.
# временно удалим столбец,
# вернемся к его обработке при создании новых признаков
df = data.drop(["app_date"], axis=1)
show_basic_models(df)
# ## Создание признаков
# ### Декомпозиция адресов
# Как мы установили ранее, home_address and work address обладают сильной корреляцией. Уменьшим размерность матрицы признаков с помощью sklearn.decomposition.
# получим кол-во дней от "начала эпохи" датасета
data["app_date"] = pd.to_datetime(data["app_date"], format="%d%b%Y")
data_min = min(data["app_date"])
data["days"] = (data["app_date"] - data_min).dt.days.astype("int")
data["day"] = data["app_date"].dt.day
data["month"] = data["app_date"].dt.month
data.drop(["app_date"], axis=1, inplace=True)
# средний доход для конкретного возраста
mean_income = data.groupby("age")["income"].mean().to_dict()
data["mean_income_age"] = data["age"].map(mean_income)
# максимальный доход для конкретного возраста
max_income = data.groupby("age")["income"].max().to_dict()
data["max_income_age"] = data["age"].map(max_income)
# минимальный доход для конкретного возраста
min_income = data.groupby("age")["income"].min().to_dict()
data["min_income_age"] = data["age"].map(min_income)
# нормализуем доход
data["normalized_income"] = abs(
(data.income - data.mean_income_age) / data.max_income_age
)
data.drop(["mean_income_age", "max_income_age"], axis=1, inplace=True)
# среднее кол-во запросов в БКИ по конкретному возрасту
mean_bki = data.groupby("age")["bki_request_cnt"].mean().to_dict()
data["mean_requests_age"] = data["age"].map(mean_bki)
# максимальное кол-во запросов в БКИ по конкретному возрасту
max_bki = data.groupby("age")["bki_request_cnt"].max().to_dict()
data["max_requests_age"] = data["age"].map(max_bki)
# нормализуем requests
data["normalized_req"] = abs(
(data.bki_request_cnt - data.mean_requests_age) / data.max_requests_age
)
data.drop(["mean_requests_age", "max_requests_age"], axis=1, inplace=True)
# среднее кол-во запросов в БКИ в зависимости от дохода
mean_bki_inc = data.groupby("income")["bki_request_cnt"].mean().to_dict()
data["mean_requests_income"] = data["income"].map(mean_bki_inc)
# средний доход по региону
mean_income_rat = data.groupby("region_rating")["income"].mean().to_dict()
data["mean_income_region"] = data["region_rating"].map(mean_income_rat)
data.drop(["income"], axis=1, inplace=True)
# сократим размерность матрицы без потери информации
# 0 - нет машины, 1 - есть отечественна машина, 2 - есть иномарка
data["car_comb"] = data["car"] + data["car_type"]
data["car_comb"] = data["car_comb"].astype("category")
data.drop(["car", "car_type"], axis=1, inplace=True)
# возраст разделим на четыре категории
data["age_cat"] = 0 # создадим пустой столбец для нового признака
data["age_cat"] = data["age"].apply(lambda x: age_to_cat(x))
data.drop("age", axis=1, inplace=True)
label_encoder = LabelEncoder()
data["age_cat"] = label_encoder.fit_transform(data["age_cat"])
# Sort out decline_app_cnt and bki_request_cnt by groups:
data["decline_cat"] = data["decline_app_cnt"].apply(lambda x: 4 if x >= 4 else x)
data["bki_request_cat"] = data["bki_request_cnt"].apply(lambda x: 6 if x >= 6 else x)
data.drop(["decline_app_cnt", "bki_request_cnt"], axis=1, inplace=True)
# Декомпозиция адресов
# вытащим два столбца из датасета
data_addresses = data[["work_address", "home_address"]].values
# создадим Scaler
scaler = StandardScaler()
scaled_data = scaler.fit_transform(data_addresses)
# У нас два вектора. Сократим до одного, оставив наиболее значимую информацию.
pca = PCA(n_components=1)
pca.fit(scaled_data)
pca_data = pca.transform(scaled_data)
data["pca_address"] = pca_data
data["pca_address"] = data["pca_address"] + 5
data["pca_address"] = data["pca_address"].apply(lambda x: np.log(x) + 1)
# удалим ненужные столбцы
data.drop(["home_address", "work_address"], axis=1, inplace=True)
# fix 2 zeros in normalized_req
data = data.fillna(data.mean())
# обновим списки признаков в переменных для обработки
# exclude day, month (high correlation)
num_cols = [
"score_bki",
"days",
"min_income_age",
"normalized_income",
"normalized_req",
"mean_requests_income",
"mean_income_region",
]
cat_cols = [
"education",
"region_rating",
"sna",
"first_time",
"car_comb",
"age_cat",
"decline_cat",
"bki_request_cat",
"pca_address",
]
bin_cols = [
"sex",
"good_work",
"foreign_passport",
]
# ## Выбросы и стандартизация
for col in num_cols:
median = data[col].median()
IQR = data[col].quantile(0.75) - data[col].quantile(0.25)
perc25 = data[col].quantile(0.25)
perc75 = data[col].quantile(0.75)
print("Column: ", col)
print(
"25%: {},".format(perc25),
"75%: {},".format(perc75),
"IQR: {}, ".format(IQR),
"Borderline: [{f}, {l}].".format(f=perc25 - 1.5 * IQR, l=perc75 + 1.5 * IQR),
)
print()
# replace outliers with border-values
data[col].loc[data[col] < (perc25 - 1.5 * IQR)] = perc25 - 1.5 * IQR
data[col].loc[data[col] > (perc75 + 1.5 * IQR)] = perc75 + 1.5 * IQR
scaler = RobustScaler()
data[num_cols] = scaler.fit_transform(data[num_cols].values)
data.info()
data.head()
# ## Моделирование после добавления признаков
# Настройка моделей еще не проведена, использовались стандартные настройки.
# Результаты по ROC AUC для DecisionTreeClassifier, ExtraTreeClassifier остались на прежнем уровне в 0.54
# В то же время, метрика LogisticRegression увеличена с 0.55 до 0.739.
# Лучший результат после добавления новых признаков у GradientBoostingClassifier - 0.7418.
# Метрики AdaBoostClassifier(), BaggingClassifier(), ExtraTreesClassifier(), RandomForestClassifier(), CatBoostClassifier() - в диапазоне 0.65-0.73.
# Вернемся к тестированию качества моделей после One-Hot Encoding категориальных признаков и исключения попарно коррелирующихся признаков.
show_basic_models(data)
# ## One-Hot Encoding для категориальных признаков
data = pd.get_dummies(data, prefix=cat_cols, columns=cat_cols)
data.info()
# ### Корреляционный анализ после генерации новых признаков
plt.title("Correlation Matrix of dataset features")
plt.rcParams["figure.figsize"] = (30, 20)
sns.heatmap(data.corr(), vmin=-1, vmax=1, annot=False)
# show features with corr > 0.7
corr = data.corr().abs()
corr_table = corr.unstack()
corr_table_sorted = corr_table.sort_values(kind="quicksort", ascending=False)
corr_exclude_ones = corr_table_sorted[corr_table_sorted != 1]
high_corr = corr_exclude_ones[corr_exclude_ones >= 0.7]
high_corr
# exclude features due high corr
data.drop(
["day", "month", "region_rating_6", "decline_cat_1", "education_3"],
axis=1,
inplace=True,
)
# ## Моделирование после One-Hot Encoding
# Настройка моделей еще не проведена, использовались стандартные настройки.
# Результаты по ROC AUC для DecisionTreeClassifier, ExtraTreeClassifier остались на прежнем уровне в 0.53-0.54
# В то же время, метрика LogisticRegression увеличена с 0.739 до 0.746 (теперь это лучшая модель).
# Также, метрика выше 0.74 у GradientBoostingClassifier и AdaBoostClassifier.
# Метрики BaggingClassifier(), ExtraTreesClassifier(), RandomForestClassifier(), CatBoostClassifier() - в диапазоне 0.65-0.73.
# Вернемся к тестированию качества моделей после оценки статистической значимости признаков и исключения малозначимых фич.
show_basic_models(data)
# ## Статистическая значимость признаков
data_temp = data.loc[data["sample"] == 1].drop(["client_id", "sample"], axis=1)
imp_num = pd.Series(
f_classif(data_temp[num_cols], data_temp["default"])[0], index=num_cols
)
imp_num.sort_values(inplace=True)
imp_num.plot(kind="barh", color="pink", title="Numeric Features Importance")
imp_num
imp_bin = pd.Series(
mutual_info_classif(
data_temp[bin_cols], data_temp["default"], discrete_features=False
),
index=bin_cols,
)
imp_bin.sort_values(inplace=True)
imp_bin.plot(kind="barh", color="pink", title="Binary Features Importance")
imp_bin
# update cat_cols list of columns
cols = list(data.columns)
num_bin_cols = [col for col in cols if (col in bin_cols or col in num_cols)]
cat_cols = [col for col in cols if col not in num_bin_cols]
cat_cols.remove("client_id")
cat_cols.remove("default")
cat_cols.remove("sample")
imp_cat = pd.Series(
mutual_info_classif(
data_temp[cat_cols], data_temp["default"], discrete_features=False
),
index=cat_cols,
)
imp_cat.sort_values(inplace=True)
imp_cat.plot(kind="barh", color="pink", title="Categorical Features Importance")
imp_cat
# ## Моделирование после исключения малозначимых признаков
# После исключения малозначимых признаков по-прежнему лучшие метрики ROC AUC показывает LogisticRegression (0.746), далее следуют GradientBoostingClassifier (0.743) и AdaBoostClassifier (0.740). Остальные модели демонстрируют метрику ниже 0.74.
# За базовую возьмем LogisticRegression и будем настраивать гиперпараметры этого варианта классификатора.
cat_cols_to_drop = list(imp_cat[imp_cat == 0].index)
data.drop(cat_cols_to_drop, axis=1, inplace=True)
show_basic_models(data)
# ## Промежуточные выводы
# Бинарные признаки:
# * Car и car_type сильно взаимозависимы. Объединены в один признак car_comb с тремя характеристиками.
# * Количество должников среди мужчин и женщин примерно одинаковое, но женщины берут кредиты чаще.
# Категориальные признаки:
# * Люди со слабым образованием возвращают кредиты реже, чем люди с сильным образованием.
# * Чем больше отношений у клиента с другими клиентами в банке - тем лучше и меньше просроченных кредитов.
# * Люди с более крутым образованием живут в регионах с более высоким рейтингом.
# * Чем выше рейтинг региона, тем ниже риск дефолта.
# * Имеется корреляция между sna и first_time, Home и work addresses (признак заменен на объединенный посредством PCA).
# Numerical:
# * score_bki имеет распределение, близкое к нормальному
# * В данных есть выбросы. Устранены через преобразование признаков в категориальные, логарифмирование или с использованием Scaler
# * Между количественными признаками нет сильных корреляций
# * Наличие иномарки коррелирует с уровнем дохода
# * Количество связей с другими клиентами банка коррелирует с наличием заграничного паспорта
# Наиболее статистически значимые признаки:
# * sna
# * pca_address (home & work addresses)
# * first_time
# * score_bki
# * mean_income_region
# Поскольку мы имеем достаточно много неочевидных корреляций между признаками, стоит попробовать использовать logistic regressions (при этом descicion tree models показали довольно слабые результаты).
# ## Моделирование
# Сравним logisticRegression со стандартными настройками и настройкой class_weight='balanced'.
train_df = data.query("sample == 1").drop(["sample", "client_id"], axis=1)
test_df = data.query("sample == 0").drop(["sample", "client_id"], axis=1)
X = train_df.drop(["default"], axis=1).values
y = train_df["default"].values
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.20, random_state=RANDOM_SEED
)
# default model
lr = LogisticRegression(max_iter=500)
y_pred, probs = compute_selected_model(lr)
# 0.7466261107147693
# penalty=none is stronger than penalty=l2
lr = LogisticRegression(penalty="none", max_iter=1000)
y_pred, probs = compute_selected_model(lr)
# 0.7466358446849413
# multi_class=multinominal is weaker than auto or ovr (them equal)
lr = LogisticRegression(penalty="l2", max_iter=1000, multi_class="ovr")
y_pred, probs = compute_selected_model(lr)
# 0.7466261107147693
# higher max_iter - higher metrics (costs time)
lr = LogisticRegression(penalty="l2", max_iter=1500, solver="sag")
y_pred, probs = compute_selected_model(lr)
# 0.7466335593180313
# saga is weaker than sag with equal max_iter
# both sag and saga weaker than default solver
lr = LogisticRegression(penalty="l2", max_iter=1500, solver="saga")
y_pred, probs = compute_selected_model(lr)
# 0.7466332207451557
# balanced is weaker than default settings in roc_auc
# but f1_score is significant stronger
lr = LogisticRegression(class_weight="balanced", max_iter=500)
y_pred, probs = compute_selected_model(lr)
# 0.7461245996428649
# ### Краткие выводы по LogReg
# 1. Настройка сбалансированного веса классов ухудшает метрику ROC AUC по сравнению с настройками по умолчанию (f1_score, при этом, улучшается существенно).
# 2. Solvers sag, saga - слабее, чем solver по умолчанию (lbfgs).
# 3. На заданном наборе данных penalty=none эффективнее penalty=l2.
# 4. Мультикласс-настройка multinominal формирует более слабую модель. Ovr формирует идентичную модель с настройкой по умолчанию - auto.
# ## Оценка ROC AUC и других метрик
# best LogReg model from previous chapter
lr = LogisticRegression(penalty="none", max_iter=1000)
y_pred, probs = compute_selected_model(lr)
fpr, tpr, threshold = roc_curve(y_test, probs)
roc_auc = roc_auc_score(y_test, probs)
plt.figure()
plt.plot([0, 1], label="Baseline", linestyle="--")
plt.plot(fpr, tpr, label="Regression")
plt.title("Logistic Regression ROC AUC = %0.3f" % roc_auc)
plt.ylabel("True Positive Rate")
plt.xlabel("False Positive Rate")
plt.legend(loc="lower right")
plt.show()
cm = confusion_matrix(y_test, y_pred)
cmd = ConfusionMatrixDisplay(cm, display_labels=["non_default", "default"])
cmd.plot()
cmd.ax_.set(xlabel="Predicted", ylabel="True")
# LogisticRegression на стандартных настройках имеет большцю ROC AUC, чем с настройкой class_weight=balanced. В то же время, f1_score выше у class_weight=balanced.
# Были проведены эксперименты с различными классификаторами (XGBoost, GradientBoostingClassifier, AdaBoost - в ранних версиях каггл-ноутбуков), и, например, XGBoostClassifier с подбором параметров через RandomizedSearchCV давал более эффективные результаты по ROC AUC. тем не менее, в учебных целях и для чистоты эксперимента по сравниванию настроек по умолчанию и настроек, получаемых методами подбора гиперпараметров, осуществим эту работу для LogisticRegression.
# ## Подбор гиперпараметров
train_data = data.query("sample == 1").drop(["sample", "client_id"], axis=1)
test_data = data.query("sample == 0").drop(["sample", "client_id"], axis=1)
X_train = train_data.drop(["default"], axis=1)
y_train = train_data.default.values
X_test = test_data.drop(["default"], axis=1)
y_test = test_data.default.values
# Penalty - Used to specify the norm used in the penalization
# C - Inverse of regularization strength;
# must be a positive float.
# Like in support vector machines, smaller values specify stronger regularization.
# solver - Algorithm to use in the optimization problem.
params = {
"penalty": ["l1", "l2", "elasticnet"],
"C": np.logspace(-4, 4, 20),
"solver": ["newton-cg", "lbfgs", "liblinear", "sag", "saga"],
}
lr = LogisticRegression(max_iter=1000, class_weight="balanced")
folds = 3
param_comb = 5
skf = StratifiedKFold(n_splits=folds, shuffle=True, random_state=RANDOM_SEED)
random_search = RandomizedSearchCV(
lr,
param_distributions=params,
n_iter=param_comb,
scoring="roc_auc",
n_jobs=4,
cv=skf.split(X_train, y_train),
verbose=3,
random_state=RANDOM_SEED,
)
random_search.fit(X_train, y_train)
print("\n Best estimator:")
print(random_search.best_estimator_)
print("\n Best hyperparameters:")
print(random_search.best_params_)
y_pred = random_search.predict_proba(X_test)
results_df = pd.DataFrame(
data={"client_id": test["client_id"], "default": y_pred[:, 1]}
)
results_df.to_csv("submission.csv", index=False)
results_df
# #### GradientBoostingClassifier, ROC AUC = 0.73727
# gbc = GradientBoostingClassifier()
# parameters = {
# "n_estimators":[5,50,250,500],
# "max_depth":[1,3,5,7,9],
# "learning_rate":[0.01,0.1,1,10,100]
# }
# cv = GridSearchCV(gbc, parameters, cv=5)
# cv.fit(X_train, y_train)
# y_pred = cv.predict_proba(X_test)
# results_df = pd.DataFrame(data={'client_id':test['client_id'], 'default':y_pred[:,1]})
# results_df.to_csv('submission-gbc.csv', index=False)
# results_df
# #### XGBoostClassifier, ROC AUC = 0.74226
# params = {
# 'min_child_weight': [1, 5, 10],
# 'gamma': [0.5, 1, 1.5, 2, 5],
# 'subsample': [0.6, 0.8, 1.0],
# 'colsample_bytree': [0.6, 0.8, 1.0],
# 'max_depth': [3, 4, 5]
# }
# xgb = XGBClassifier(learning_rate=0.02,
# n_estimators=600,
# objective='binary:logistic',
# nthread=1,
# use_label_encoder=False,
# eval_metric='logloss')
# folds = 3
# param_comb = 5
# skf = StratifiedKFold(n_splits=folds,
# shuffle=True,
# random_state=RANDOM_SEED)
# random_search = RandomizedSearchCV(xgb,
# param_distributions=params,
# n_iter=param_comb,
# scoring='roc_auc',
# n_jobs=4,
# cv=skf.split(X_train, y_train),
# verbose=3,
# random_state=RANDOM_SEED)
# random_search.fit(X_train, y_train)
# y_pred = random_search.predict_proba(X_test)
# results_df = pd.DataFrame(data={'client_id':test['client_id'], 'default':y_pred[:,1]})
# results_df.to_csv('submission-xgb.csv', index=False)
# results_df
# #### AdaBoost, ROC AUC = 0.73713
# model = AdaBoostClassifier()
# define the grid of values to search
# grid = dict()
# grid['n_estimators'] = [10, 50, 100, 500]
# grid['learning_rate'] = [0.0001, 0.001, 0.01, 0.1, 1.0]
# define the evaluation procedure
# cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1)
# define the grid search procedure
# grid_search = GridSearchCV(estimator=model, param_grid=grid, n_jobs=-1, cv=cv, scoring='roc_auc')
# execute the grid search
# grid_search.fit(X_train, y_train)
# y_pred = grid_search.predict_proba(X_test)
# results_df = pd.DataFrame(data={'client_id':test['client_id'], 'default':y_pred[:,1]})
# results_df.to_csv('submission-adaboost.csv', index=False)
# results_df
| false | 0 | 13,181 | 0 | 13,181 | 13,181 |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import os
# # Functions for visualising encoded distance based observations
def summarise_deltas(df):
data = df["distance_delta"]
mean = data.mean()
std = data.std()
return mean, std
def calculate_improved_frequency(df):
improved_frequency = len(df[df.improved == True]) / len(df)
return improved_frequency
def plot_distance_delta_distribution(df, method_name):
fig, ax = plt.subplots(figsize=(10, 10))
fig.suptitle("Plot showing the distribution of the distance delta")
sns.histplot(df["distance_delta"], kde=True, ax=ax)
ax.set_xlabel("d_image_original_label - d_image_optimised_label")
plt.axvline(0, c="black", linewidth=3)
if not os.path.exists(method_name):
os.makedirs(method_name)
fig.savefig(f"{method_name}/distance_delta_distribution.png")
plt.show()
def plot_distance_distributions(df):
fig, ax = plt.subplots(2, figsize=(10, 10), sharex=True, sharey=True)
fig.suptitle("Plot showing distribution of distances from each label to the image")
fig.tight_layout(pad=4)
sns.histplot(df["d_image_original_label"], kde=True, ax=ax[0], color="r")
sns.histplot(df["d_image_optimised_label"], kde=True, ax=ax[1])
ax[0].set_title(
f"Original label \n Mean = {round(df['d_image_original_label'].mean(),3)} \n Standard Deviation = {round(df['d_image_original_label'].std(),3)}"
)
ax[1].set_title(
f"Optimised label \n Mean = {round(df['d_image_optimised_label'].mean(),3)} \n Standard Deviation = {round(df['d_image_optimised_label'].std(),3)}"
)
plt.show()
def assess_distances(df, method_name):
plot_distance_distributions(df)
plot_distance_delta_distribution(df, method_name)
print(
f"The method improved the label in {round(100*calculate_improved_frequency(df), 4)}% of cases"
)
mean, std = summarise_deltas(df)
print(f"The mean decrease is {round(mean, 3)} and the std is {round(std,3)}")
sum_of_distances_original = df["d_image_original_label"].sum()
sum_of_distances_optimised = df["d_image_optimised_label"].sum()
print(
f"Sum of all distances reduced by {round(((sum_of_distances_original - sum_of_distances_optimised)/sum_of_distances_original)*100, 3)}%"
)
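# A minimal sketch (not part of the original results, values made up) of the
# dataframe contract the distance helpers above expect: 'd_image_original_label',
# 'd_image_optimised_label', 'distance_delta' and 'improved' columns. Here
# 'improved' is assumed to mean the optimised label moved closer to the image.
_demo_distances = pd.DataFrame(
    {
        "d_image_original_label": [0.82, 0.75, 0.90],
        "d_image_optimised_label": [0.78, 0.77, 0.85],
    }
)
_demo_distances["distance_delta"] = (
    _demo_distances["d_image_original_label"]
    - _demo_distances["d_image_optimised_label"]
)
_demo_distances["improved"] = _demo_distances["distance_delta"] > 0
print(summarise_deltas(_demo_distances))
print(calculate_improved_frequency(_demo_distances))
# assess_distances(_demo_distances, "demo")  # would also draw plots and write a demo/ folder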
# # Functions for visualising accuracy and confidence based observations
def plot_confidence_distributions(df, method_name):
correct_df = df[(df.correct == True)]
incorrect_df = df[(df.correct == False)]
fig, ax = plt.subplots(2, figsize=(10, 10), sharex=True)
fig.suptitle(
"Plots of distributions of model confidence for accurate and inaccurate predictions"
)
fig.tight_layout(pad=4)
sns.histplot(correct_df["confidence"], kde=True, ax=ax[0])
ax[0].set_title(
f"Accurate predictions \n Mean = {round(correct_df['confidence'].mean(),3)} \n Standard Deviation = {round(correct_df['confidence'].std(),3)}"
)
sns.histplot(incorrect_df["confidence"], kde=True, ax=ax[1], color="r")
ax[1].set_title(
f"Inaccurate predictions \n Mean = {round(incorrect_df['confidence'].mean(),3)} \n Standard Deviation = {round(incorrect_df['confidence'].std(),3)}"
)
if not os.path.exists(method_name):
os.makedirs(method_name)
fig.savefig(f"{method_name}/confidence_distributions.png")
plt.show()
def get_accuracy(df):
accs = len(df.loc[df.correct == True])
total = len(df)
return accs / total
baseline_accuracy = get_accuracy(
pd.read_csv(
"../input/imagenet1k-mini-results/Imagenet1KMini Results/baseline_results/baseline_performance.csv"
)
)
def assess_performance(df, method_name, baseline_accuracy=baseline_accuracy):
plot_confidence_distributions(df, method_name)
method_acc = get_accuracy(df)
    print(
        f"Accuracy = {round(method_acc * 100, 3)}%. This is a {round((method_acc - baseline_accuracy) * 100, 3)}% improvement over the baseline"
    )
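# assess_performance expects a dataframe with a boolean 'correct' column and a
# float 'confidence' column, mirroring the *_performance.csv files loaded in
# the Results section below. Purely illustrative sketch with made-up values:
_demo_performance = pd.DataFrame(
    {
        "correct": [True, True, False, True],
        "confidence": [0.91, 0.84, 0.40, 0.77],
    }
)
print(get_accuracy(_demo_performance))  # 0.75
# assess_performance(_demo_performance, "demo")  # would also save confidence plots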
# # Results
# ## Baseline
# label comparison
baseline = pd.read_csv(
"../input/imagenet1k-mini-results/Imagenet1KMini Results/baseline_results/baseline_label_comparrison.csv"
)
assess_distances(baseline, "baseline")
# model performance
baseline_performance = pd.read_csv(
"../input/imagenet1k-mini-results/Imagenet1KMini Results/baseline_results/baseline_performance.csv"
)
assess_performance(baseline_performance, "baseline")
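# Every method below repeats the same two steps: load the label-comparison CSV
# and run assess_distances, then load the performance CSV and run
# assess_performance. A possible wrapper for that pattern is sketched here; it
# is hypothetical and not used by the original notebook, which keeps the
# explicit per-method cells.
RESULTS_ROOT = "../input/imagenet1k-mini-results/Imagenet1KMini Results"


def assess_method(dir_name, file_prefix, method_name):
    # file names keep the "comparrison" spelling used by the result files on disk
    labels_df = pd.read_csv(
        f"{RESULTS_ROOT}/{dir_name}/{file_prefix}_label_comparrison.csv"
    )
    assess_distances(labels_df, method_name)
    performance_df = pd.read_csv(
        f"{RESULTS_ROOT}/{dir_name}/{file_prefix}_performance.csv"
    )
    assess_performance(performance_df, method_name)


# example: assess_method("control_results", "control", "control")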
# ## Control (from CLIP paper)
# label comparison
control = pd.read_csv(
"../input/imagenet1k-mini-results/Imagenet1KMini Results/control_results/control_label_comparrison.csv"
)
assess_distances(control, "control")
# model performance
control_performance = pd.read_csv(
"../input/imagenet1k-mini-results/Imagenet1KMini Results/control_results/control_performance.csv"
)
assess_performance(control_performance, "control")
# ## Zero Shot object categorizer
# label comparison
ZS_OC = pd.read_csv(
"../input/imagenet1k-mini-results/Imagenet1KMini Results/ZS_OC_results/ZS_OC_label_comparrison.csv"
)
assess_distances(ZS_OC, "ZS_OC")
# model performance
ZS_OC_performance = pd.read_csv(
"../input/imagenet1k-mini-results/Imagenet1KMini Results/ZS_OC_results/ZS_OC_performance.csv"
)
assess_performance(ZS_OC_performance, "ZS_OC")
# ## Simple MLM object categorizer
# label comparison
simple_MLM_OC = pd.read_csv(
"../input/imagenet1k-mini-results/Imagenet1KMini Results/MLM_OC_results/MLM_OC_label_comparrison.csv"
)
assess_distances(simple_MLM_OC, "simple_MLM_OC")
# model performance
simple_MLM_OC_performance = pd.read_csv(
"../input/imagenet1k-mini-results/Imagenet1KMini Results/MLM_OC_results/MLM_OC_performance.csv"
)
assess_performance(simple_MLM_OC_performance, "simple_MLM_OC")
# ## Random sampling MLM object categorizer
# label comparison
random_sampling_MLM_OC = pd.read_csv(
"../input/imagenet1k-mini-results/Imagenet1KMini Results/random_MLM_OC_results/random_MLM_OC_label_comparrison.csv"
)
assess_distances(random_sampling_MLM_OC, "random_sampling_OC")
# model performance
random_sampling_MLM_OC_performance = pd.read_csv(
    "../input/imagenet1k-mini-results/Imagenet1KMini Results/random_MLM_OC_results/random_MLM_OC_performance.csv"
)
assess_performance(random_sampling_MLM_OC_performance, "random_sampling_OC")
# ## SBERT object categorizer
# label comparison
# note: the CSV paths in this section point at the ZS_OC result files
SBERT_OC = pd.read_csv(
    "../input/imagenet1k-mini-results/Imagenet1KMini Results/ZS_OC_results/ZS_OC_label_comparrison.csv"
)
assess_distances(SBERT_OC, "SBERT_OC")
# model performance
SBERT_OC_performance = pd.read_csv(
"../input/imagenet1k-mini-results/Imagenet1KMini Results/ZS_OC_results/ZS_OC_performance.csv"
)
assess_performance(SBERT_OC_performance, "SBERT_OC")
# ## SBERT enhanced sampling MLM object categorizer
# label comparison
SBERT_sampling_MLM_OC = pd.read_csv(
"../input/imagenet1k-mini-results/Imagenet1KMini Results/SBERT_MLM_OC_results/SBERT_MLM_OC_label_comparrison.csv"
)
assess_distances(SBERT_sampling_MLM_OC, "SBERT_sampling_MLM_OC")
# model performance
SBERT_sampling_MLM_OC_performance = pd.read_csv(
"../input/imagenet1k-mini-results/Imagenet1KMini Results/SBERT_MLM_OC_results/SBERT_MLM_OC_performance.csv"
)
assess_performance(SBERT_sampling_MLM_OC_performance, "SBERT_sampling_MLM_OC")
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import os
# # Functions for visualising encoded distance based observations
def summarise_deltas(df):
data = df["distance_delta"]
mean = data.mean()
std = data.std()
return mean, std
def calculate_improved_frequency(df):
improved_frequency = len(df[df.improved == True]) / len(df)
return improved_frequency
def plot_distance_delta_distribution(df, method_name):
fig, ax = plt.subplots(figsize=(10, 10))
fig.suptitle("Plot showing the distribution of the distance delta")
sns.histplot(df["distance_delta"], kde=True, ax=ax)
ax.set_xlabel("d_image_original_label - d_image_optimised_label")
plt.axvline(0, c="black", linewidth=3)
if not os.path.exists(method_name):
os.makedirs(method_name)
fig.savefig(f"{method_name}/distance_delta_distribution.png")
plt.show()
def plot_distance_distributions(df):
fig, ax = plt.subplots(2, figsize=(10, 10), sharex=True, sharey=True)
fig.suptitle("Plot showing distribution of distances from each label to the image")
fig.tight_layout(pad=4)
sns.histplot(df["d_image_original_label"], kde=True, ax=ax[0], color="r")
sns.histplot(df["d_image_optimised_label"], kde=True, ax=ax[1])
ax[0].set_title(
f"Original label \n Mean = {round(df['d_image_original_label'].mean(),3)} \n Standard Deviation = {round(df['d_image_original_label'].std(),3)}"
)
ax[1].set_title(
f"Optimised label \n Mean = {round(df['d_image_optimised_label'].mean(),3)} \n Standard Deviation = {round(df['d_image_optimised_label'].std(),3)}"
)
plt.show()
def assess_distances(df, method_name):
plot_distance_distributions(df)
plot_distance_delta_distribution(df, method_name)
print(
f"The method improved the label in {round(100*calculate_improved_frequency(df), 4)}% of cases"
)
mean, std = summarise_deltas(df)
print(f"The mean decrease is {round(mean, 3)} and the std is {round(std,3)}")
sum_of_distances_original = df["d_image_original_label"].sum()
sum_of_distances_optimised = df["d_image_optimised_label"].sum()
print(
f"Sum of all distances reduced by {round(((sum_of_distances_original - sum_of_distances_optimised)/sum_of_distances_original)*100, 3)}%"
)
# # Functions for visualising accuracy and confidence based observations
def plot_confidence_distributions(df, method_name):
correct_df = df[(df.correct == True)]
incorrect_df = df[(df.correct == False)]
fig, ax = plt.subplots(2, figsize=(10, 10), sharex=True)
fig.suptitle(
"Plots of distributions of model confidence for accurate and inaccurate predictions"
)
fig.tight_layout(pad=4)
sns.histplot(correct_df["confidence"], kde=True, ax=ax[0])
ax[0].set_title(
f"Accurate predictions \n Mean = {round(correct_df['confidence'].mean(),3)} \n Standard Deviation = {round(correct_df['confidence'].std(),3)}"
)
sns.histplot(incorrect_df["confidence"], kde=True, ax=ax[1], color="r")
ax[1].set_title(
f"Inaccurate predictions \n Mean = {round(incorrect_df['confidence'].mean(),3)} \n Standard Deviation = {round(incorrect_df['confidence'].std(),3)}"
)
if not os.path.exists(method_name):
os.makedirs(method_name)
fig.savefig(f"{method_name}/confidence_distributions.png")
plt.show()
def get_accuracy(df):
accs = len(df.loc[df.correct == True])
total = len(df)
return accs / total
baseline_accuracy = get_accuracy(
pd.read_csv(
"../input/imagenet1k-mini-results/Imagenet1KMini Results/baseline_results/baseline_performance.csv"
)
)
def assess_performance(df, method_name, baseline_accuracy=baseline_accuracy):
plot_confidence_distributions(df, method_name)
method_acc = get_accuracy(df)
print(
f"Accuracy = {method_acc*100}% This is {(method_acc - baseline_accuracy)*100}% improvement the baseline"
)
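# Hypothetical mini example of the performance CSV layout assumed by get_accuracy and
# assess_performance: one row per prediction with a "correct" flag and a "confidence".
_toy_perf = pd.DataFrame(
    {"correct": [True, True, False, True], "confidence": [0.91, 0.77, 0.55, 0.83]}
)
print(f"toy accuracy = {get_accuracy(_toy_perf):.2f}")  # 0.75 on this made-up data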
# # Results
# ## Baseline
# label comparrison
baseline = pd.read_csv(
"../input/imagenet1k-mini-results/Imagenet1KMini Results/baseline_results/baseline_label_comparrison.csv"
)
assess_distances(baseline, "baseline")
# model performance
baseline_performance = pd.read_csv(
"../input/imagenet1k-mini-results/Imagenet1KMini Results/baseline_results/baseline_performance.csv"
)
assess_performance(baseline_performance, "baseline")
# ## Control (from CLIP paper)
# label comparrison
control = pd.read_csv(
"../input/imagenet1k-mini-results/Imagenet1KMini Results/control_results/control_label_comparrison.csv"
)
assess_distances(control, "control")
# model performance
control_performance = pd.read_csv(
"../input/imagenet1k-mini-results/Imagenet1KMini Results/control_results/control_performance.csv"
)
assess_performance(control_performance, "control")
# ## Zero Shot object categorizer
# label comparrison
ZS_OC = pd.read_csv(
"../input/imagenet1k-mini-results/Imagenet1KMini Results/ZS_OC_results/ZS_OC_label_comparrison.csv"
)
assess_distances(ZS_OC, "ZS_OC")
# model performance
ZS_OC_performance = pd.read_csv(
"../input/imagenet1k-mini-results/Imagenet1KMini Results/ZS_OC_results/ZS_OC_performance.csv"
)
assess_performance(ZS_OC_performance, "ZS_OC")
# ## Simple MLM object categorizer
# label comparrison
simple_MLM_OC = pd.read_csv(
"../input/imagenet1k-mini-results/Imagenet1KMini Results/MLM_OC_results/MLM_OC_label_comparrison.csv"
)
assess_distances(simple_MLM_OC, "simple_MLM_OC")
# model performance
simple_MLM_OC_performance = pd.read_csv(
"../input/imagenet1k-mini-results/Imagenet1KMini Results/MLM_OC_results/MLM_OC_performance.csv"
)
assess_performance(simple_MLM_OC_performance, "simple_MLM_OC")
# ## Random sampling MLM object categorizer
# label comparrison
random_sampling_MLM_OC = pd.read_csv(
"../input/imagenet1k-mini-results/Imagenet1KMini Results/random_MLM_OC_results/random_MLM_OC_label_comparrison.csv"
)
assess_distances(random_sampling_MLM_OC, "random_sampling_OC")
# model performance
random_sampling_MLM_OC_storage = pd.read_csv(
"../input/imagenet1k-mini-results/Imagenet1KMini Results/random_MLM_OC_results/random_MLM_OC_performance.csv"
)
assess_performance(random_sampling_MLM_OC_storage, "random_sampling_OC")
# ## SBERT object categorizer
# label comparison (note: this section currently reloads the ZS_OC result files; adjust the paths here if separate SBERT_OC results exist)
SBERT_OC = pd.read_csv(
"../input/imagenet1k-mini-results/Imagenet1KMini Results/ZS_OC_results/ZS_OC_label_comparrison.csv"
)
assess_distances(SBERT_OC, "SBERT_OC")
# model performance
SBERT_OC_performance = pd.read_csv(
"../input/imagenet1k-mini-results/Imagenet1KMini Results/ZS_OC_results/ZS_OC_performance.csv"
)
assess_performance(SBERT_OC_performance, "SBERT_OC")
# ## SBERT enhanced sampling MLM object categorizer
# label comparrison
SBERT_sampling_MLM_OC = pd.read_csv(
"../input/imagenet1k-mini-results/Imagenet1KMini Results/SBERT_MLM_OC_results/SBERT_MLM_OC_label_comparrison.csv"
)
assess_distances(SBERT_sampling_MLM_OC, "SBERT_sampling_MLM_OC")
# model performance
SBERT_sampling_MLM_OC_performance = pd.read_csv(
"../input/imagenet1k-mini-results/Imagenet1KMini Results/SBERT_MLM_OC_results/SBERT_MLM_OC_performance.csv"
)
assess_performance(SBERT_sampling_MLM_OC_performance, "SBERT_sampling_MLM_OC")
| false | 0 | 2,399 | 0 | 2,399 | 2,399 |
||
69207897
|
# ### Import Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import PIL.Image
# ### Load data
df1 = pd.read_csv("train.csv")
df2 = pd.read_csv("test.csv")
df1.head(10)
df2.head()
df1.shape
df2.shape
df1.isnull().any().sum()
df2.isnull().any().sum()
df1.info()
df2.info()
set((df1.dtypes).to_list())
df1.describe()
df2.describe()
df1["species"].nunique()
# ### Building Model
test_ids = df2.pop("id")
x = df1.drop("species", axis=1)
y = df1["species"]
from sklearn.preprocessing import LabelEncoder
encoder = LabelEncoder()
y_fit = encoder.fit(df1["species"])
y_label = y_fit.transform(df1["species"])
classes = list(y_fit.classes_)
classes
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=5)
# ### Random Forest Classifier
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators=40)
classifier.fit(x_train, y_train)
pred_train = classifier.predict(x_train)
pred_test = classifier.predict(x_test)
from sklearn.metrics import accuracy_score, confusion_matrix
print(confusion_matrix(y_test, pred_test))
print("Training Accuracy: ", accuracy_score(y_train, pred_train))
print("Testing Accuracy: ", accuracy_score(y_test, pred_test))
# Note: the scaled features below are computed for inspection only; the fitted random
# forest does not use them (tree-based models do not need feature scaling).
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
x_scaled = sc.fit_transform(x)
x_scaled[:5]
# Class probabilities for the unlabeled test set; predict_proba columns follow the
# sorted class order, which matches `classes` from the LabelEncoder.
final_predictions = classifier.predict_proba(df2)
submission = pd.DataFrame(final_predictions, columns=classes)
submission.insert(0, "id", test_ids)
submission.reset_index()
submission.head(10)
submission.to_csv("submission.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/207/69207897.ipynb
| null | null |
[{"Id": 69207897, "ScriptId": 18892748, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7632351, "CreationDate": "07/28/2021 05:49:53", "VersionNumber": 1.0, "Title": "Leaf Classification", "EvaluationDate": "07/28/2021", "IsChange": true, "TotalLines": 85.0, "LinesInsertedFromPrevious": 85.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# ### Import Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import PIL.Image
# ### Load data
df1 = pd.read_csv("train.csv")
df2 = pd.read_csv("test.csv")
df1.head(10)
df2.head()
df1.shape
df2.shape
df1.isnull().any().sum()
df2.isnull().any().sum()
df1.info()
df2.info()
set((df1.dtypes).to_list())
df1.describe()
df2.describe()
df1["species"].nunique()
# ### Building Model
test_ids = df2.pop("id")
x = df1.drop("species", axis=1)
y = df1["species"]
from sklearn.preprocessing import LabelEncoder
encoder = LabelEncoder()
y_fit = encoder.fit(df1["species"])
y_label = y_fit.transform(df1["species"])
classes = list(y_fit.classes_)
classes
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=5)
# ### Random Forest Classifier
from sklearn.ensemble import RandomForestClassifier
classifier = RandomForestClassifier(n_estimators=40)
classifier.fit(x_train, y_train)
pred_train = classifier.predict(x_train)
pred_test = classifier.predict(x_test)
from sklearn.metrics import accuracy_score, confusion_matrix
print(confusion_matrix(y_test, pred_test))
print("Training Accuracy: ", accuracy_score(y_train, pred_train))
print("Testing Accuracy: ", accuracy_score(y_test, pred_test))
# Note: the scaled features below are computed for inspection only; the fitted random
# forest does not use them (tree-based models do not need feature scaling).
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
x_scaled = sc.fit_transform(x)
x_scaled[:5]
# Class probabilities for the unlabeled test set; predict_proba columns follow the
# sorted class order, which matches `classes` from the LabelEncoder.
final_predictions = classifier.predict_proba(df2)
submission = pd.DataFrame(final_predictions, columns=classes)
submission.insert(0, "id", test_ids)
submission.reset_index()
submission.head(10)
submission.to_csv("submission.csv", index=False)
| false | 0 | 553 | 0 | 553 | 553 |
||
69207642
|
<jupyter_start><jupyter_text>tf-distilbert-base-uncased
Kaggle dataset identifier: tfdistilbertbaseuncased
<jupyter_script>import codecs
import copy
import csv
import gc
from itertools import chain
import os
import pickle
import random
from typing import Dict, List, Tuple, Union
import warnings
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
import nltk
from nltk.corpus import wordnet
import numpy as np
from sklearn.manifold import TSNE
from sklearn.preprocessing import StandardScaler
import tensorflow as tf
from tensorflow.python.framework import ops, tensor_util
from tensorflow.python.keras.utils import losses_utils, tf_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.losses import util as tf_losses_util
import tensorflow_addons as tfa
from transformers import AutoConfig, AutoTokenizer, TFAutoModel
print(tf.__version__)
# Author: Joseph Sefara
# URL: <https://github.com/dsfsi/textaugment/>
class Wordnet:
"""
A set of functions used to augment data.
Typical usage: ::
>>> import nltk
>>> nltk.download('punkt')
>>> nltk.download('wordnet')
>>> nltk.download('averaged_perceptron_tagger')
>>> from textaugment import Wordnet
>>> t = Wordnet(v=True,n=True,p=0.5)
>>> t.augment('I love school')
i adore school
"""
def __init__(self, **kwargs):
"""
A method to initialize parameters
:type random_state: int
:param random_state: seed
:type v: bool
:param v: Verb, default is True
:type n: bool
:param n: Noun
:type runs: int
        :param runs: Number of repetitions on a single text
:type p: float, optional
:param p: The probability of success of an individual trial. (0.1<p<1.0), default is 0.5
:rtype: None
        :return: The constructor does not return anything.
"""
# Set random state
if "random_state" in kwargs:
self.random_state = kwargs["random_state"]
if isinstance(self.random_state, int):
np.random.seed(self.random_state)
else:
raise TypeError(
"random_state must have type int, float, str, bytes, or bytearray"
)
# Set verb to be default if no values given
try:
if "v" not in kwargs and "n" not in kwargs:
kwargs["v"] = True
kwargs["n"] = False
elif "v" in kwargs and "n" not in kwargs:
kwargs["v"] = True
kwargs["n"] = False
elif "v" not in kwargs and "n" in kwargs:
kwargs["n"] = True
kwargs["v"] = False
if "runs" not in kwargs:
kwargs["runs"] = 1
except KeyError:
raise
try:
if "p" in kwargs:
if type(kwargs["p"]) is not float:
raise TypeError(
"p represent probability of success and must be a float from 0.1 to 0.9. E.g p=0.5"
)
elif type(kwargs["p"]) is float:
self.p = kwargs["p"]
else:
kwargs["p"] = 0.5 # Set default value
except KeyError:
raise
self.p = kwargs["p"]
self.v = kwargs["v"]
self.n = kwargs["n"]
self.runs = kwargs["runs"]
def geometric(self, data):
"""
Used to generate Geometric distribution.
:type data: list
:param data: Input data
:rtype: ndarray or scalar
:return: Drawn samples from the parameterized Geometric distribution.
"""
data = np.array(data)
first_trial = (
np.random.geometric(p=self.p, size=data.shape[0]) == 1
) # Capture success after first trial
return data[first_trial]
def replace(self, data):
"""
The method to replace words with synonyms
:type data: str
:param data: sentence used for data augmentation
:rtype: str
:return: The augmented data
"""
data = data.lower().split()
data_tokens = [
[i, x, y] for i, (x, y) in enumerate(nltk.pos_tag(data))
] # Convert tuple to list
if self.v:
for loop in range(self.runs):
words = [[i, x] for i, x, y in data_tokens if y[0] == "V"]
words = [
i for i in self.geometric(data=words)
] # List of selected words
if len(words) >= 1: # There are synonyms
for word in words:
synonyms1 = wordnet.synsets(
word[1], wordnet.VERB
) # Return verbs only
synonyms = list(
set(
chain.from_iterable(
[syn.lemma_names() for syn in synonyms1]
)
)
)
                        synonyms_ = []  # Synonyms with no underscores go here
for w in synonyms:
if "_" not in w:
synonyms_.append(w) # Remove words with underscores
if len(synonyms_) >= 1:
synonym = self.geometric(data=synonyms_).tolist()
if synonym: # There is a synonym
data[int(word[0])] = synonym[
0
].lower() # Take the first success
if self.n:
for loop in range(self.runs):
words = [[i, x] for i, x, y in data_tokens if y[0] == "N"]
words = [
i for i in self.geometric(data=words)
] # List of selected words
if len(words) >= 1: # There are synonyms
for word in words:
synonyms1 = wordnet.synsets(
word[1], wordnet.NOUN
) # Return nouns only
synonyms = list(
set(
chain.from_iterable(
[syn.lemma_names() for syn in synonyms1]
)
)
)
                        synonyms_ = []  # Synonyms with no underscores go here
for w in synonyms:
if "_" not in w:
synonyms_.append(w) # Remove words with underscores
if len(synonyms_) >= 1:
synonym = self.geometric(data=synonyms_).tolist()
if synonym: # There is a synonym
data[int(word[0])] = synonym[
0
].lower() # Take the first success
return " ".join(data)
def augment(self, data):
"""
Data augmentation for text. Generate new dataset based on verb/nouns synonyms.
:type data: str
:param data: sentence used for data augmentation
:rtype: str
:return: The augmented data
"""
# Error handling
if type(data) is not str:
raise TypeError("Only strings are supported")
data = self.replace(data)
return data
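# Small usage sketch of the augmenter defined above, mirroring its docstring example.
# It assumes the required nltk corpora (punkt, wordnet, POS tagger) are already
# available in the environment, as the training loader below also assumes.
_demo_augmenter = Wordnet(v=True, n=True, p=0.5, random_state=42)
print(_demo_augmenter.augment("the children read a short story about a friendly dog"))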
class LossFunctionWrapper(tf.keras.losses.Loss):
def __init__(
self, fn, reduction=losses_utils.ReductionV2.AUTO, name=None, **kwargs
):
super(LossFunctionWrapper, self).__init__(reduction=reduction, name=name)
self.fn = fn
self._fn_kwargs = kwargs
def call(self, y_true, y_pred):
if tensor_util.is_tensor(y_pred) and tensor_util.is_tensor(y_true):
y_pred, y_true = tf_losses_util.squeeze_or_expand_dimensions(y_pred, y_true)
return self.fn(y_true, y_pred, **self._fn_kwargs)
def get_config(self):
config = {}
        for k, v in self._fn_kwargs.items():  # plain dict iteration (six is not imported)
config[k] = (
tf.keras.backend.eval(v) if tf_utils.is_tensor_or_variable(v) else v
)
base_config = super(LossFunctionWrapper, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def distance_based_log_loss(y_true, y_pred):
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
margin = 1.0
p = (1.0 + tf.math.exp(-margin)) / (1.0 + tf.math.exp(y_pred - margin))
    # tf.keras.losses.binary_crossentropy supports the label_smoothing keyword used here.
    return tf.keras.losses.binary_crossentropy(y_true, p, label_smoothing=0.05)
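# Quick numeric illustration of the distance-to-probability mapping above: with
# margin = 1, a distance of 0 maps to p = 1 and p decays towards 0 as the distance
# between the two embeddings grows.
_margin = 1.0
for _d in (0.0, 1.0, 3.0):
    _p = (1.0 + np.exp(-_margin)) / (1.0 + np.exp(_d - _margin))
    print(f"distance={_d:.1f} -> p={_p:.3f}")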
class DBLLogLoss(LossFunctionWrapper):
def __init__(
self, reduction=losses_utils.ReductionV2.AUTO, name="distance_based_log_loss"
):
super(DBLLogLoss, self).__init__(
distance_based_log_loss, name=name, reduction=reduction
)
class MaskCalculator(tf.keras.layers.Layer):
def __init__(self, output_dim, **kwargs):
self.output_dim = output_dim
super(MaskCalculator, self).__init__(**kwargs)
def build(self, input_shape):
super(MaskCalculator, self).build(input_shape)
def call(self, inputs, **kwargs):
return tf.keras.backend.permute_dimensions(
x=tf.keras.backend.repeat(
x=tf.keras.backend.cast(
x=tf.keras.backend.greater(x=inputs, y=0), dtype="float32"
),
n=self.output_dim,
),
pattern=(0, 2, 1),
)
def compute_output_shape(self, input_shape):
assert len(input_shape) == 1
shape = list(input_shape)
shape.append(self.output_dim)
return tuple(shape)
class DatasetGen(tf.keras.utils.Sequence):
def __init__(
self,
data: Dict[str, Tuple[List[int], float, float]],
data_IDs: List[str],
output_scaler: StandardScaler,
token_indices: np.ndarray,
pad_token_id: int,
median_distance_between_pairs: float,
batch_size: int,
batches_per_epoch: Union[int, None] = None,
):
self.data = copy.deepcopy(data)
self.token_indices = token_indices
self.pad_token_id = pad_token_id
self.batch_size = batch_size
self.median_distance_between_pairs = median_distance_between_pairs
self.batches_per_epoch = batches_per_epoch
self.output_scaler = output_scaler
self.pairs = set()
for key1 in data_IDs:
for key2 in data_IDs:
if key1 == key2:
continue
if ((key1, key2) not in self.pairs) and (
(key2, key1) not in self.pairs
):
self.pairs.add((key1, key2))
self.pairs = sorted(list(self.pairs))
def __len__(self):
if self.batches_per_epoch is None:
return int(np.ceil(len(self.pairs) / float(self.batch_size)))
return self.batches_per_epoch
def __getitem__(self, idx):
x_left = np.zeros(
shape=(self.batch_size, self.token_indices.shape[1]), dtype=np.int32
)
x_right = np.zeros(
shape=(self.batch_size, self.token_indices.shape[1]), dtype=np.int32
)
batch_y = [
np.zeros((self.batch_size, 1), dtype=np.int32),
np.zeros((self.batch_size, 1), dtype=np.float32),
np.zeros((self.batch_size, 1), dtype=np.float32),
]
if self.batches_per_epoch is None:
batch_start = idx * self.batch_size
batch_end = min(len(self.pairs), batch_start + self.batch_size)
for sample_idx in range(batch_end - batch_start):
left_key, right_key = self.pairs[sample_idx + batch_start]
left_idx = self.data[left_key][0][0]
left_target = self.data[left_key][1]
right_idx = self.data[right_key][0][0]
right_target = self.data[right_key][1]
x_left[sample_idx] = self.token_indices[left_idx]
x_right[sample_idx] = self.token_indices[right_idx]
if abs(left_target - right_target) < self.median_distance_between_pairs:
batch_y[0][sample_idx, 0] = 1
else:
batch_y[0][sample_idx, 0] = 0
batch_y[1][sample_idx, 0] = left_target
batch_y[2][sample_idx, 0] = right_target
n_pad = self.batch_size - (batch_end - batch_start)
if n_pad > 0:
for sample_idx in range(batch_end - batch_start, self.batch_size):
x_left[sample_idx] = x_left[sample_idx - 1]
x_right[sample_idx] = x_right[sample_idx - 1]
batch_y[0][sample_idx, 0] = batch_y[0][sample_idx - 1, 0]
batch_y[1][sample_idx, 0] = batch_y[1][sample_idx - 1, 0]
batch_y[2][sample_idx, 0] = batch_y[2][sample_idx - 1, 0]
else:
for sample_idx in range(self.batch_size):
left_key, right_key = random.choice(self.pairs)
left_idx = random.choice(self.data[left_key][0])
left_target = np.random.normal(
loc=self.data[left_key][1], scale=self.data[left_key][2]
)
right_idx = random.choice(self.data[right_key][0])
right_target = np.random.normal(
loc=self.data[right_key][1], scale=self.data[right_key][2]
)
x_left[sample_idx] = self.token_indices[left_idx]
x_right[sample_idx] = self.token_indices[right_idx]
if abs(left_target - right_target) < self.median_distance_between_pairs:
batch_y[0][sample_idx, 0] = 1
else:
batch_y[0][sample_idx, 0] = 0
batch_y[1][sample_idx, 0] = left_target
batch_y[2][sample_idx, 0] = right_target
batch_x = [
x_left,
generate_attention_mask(x_left, self.pad_token_id),
x_right,
generate_attention_mask(x_right, self.pad_token_id),
]
del x_left, x_right
batch_y[1] = self.output_scaler.transform(batch_y[1])
batch_y[2] = self.output_scaler.transform(batch_y[2])
return batch_x, batch_y, None
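# Illustrative note on scale: DatasetGen enumerates every unordered pair of sample IDs,
# so the number of candidate pairs grows quadratically with the number of texts.
for _n in (100, 1000, 2800):
    print(f"{_n} samples -> {_n * (_n - 1) // 2} unique pairs")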
def generate_attention_mask(token_indices: np.ndarray, padding_id: int) -> np.ndarray:
attention = np.zeros(token_indices.shape, dtype=np.int32)
for sample_idx in range(token_indices.shape[0]):
for token_idx in range(token_indices.shape[1]):
if token_indices[sample_idx, token_idx] == padding_id:
break
attention[sample_idx, token_idx] = 1
return attention
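# A vectorized sketch equivalent to the loop above: positions strictly before the
# first padding token get 1, everything from the first pad onwards gets 0. The name
# below is new (not used elsewhere in this script).
def generate_attention_mask_fast(token_indices: np.ndarray, padding_id: int) -> np.ndarray:
    is_pad = token_indices == padding_id
    return (np.cumsum(is_pad, axis=1) == 0).astype(np.int32)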
def load_data_for_training(
fname: str,
) -> Dict[str, Tuple[List[str], float, float]]:
loaded_header = []
id_col_idx = -1
text_col_idx = -1
target_col_idx = -1
std_col_idx = -1
line_idx = 1
data = dict()
set_of_texts = set()
t = Wordnet(v=True, n=True, p=0.5)
with codecs.open(fname, mode="r", encoding="utf-8") as fp:
data_reader = csv.reader(fp, quotechar='"', delimiter=",")
for row in data_reader:
if len(row) > 0:
err_msg = f"File {fname}: line {line_idx} is wrong!"
if len(loaded_header) == 0:
loaded_header = copy.copy(row)
try:
text_col_idx = loaded_header.index("excerpt")
except:
text_col_idx = -1
if text_col_idx <= 0:
raise ValueError(err_msg + ' Field "excerpt" is not found!')
try:
id_col_idx = loaded_header.index("id")
except:
id_col_idx = -1
if id_col_idx < 0:
raise ValueError(err_msg + ' Field "id" is not found!')
try:
target_col_idx = loaded_header.index("target")
except:
target_col_idx = -1
if target_col_idx < 0:
raise ValueError(err_msg + ' Field "target" is not found!')
try:
std_col_idx = loaded_header.index("standard_error")
except:
std_col_idx = -1
if std_col_idx < 0:
err_msg2 = f'{err_msg} Field "standard_error" is not found!'
raise ValueError(err_msg2)
else:
sample_id = row[id_col_idx]
if sample_id != sample_id.strip():
raise ValueError(err_msg + f" {sample_id} is wrong sample ID!")
if sample_id in data:
err_msg2 = f"{err_msg} {sample_id} is not unique sample ID!"
raise ValueError(err_msg2)
text = row[text_col_idx].replace("\r", "\n")
if len(text) == 0:
raise ValueError(err_msg + f" Text {sample_id} is empty!")
sentences = []
for paragraph in map(lambda it: it.strip(), text.split("\n")):
if len(paragraph) > 0:
sentences += nltk.sent_tokenize(paragraph)
if len(sentences) == 0:
raise ValueError(err_msg + f" Text {sample_id} is empty!")
text = " ".join([cur_sent.lower() for cur_sent in sentences])
if text.lower() in set_of_texts:
raise ValueError(err_msg + f" Text {sample_id} is not unique!")
set_of_texts.add(text.lower())
try:
target_val = float(row[target_col_idx])
ok = True
except:
target_val = 0.0
ok = False
if not ok:
err_msg2 = err_msg
err_msg2 += (
f" {row[target_col_idx]} is wrong target for "
f"text {sample_id}."
)
raise ValueError(err_msg2)
try:
std_val = float(row[std_col_idx])
ok = std_val > 0.0
except:
std_val = 0.0
ok = False
if not ok:
err_msg2 = err_msg
err_msg2 += (
f" {row[std_col_idx]} is wrong standard error"
f" for text {sample_id}."
)
warnings.warn(err_msg2)
else:
augmented_texts = set()
for _ in range(5):
new_augmented_text = []
for cur_sent in sentences:
new_sent = t.augment(cur_sent.lower()).strip().lower()
if len(new_sent) > 0:
new_augmented_text.append(new_sent)
assert len(new_augmented_text) > 0
random.shuffle(new_augmented_text)
new_augmented_text = " ".join(new_augmented_text)
if (len(new_augmented_text) > 0) and (
new_augmented_text.lower() not in set_of_texts
):
set_of_texts.add(new_augmented_text)
augmented_texts.add(new_augmented_text)
del new_augmented_text
if text in augmented_texts:
added_texts = [text] + list(augmented_texts - {text})
else:
added_texts = [text] + list(augmented_texts)
data[sample_id] = (added_texts, target_val, std_val)
line_idx += 1
return data
def load_data_for_testing(fname: str, batch_size: int):
loaded_header = []
id_col_idx = -1
text_col_idx = -1
target_col_idx = -1
std_col_idx = -1
line_idx = 1
data = dict()
with codecs.open(fname, mode="r", encoding="utf-8") as fp:
data_reader = csv.reader(fp, quotechar='"', delimiter=",")
for row in data_reader:
if len(row) > 0:
err_msg = f"File {fname}: line {line_idx} is wrong!"
if len(loaded_header) == 0:
loaded_header = copy.copy(row)
try:
text_col_idx = loaded_header.index("excerpt")
except:
text_col_idx = -1
if text_col_idx <= 0:
raise ValueError(err_msg + ' Field "excerpt" is not found!')
try:
id_col_idx = loaded_header.index("id")
except:
id_col_idx = -1
if id_col_idx < 0:
raise ValueError(err_msg + ' Field "id" is not found!')
else:
sample_id = row[id_col_idx]
if sample_id != sample_id.strip():
raise ValueError(err_msg + f" {sample_id} is wrong sample ID!")
if sample_id in data:
err_msg2 = f"{err_msg} {sample_id} is not unique sample ID!"
raise ValueError(err_msg2)
text = row[text_col_idx].replace("\n", " ").replace("\r", " ")
text = " ".join(text.split()).strip()
if len(text) == 0:
raise ValueError(err_msg + f" Text {sample_id} is empty!")
data[sample_id] = text
if len(data) >= batch_size:
yield data
del data
data = dict()
line_idx += 1
if len(data) > 0:
yield data
def train_output_scaler(
data: Dict[str, Tuple[List[int], float, float]]
) -> StandardScaler:
outputs_for_training = np.empty((len(data), 1), dtype=np.float64)
for idx, sample_id in enumerate(list(data.keys())):
outputs_for_training[idx, 0] = data[sample_id][1]
return StandardScaler().fit(outputs_for_training)
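# Toy illustration of how the fitted scaler is used later: targets are standardised
# for training and predictions are mapped back with inverse_transform.
_toy_targets = np.array([[-3.0], [-1.5], [0.2]])
_toy_scaler = StandardScaler().fit(_toy_targets)
print(_toy_scaler.inverse_transform(_toy_scaler.transform(_toy_targets)))  # original values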
def tokenize_data(
data: Union[List[Dict[str, str]], List[Dict[str, Tuple[List[str], float, float]]]],
tokenizer: AutoTokenizer,
max_seq_len: int,
) -> Tuple[
Union[Dict[str, int], Dict[str, Tuple[List[int], float, float]]], np.ndarray
]:
tokenized_data = dict()
all_tokens_matrix = []
for sample_idx, cur_ID in enumerate(sorted(list(data.keys()))):
if isinstance(data[cur_ID], str):
tokens = tokenizer.tokenize(data[cur_ID])
tokenized_data[cur_ID] = len(all_tokens_matrix)
token_ids = tokenizer.convert_tokens_to_ids(
[tokenizer.cls_token] + tokens + [tokenizer.sep_token]
)
ndiff = max_seq_len - len(token_ids)
if ndiff > 0:
token_ids += [tokenizer.pad_token_id for _ in range(ndiff)]
elif ndiff < 0:
token_ids = token_ids[:max_seq_len]
all_tokens_matrix.append(token_ids)
else:
text_idx_list = []
for cur_text in data[cur_ID][0]:
tokens = tokenizer.tokenize(cur_text)
token_ids = tokenizer.convert_tokens_to_ids(
[tokenizer.cls_token] + tokens + [tokenizer.sep_token]
)
ndiff = max_seq_len - len(token_ids)
if ndiff > 0:
token_ids += [tokenizer.pad_token_id for _ in range(ndiff)]
elif ndiff < 0:
token_ids = token_ids[:max_seq_len]
text_idx_list.append(len(all_tokens_matrix))
all_tokens_matrix.append(token_ids)
tokenized_data[cur_ID] = (text_idx_list, data[cur_ID][1], data[cur_ID][2])
return tokenized_data, np.array(all_tokens_matrix, dtype=np.int32)
def print_info_about_data(
data: Union[List[Dict[str, str]], List[Dict[str, Tuple[List[str], float, float]]]],
identifiers: List[str],
):
for_training = isinstance(data[identifiers[0]], tuple)
if for_training:
print(f"Number of samples for training is {len(data)}.")
else:
print(f"Number of samples for submission is {len(data)}.")
print("")
print(f"{len(identifiers)} random samples:")
for cur_id in identifiers:
print("")
print(f" Sample {cur_id}")
if for_training:
print(" Text:")
print(f" {data[cur_id][0][0]}")
print(f" Number of augmented texts is {len(data[cur_id][0]) - 1}.")
if (len(data[cur_id][0]) - 1) > 0:
if (len(data[cur_id][0]) - 1) > 1:
print(" 2 augmented texts:")
for augmented in data[cur_id][0][1:3]:
print(f" {augmented}")
else:
print(" Augmented text:")
for augmented in data[cur_id][0][1:2]:
print(f" {augmented}")
print(" Target:")
print(f" {data[cur_id][1]} +- {data[cur_id][2]}")
else:
print(f" {data[cur_id]}")
def print_info_about_tokenized_data(
data: Union[Dict[str, int], Dict[str, Tuple[List[int], float, float]]],
matrix: np.ndarray,
identifiers: List[str],
):
for_training = isinstance(data[identifiers[0]], tuple)
if for_training:
print(f"Number of tokenized samples for training is {len(data)}.")
else:
print(f"Number of tokenized samples for submission is {len(data)}.")
print("")
print(f"{len(identifiers)} random samples:")
for cur_id in identifiers:
print("")
print(f"Sample {cur_id}")
if for_training:
sample_idx = data[cur_id][0][0]
else:
sample_idx = data[cur_id]
print(matrix[sample_idx].tolist())
def find_median_distance_between_pairs(
data: Dict[str, Tuple[List[int], float, float]], identifiers: List[str]
) -> float:
distances = []
assert len(identifiers) == len(set(identifiers))
for idx, first_id in enumerate(identifiers):
first_target = data[first_id][1]
for second_id in identifiers[(idx + 1) :]:
second_target = data[second_id][1]
distances.append(abs(first_target - second_target))
distances.sort()
distances = np.array(distances, dtype=np.float32)
n = distances.shape[0]
print("Mean distance between training pairs is {0:.5f}.".format(np.mean(distances)))
print(
"Minimal distance between training pairs is {0:.5f}.".format(np.min(distances))
)
print(
"Maximal distance between training pairs is {0:.5f}.".format(np.max(distances))
)
print(
"Median distance between training pairs is {0:.5f}.".format(
distances[(n - 1) // 2]
)
)
return distances[(n - 1) // 2]
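# Toy check (made-up targets) of the median pair-distance helper defined above:
# the pairwise distances are 1, 2 and 3, so the returned median is 2.0.
_toy_pairs = {"a": ([0], -1.0, 0.1), "b": ([1], 0.0, 0.1), "c": ([2], 2.0, 0.1)}
print(find_median_distance_between_pairs(_toy_pairs, ["a", "b", "c"]))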
def tf_euclidean_distance(vects):
x, y = vects
sum_square = tf.keras.backend.sum(
tf.keras.backend.square(x - y), axis=1, keepdims=True
)
return tf.keras.backend.sqrt(
tf.keras.backend.maximum(sum_square, tf.keras.backend.epsilon())
)
def tf_eucl_dist_output_shape(shapes):
shape1, shape2 = shapes
return (shape1[0], 1)
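# Sanity check with toy tensors: the Lambda distance above matches the Euclidean norm
# of the difference between two embedding batches.
_a = tf.constant([[1.0, 0.0], [0.0, 1.0]])
_b = tf.constant([[0.0, 0.0], [0.0, 0.0]])
print(tf_euclidean_distance((_a, _b)).numpy().ravel())  # [1. 1.]
print(np.linalg.norm((_a - _b).numpy(), axis=1))  # [1. 1.]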
def build_neural_network(
bert_name: str, max_seq_len: int, batch_size: int
) -> Tuple[tf.keras.Model, tf.keras.Model, tf.keras.Model]:
transformer_model = TFAutoModel.from_pretrained(
pretrained_model_name_or_path=bert_name, name="DistilTransformer"
)
transformer_config = AutoConfig.from_pretrained(bert_name)
print("Transformer Configuration")
print("=========================")
print(transformer_config)
left_tokens = tf.keras.layers.Input(
shape=(max_seq_len,), batch_size=batch_size, dtype=tf.int32, name="word_ids"
)
left_attention = tf.keras.layers.Input(
shape=(max_seq_len,),
batch_size=batch_size,
dtype=tf.int32,
name="attention_mask",
)
right_tokens = tf.keras.layers.Input(
shape=(max_seq_len,),
batch_size=batch_size,
dtype=tf.int32,
name="right_word_ids",
)
right_attention = tf.keras.layers.Input(
shape=(max_seq_len,),
batch_size=batch_size,
dtype=tf.int32,
name="right_attention_mask",
)
left_sequence_output = transformer_model([left_tokens, left_attention])[0]
left_output_mask = MaskCalculator(
output_dim=transformer_config.hidden_size,
trainable=False,
name="OutMaskCalculator",
)(left_attention)
left_masked_output = tf.keras.layers.Multiply(name="OutMaskMultiplicator")(
[left_output_mask, left_sequence_output]
)
left_masked_output = tf.keras.layers.Masking(name="OutMasking")(left_masked_output)
left_output = tf.keras.layers.GlobalAvgPool1D(name="AvePool")(left_masked_output)
left_output = tf.keras.layers.Lambda(
function=lambda x: tf.math.l2_normalize(x, axis=1), name="Emdedding"
)(left_output)
right_sequence_output = transformer_model([right_tokens, right_attention])[0]
right_output_mask = MaskCalculator(
output_dim=transformer_config.hidden_size,
trainable=False,
name="OutMaskCalculator_right",
)(right_attention)
right_masked_output = tf.keras.layers.Multiply(name="OutMaskMultiplicator_right")(
[right_output_mask, right_sequence_output]
)
right_masked_output = tf.keras.layers.Masking(name="OutMasking_right")(
right_masked_output
)
right_output = tf.keras.layers.GlobalAvgPool1D(name="AvePool_right")(
right_masked_output
)
right_output = tf.keras.layers.Lambda(
lambda x: tf.math.l2_normalize(x, axis=1), name="Emdedding_right"
)(right_output)
distance_output = tf.keras.layers.Lambda(
function=tf_euclidean_distance,
output_shape=tf_eucl_dist_output_shape,
name="L2DistLayer",
)([left_output, right_output])
regression_layer = tf.keras.layers.Dense(
units=1,
input_dim=transformer_config.hidden_size,
activation=None,
kernel_initializer=tf.keras.initializers.GlorotNormal(seed=42),
bias_initializer="zeros",
kernel_regularizer=tf.keras.regularizers.l1_l2(l1=1e-4, l2=1e-3),
name="RegressionLayer",
)
left_regression_output = regression_layer(left_output)
right_regression_output = regression_layer(right_output)
regression_model = tf.keras.Model(
inputs=[left_tokens, left_attention],
outputs=left_regression_output,
name="RegressionModel",
)
regression_model.build(
input_shape=[(batch_size, max_seq_len), (batch_size, max_seq_len)]
)
feature_extraction_model = tf.keras.Model(
inputs=[left_tokens, left_attention],
outputs=left_output,
name="FeatureExtractionModel",
)
feature_extraction_model.build(
input_shape=[(batch_size, max_seq_len), (batch_size, max_seq_len)]
)
siamese_model = tf.keras.Model(
inputs=[left_tokens, left_attention, right_tokens, right_attention],
outputs=[distance_output, left_regression_output, right_regression_output],
name="SiameseModel",
)
radam = tfa.optimizers.RectifiedAdam(learning_rate=1e-5)
ranger = tfa.optimizers.Lookahead(radam, sync_period=6, slow_step_size=0.5)
losses = [
DBLLogLoss(),
tf.keras.losses.MeanSquaredError(),
tf.keras.losses.MeanSquaredError(),
]
loss_weights = [0.7, 1.0, 1.0]
siamese_model.compile(optimizer=ranger, loss=losses, loss_weights=loss_weights)
return siamese_model, regression_model, feature_extraction_model
def show_minibatch(X: List[np.ndarray], y: List[np.ndarray]):
assert len(X) == 4
assert len(y) == 3
print("")
print("X1")
for it in X[0].tolist():
print(it)
print("")
print("X2")
for it in X[1].tolist():
print(it)
print("")
print("X3")
for it in X[2].tolist():
print(it)
print("")
print("X4")
for it in X[3].tolist():
print(it)
print("")
print("y1")
for it in y[0].tolist():
print(it)
print("")
print("y2")
for it in y[1].tolist():
print(it)
print("")
print("y3")
for it in y[2].tolist():
print(it)
def show_tsne(
fe: tf.keras.Model,
batch_size: int,
data: Dict[str, Tuple[List[int], float, float]],
token_matrix: np.ndarray,
identifiers: List[str],
pad_id: int,
title: str,
figure_id: int,
):
indices = list(map(lambda it: data[it][0][0], identifiers))
colors = np.array(list(map(lambda it: data[it][1], identifiers)), dtype=np.float64)
area = np.array(list(map(lambda it: data[it][2], identifiers)), dtype=np.float64)
area /= np.max(area)
area *= 10.0
area = np.power(area, 2)
texts = token_matrix[indices]
ndiff = texts.shape[0] % batch_size
if ndiff > 0:
last_text_idx = texts.shape[0] - 1
texts = np.vstack(
[texts]
+ [
texts[last_text_idx : (last_text_idx + 1)]
for _ in range(batch_size - ndiff)
]
)
attentions = generate_attention_mask(texts, pad_id)
assert texts.shape[0] % batch_size == 0, f"{texts.shape[0] % batch_size}"
features = fe.predict([texts, attentions], batch_size=batch_size)
features = features[: len(indices)]
projected_features = TSNE(n_components=2, n_jobs=-1).fit_transform(features)
fig = plt.figure(figure_id, figsize=(11, 11))
plt.scatter(
x=projected_features[:, 0],
y=projected_features[:, 1],
marker="o",
cmap=plt.cm.get_cmap("jet"),
s=area,
c=colors,
norm=Normalize(vmin=np.min(colors), vmax=np.max(colors)),
)
plt.title("t-SNE projections of texts " + title)
plt.colorbar()
plt.show()
def show_training_process(
history: tf.keras.callbacks.History, metric_name: str, figure_id: int
):
val_metric_name = "val_" + metric_name
possible_metrics = list(history.history.keys())
if metric_name not in history.history:
err_msg = f'The metric "{metric_name}" is not found!'
err_msg += f" Available metrics are: {possible_metrics}."
raise ValueError(err_msg)
fig = plt.figure(figure_id, figsize=(7, 7))
metric_values = history.history[metric_name]
plt.plot(
list(range(len(metric_values))),
metric_values,
label="Training {0}".format(metric_name),
)
if val_metric_name in history.history:
val_metric_values = history.history["val_" + metric_name]
assert len(metric_values) == len(val_metric_values)
plt.plot(
list(range(len(val_metric_values))),
val_metric_values,
label="Validation {0}".format(metric_name),
)
plt.xlabel("Epochs")
plt.ylabel(metric_name)
plt.title("Training process")
plt.legend(loc="best")
plt.show()
def do_predictions(
regressor: tf.keras.Model,
output_scaler: StandardScaler,
batch_size: int,
data: Union[Dict[str, int], Dict[str, Tuple[List[int], float, float]]],
token_matrix: np.ndarray,
pad_id: int,
identifiers: Union[List[str], None] = None,
) -> Dict[str, float]:
if identifiers is None:
identifiers_ = sorted(list(data.keys()))
else:
identifiers_ = sorted(identifiers)
indices = list(
map(
lambda it: data[it] if isinstance(data[it], int) else data[it][0][0],
identifiers_,
)
)
texts = token_matrix[indices]
ndiff = texts.shape[0] % batch_size
if ndiff > 0:
last_text_idx = texts.shape[0] - 1
texts = np.vstack(
[texts]
+ [
texts[last_text_idx : (last_text_idx + 1)]
for _ in range(batch_size - ndiff)
]
)
attentions = generate_attention_mask(texts, pad_id)
assert texts.shape[0] % batch_size == 0, f"{texts.shape[0] % batch_size}"
predictions = regressor.predict([texts, attentions], batch_size=batch_size)
predictions = np.reshape(predictions, newshape=(predictions.shape[0], 1))
predictions = output_scaler.inverse_transform(predictions)
return dict(
map(lambda idx: (identifiers_[idx], predictions[idx, 0]), range(len(indices)))
)
random.seed(42)
np.random.seed(42)
tf.random.set_seed(42)
MAX_TEXT_LEN = 128
PRETRAINED_BERT = "/kaggle/input/tfdistilbertbaseuncased"
MINIBATCH_SIZE = 32
DATA_DIR = "/kaggle/input/commonlitreadabilityprize"
MODEL_DIR = "/kaggle/working"
print(f"{DATA_DIR} {os.path.isdir(DATA_DIR)}")
print(f"{MODEL_DIR} {os.path.isdir(MODEL_DIR)}")
trainset_name = os.path.join(DATA_DIR, "train.csv")
print(f"{trainset_name} {os.path.isfile(trainset_name)}")
testset_name = os.path.join(DATA_DIR, "test.csv")
print(f"{testset_name} {os.path.isfile(testset_name)}")
submission_name = os.path.join(MODEL_DIR, "submission.csv")
print(f"{submission_name} {os.path.isfile(submission_name)}")
siamese_model_name = os.path.join(MODEL_DIR, "siamese_nn.h5")
regression_model_name = os.path.join(MODEL_DIR, "regression_nn.h5")
scaler_name = os.path.join(MODEL_DIR, "output_scaler.pkl")
figure_identifier = 1
data_for_training = load_data_for_training(trainset_name)
assert len(data_for_training) > 100
all_IDs = sorted(list(data_for_training.keys()))
selected_IDs_for_training = random.sample(population=all_IDs, k=3)
print_info_about_data(data_for_training, selected_IDs_for_training)
pretrained_tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_BERT)
print(f"Vocabulary size is {pretrained_tokenizer.vocab_size}.")
labels_for_training, tokens_for_training = tokenize_data(
data=data_for_training, tokenizer=pretrained_tokenizer, max_seq_len=MAX_TEXT_LEN
)
print_info_about_tokenized_data(
data=labels_for_training,
matrix=tokens_for_training,
identifiers=selected_IDs_for_training,
)
label_scaler = train_output_scaler(labels_for_training)
with open(scaler_name, "wb") as scaler_fp:
pickle.dump(label_scaler, scaler_fp)
random.shuffle(all_IDs)
n_train_size = int(round(len(all_IDs) * 0.85))
n_val_size = int(round(len(all_IDs) * 0.04))
IDs_for_training = all_IDs[:n_train_size]
IDs_for_validation = all_IDs[n_train_size : (n_train_size + n_val_size)]
IDs_for_final_testing = all_IDs[(n_train_size + n_val_size) :]
median_dist = find_median_distance_between_pairs(
data=labels_for_training, identifiers=IDs_for_training
)
datagen_for_validation = DatasetGen(
data=labels_for_training,
data_IDs=IDs_for_validation,
token_indices=tokens_for_training,
median_distance_between_pairs=median_dist,
pad_token_id=pretrained_tokenizer.pad_token_id,
batch_size=MINIBATCH_SIZE,
output_scaler=label_scaler,
)
n_batches_per_validset = len(datagen_for_validation)
print(f"Mini-batches per validation set is {n_batches_per_validset}.")
X_, y_, _ = datagen_for_validation[0]
show_minibatch(X_, y_)
n_batches_per_epoch = n_batches_per_validset * 2
datagen_for_training = DatasetGen(
data=labels_for_training,
data_IDs=IDs_for_training,
token_indices=tokens_for_training,
median_distance_between_pairs=median_dist,
pad_token_id=pretrained_tokenizer.pad_token_id,
batch_size=MINIBATCH_SIZE,
batches_per_epoch=n_batches_per_epoch,
output_scaler=label_scaler,
)
X_, y_, _ = datagen_for_training[0]
show_minibatch(X_, y_)
model_for_training, model_for_inference, model_for_fe = build_neural_network(
bert_name=PRETRAINED_BERT, max_seq_len=MAX_TEXT_LEN, batch_size=MINIBATCH_SIZE
)
model_for_training.summary()
model_for_inference.summary()
model_for_fe.summary()
show_tsne(
fe=model_for_fe,
batch_size=MINIBATCH_SIZE,
data=labels_for_training,
token_matrix=tokens_for_training,
identifiers=IDs_for_validation + IDs_for_final_testing,
pad_id=pretrained_tokenizer.pad_token_id,
title="before training",
figure_id=figure_identifier,
)
figure_identifier += 1
predictions_for_testing = do_predictions(
regressor=model_for_inference,
output_scaler=label_scaler,
batch_size=MINIBATCH_SIZE,
data=labels_for_training,
token_matrix=tokens_for_training,
pad_id=pretrained_tokenizer.pad_token_id,
identifiers=IDs_for_final_testing,
)
error = 0.0
for cur_id in IDs_for_final_testing:
difference = predictions_for_testing[cur_id] - labels_for_training[cur_id][1]
error += difference * difference
error /= float(len(IDs_for_final_testing))
error = np.sqrt(error)
print(f"RMSE before training = {error}")
del predictions_for_testing, error
callbacks = [
tf.keras.callbacks.ModelCheckpoint(
filepath=siamese_model_name,
save_best_only=True,
monitor="val_loss",
save_weights_only=True,
verbose=True,
),
tf.keras.callbacks.EarlyStopping(
monitor="val_loss", patience=7, verbose=True, restore_best_weights=True
),
tfa.callbacks.TimeStopping(seconds=int(round(3600 * 2.6)), verbose=True),
]
history = model_for_training.fit(
datagen_for_training,
validation_data=datagen_for_validation,
epochs=1000,
callbacks=callbacks,
)
model_for_training.load_weights(siamese_model_name)
model_for_inference.save_weights(regression_model_name)
os.remove(siamese_model_name)
show_training_process(history, "loss", figure_identifier)
figure_identifier += 1
show_tsne(
fe=model_for_fe,
batch_size=MINIBATCH_SIZE,
data=labels_for_training,
token_matrix=tokens_for_training,
identifiers=IDs_for_validation + IDs_for_final_testing,
pad_id=pretrained_tokenizer.pad_token_id,
title="after training",
figure_id=figure_identifier,
)
figure_identifier += 1
predictions_for_testing = do_predictions(
regressor=model_for_inference,
output_scaler=label_scaler,
batch_size=MINIBATCH_SIZE,
data=labels_for_training,
token_matrix=tokens_for_training,
pad_id=pretrained_tokenizer.pad_token_id,
identifiers=IDs_for_final_testing,
)
error = 0.0
for cur_id in IDs_for_final_testing:
difference = predictions_for_testing[cur_id] - labels_for_training[cur_id][1]
error += difference * difference
error /= float(len(IDs_for_final_testing))
error = np.sqrt(error)
print(f"RMSE after training = {error}")
del datagen_for_training, datagen_for_validation
del labels_for_training, tokens_for_training
del data_for_training
del IDs_for_training, IDs_for_validation, IDs_for_final_testing
del model_for_training
gc.collect()
with codecs.open(submission_name, mode="w", encoding="utf-8") as fp:
data_writer = csv.writer(fp, quotechar='"', delimiter=",")
data_writer.writerow(["id", "target"])
for data_part in load_data_for_testing(testset_name, MINIBATCH_SIZE * 8):
labels_for_submission, tokens_for_submission = tokenize_data(
data=data_part, tokenizer=pretrained_tokenizer, max_seq_len=MAX_TEXT_LEN
)
del data_part
predictions_for_submission = do_predictions(
regressor=model_for_inference,
output_scaler=label_scaler,
batch_size=MINIBATCH_SIZE,
data=labels_for_submission,
token_matrix=tokens_for_submission,
pad_id=pretrained_tokenizer.pad_token_id,
)
for cur_id in predictions_for_submission:
data_writer.writerow([cur_id, f"{predictions_for_submission[cur_id]}"])
del predictions_for_submission
del labels_for_submission, tokens_for_submission
gc.collect()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/207/69207642.ipynb
|
tfdistilbertbaseuncased
|
anjalia8
|
[{"Id": 69207642, "ScriptId": 18719667, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 763106, "CreationDate": "07/28/2021 05:45:57", "VersionNumber": 34.0, "Title": "readability-bert", "EvaluationDate": "07/28/2021", "IsChange": true, "TotalLines": 1076.0, "LinesInsertedFromPrevious": 6.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 1070.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 92097935, "KernelVersionId": 69207642, "SourceDatasetVersionId": 2248198}]
|
[{"Id": 2248198, "DatasetId": 1352088, "DatasourceVersionId": 2289111, "CreatorUserId": 1981001, "LicenseName": "Unknown", "CreationDate": "05/19/2021 05:28:54", "VersionNumber": 1.0, "Title": "tf-distilbert-base-uncased", "Slug": "tfdistilbertbaseuncased", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1352088, "CreatorUserId": 1981001, "OwnerUserId": 1981001.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2248198.0, "CurrentDatasourceVersionId": 2289111.0, "ForumId": 1371127, "Type": 2, "CreationDate": "05/19/2021 05:28:54", "LastActivityDate": "05/19/2021", "TotalViews": 835, "TotalDownloads": 6, "TotalVotes": 3, "TotalKernels": 2}]
|
[{"Id": 1981001, "UserName": "anjalia8", "DisplayName": "Anjali Agarwal", "RegisterDate": "06/10/2018", "PerformanceTier": 1}]
|
import codecs
import copy
import csv
import gc
from itertools import chain
import os
import pickle
import random
from typing import Dict, List, Tuple, Union
import warnings
import matplotlib.pyplot as plt
from matplotlib.colors import Normalize
import nltk
from nltk.corpus import wordnet
import numpy as np
from sklearn.manifold import TSNE
from sklearn.preprocessing import StandardScaler
import tensorflow as tf
from tensorflow.python.framework import ops, tensor_util
from tensorflow.python.keras.utils import losses_utils, tf_utils
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.losses import util as tf_losses_util
import tensorflow_addons as tfa
from transformers import AutoConfig, AutoTokenizer, TFAutoModel
print(tf.__version__)
# Author: Joseph Sefara
# URL: <https://github.com/dsfsi/textaugment/>
class Wordnet:
"""
A set of functions used to augment data.
Typical usage: ::
>>> import nltk
>>> nltk.download('punkt')
>>> nltk.download('wordnet')
>>> nltk.download('averaged_perceptron_tagger')
>>> from textaugment import Wordnet
>>> t = Wordnet(v=True,n=True,p=0.5)
>>> t.augment('I love school')
i adore school
"""
def __init__(self, **kwargs):
"""
A method to initialize parameters
:type random_state: int
:param random_state: seed
:type v: bool
:param v: Verb, default is True
:type n: bool
:param n: Noun
:type runs: int
:param runs: Number of repetition on single text
:type p: float, optional
:param p: The probability of success of an individual trial. (0.1<p<1.0), default is 0.5
:rtype: None
:return: Constructer do not return.
"""
# Set random state
if "random_state" in kwargs:
self.random_state = kwargs["random_state"]
if isinstance(self.random_state, int):
np.random.seed(self.random_state)
else:
raise TypeError(
"random_state must have type int, float, str, bytes, or bytearray"
)
# Set verb to be default if no values given
try:
if "v" not in kwargs and "n" not in kwargs:
kwargs["v"] = True
kwargs["n"] = False
elif "v" in kwargs and "n" not in kwargs:
kwargs["v"] = True
kwargs["n"] = False
elif "v" not in kwargs and "n" in kwargs:
kwargs["n"] = True
kwargs["v"] = False
if "runs" not in kwargs:
kwargs["runs"] = 1
except KeyError:
raise
try:
if "p" in kwargs:
if type(kwargs["p"]) is not float:
raise TypeError(
"p represent probability of success and must be a float from 0.1 to 0.9. E.g p=0.5"
)
elif type(kwargs["p"]) is float:
self.p = kwargs["p"]
else:
kwargs["p"] = 0.5 # Set default value
except KeyError:
raise
self.p = kwargs["p"]
self.v = kwargs["v"]
self.n = kwargs["n"]
self.runs = kwargs["runs"]
def geometric(self, data):
"""
Used to generate Geometric distribution.
:type data: list
:param data: Input data
:rtype: ndarray or scalar
:return: Drawn samples from the parameterized Geometric distribution.
"""
data = np.array(data)
first_trial = (
np.random.geometric(p=self.p, size=data.shape[0]) == 1
) # Capture success after first trial
return data[first_trial]
def replace(self, data):
"""
The method to replace words with synonyms
:type data: str
:param data: sentence used for data augmentation
:rtype: str
:return: The augmented data
"""
data = data.lower().split()
data_tokens = [
[i, x, y] for i, (x, y) in enumerate(nltk.pos_tag(data))
] # Convert tuple to list
if self.v:
for loop in range(self.runs):
words = [[i, x] for i, x, y in data_tokens if y[0] == "V"]
words = [
i for i in self.geometric(data=words)
] # List of selected words
if len(words) >= 1: # There are synonyms
for word in words:
synonyms1 = wordnet.synsets(
word[1], wordnet.VERB
) # Return verbs only
synonyms = list(
set(
chain.from_iterable(
[syn.lemma_names() for syn in synonyms1]
)
)
)
synonyms_ = [] # Synonyms with no underscores goes here
for w in synonyms:
if "_" not in w:
synonyms_.append(w) # Remove words with underscores
if len(synonyms_) >= 1:
synonym = self.geometric(data=synonyms_).tolist()
if synonym: # There is a synonym
data[int(word[0])] = synonym[
0
].lower() # Take the first success
if self.n:
for loop in range(self.runs):
words = [[i, x] for i, x, y in data_tokens if y[0] == "N"]
words = [
i for i in self.geometric(data=words)
] # List of selected words
if len(words) >= 1: # There are synonyms
for word in words:
synonyms1 = wordnet.synsets(
word[1], wordnet.NOUN
) # Return nouns only
synonyms = list(
set(
chain.from_iterable(
[syn.lemma_names() for syn in synonyms1]
)
)
)
synonyms_ = [] # Synonyms with no underscores goes here
for w in synonyms:
if "_" not in w:
synonyms_.append(w) # Remove words with underscores
if len(synonyms_) >= 1:
synonym = self.geometric(data=synonyms_).tolist()
if synonym: # There is a synonym
data[int(word[0])] = synonym[
0
].lower() # Take the first success
return " ".join(data)
def augment(self, data):
"""
Data augmentation for text. Generate new dataset based on verb/nouns synonyms.
:type data: str
:param data: sentence used for data augmentation
:rtype: str
:return: The augmented data
"""
# Error handling
if type(data) is not str:
raise TypeError("Only strings are supported")
data = self.replace(data)
return data
class LossFunctionWrapper(tf.keras.losses.Loss):
def __init__(
self, fn, reduction=losses_utils.ReductionV2.AUTO, name=None, **kwargs
):
super(LossFunctionWrapper, self).__init__(reduction=reduction, name=name)
self.fn = fn
self._fn_kwargs = kwargs
def call(self, y_true, y_pred):
if tensor_util.is_tensor(y_pred) and tensor_util.is_tensor(y_true):
y_pred, y_true = tf_losses_util.squeeze_or_expand_dimensions(y_pred, y_true)
return self.fn(y_true, y_pred, **self._fn_kwargs)
def get_config(self):
config = {}
        for k, v in self._fn_kwargs.items():  # plain dict iteration (six is not imported)
config[k] = (
tf.keras.backend.eval(v) if tf_utils.is_tensor_or_variable(v) else v
)
base_config = super(LossFunctionWrapper, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def distance_based_log_loss(y_true, y_pred):
y_pred = ops.convert_to_tensor(y_pred)
y_true = math_ops.cast(y_true, y_pred.dtype)
margin = 1.0
p = (1.0 + tf.math.exp(-margin)) / (1.0 + tf.math.exp(y_pred - margin))
    # tf.keras.losses.binary_crossentropy supports the label_smoothing keyword used here.
    return tf.keras.losses.binary_crossentropy(y_true, p, label_smoothing=0.05)
class DBLLogLoss(LossFunctionWrapper):
def __init__(
self, reduction=losses_utils.ReductionV2.AUTO, name="distance_based_log_loss"
):
super(DBLLogLoss, self).__init__(
distance_based_log_loss, name=name, reduction=reduction
)
class MaskCalculator(tf.keras.layers.Layer):
def __init__(self, output_dim, **kwargs):
self.output_dim = output_dim
super(MaskCalculator, self).__init__(**kwargs)
def build(self, input_shape):
super(MaskCalculator, self).build(input_shape)
def call(self, inputs, **kwargs):
return tf.keras.backend.permute_dimensions(
x=tf.keras.backend.repeat(
x=tf.keras.backend.cast(
x=tf.keras.backend.greater(x=inputs, y=0), dtype="float32"
),
n=self.output_dim,
),
pattern=(0, 2, 1),
)
def compute_output_shape(self, input_shape):
assert len(input_shape) == 1
shape = list(input_shape)
shape.append(self.output_dim)
return tuple(shape)
class DatasetGen(tf.keras.utils.Sequence):
def __init__(
self,
data: Dict[str, Tuple[List[int], float, float]],
data_IDs: List[str],
output_scaler: StandardScaler,
token_indices: np.ndarray,
pad_token_id: int,
median_distance_between_pairs: float,
batch_size: int,
batches_per_epoch: Union[int, None] = None,
):
self.data = copy.deepcopy(data)
self.token_indices = token_indices
self.pad_token_id = pad_token_id
self.batch_size = batch_size
self.median_distance_between_pairs = median_distance_between_pairs
self.batches_per_epoch = batches_per_epoch
self.output_scaler = output_scaler
self.pairs = set()
for key1 in data_IDs:
for key2 in data_IDs:
if key1 == key2:
continue
if ((key1, key2) not in self.pairs) and (
(key2, key1) not in self.pairs
):
self.pairs.add((key1, key2))
self.pairs = sorted(list(self.pairs))
def __len__(self):
if self.batches_per_epoch is None:
return int(np.ceil(len(self.pairs) / float(self.batch_size)))
return self.batches_per_epoch
def __getitem__(self, idx):
x_left = np.zeros(
shape=(self.batch_size, self.token_indices.shape[1]), dtype=np.int32
)
x_right = np.zeros(
shape=(self.batch_size, self.token_indices.shape[1]), dtype=np.int32
)
batch_y = [
np.zeros((self.batch_size, 1), dtype=np.int32),
np.zeros((self.batch_size, 1), dtype=np.float32),
np.zeros((self.batch_size, 1), dtype=np.float32),
]
if self.batches_per_epoch is None:
batch_start = idx * self.batch_size
batch_end = min(len(self.pairs), batch_start + self.batch_size)
for sample_idx in range(batch_end - batch_start):
left_key, right_key = self.pairs[sample_idx + batch_start]
left_idx = self.data[left_key][0][0]
left_target = self.data[left_key][1]
right_idx = self.data[right_key][0][0]
right_target = self.data[right_key][1]
x_left[sample_idx] = self.token_indices[left_idx]
x_right[sample_idx] = self.token_indices[right_idx]
if abs(left_target - right_target) < self.median_distance_between_pairs:
batch_y[0][sample_idx, 0] = 1
else:
batch_y[0][sample_idx, 0] = 0
batch_y[1][sample_idx, 0] = left_target
batch_y[2][sample_idx, 0] = right_target
n_pad = self.batch_size - (batch_end - batch_start)
if n_pad > 0:
for sample_idx in range(batch_end - batch_start, self.batch_size):
x_left[sample_idx] = x_left[sample_idx - 1]
x_right[sample_idx] = x_right[sample_idx - 1]
batch_y[0][sample_idx, 0] = batch_y[0][sample_idx - 1, 0]
batch_y[1][sample_idx, 0] = batch_y[1][sample_idx - 1, 0]
batch_y[2][sample_idx, 0] = batch_y[2][sample_idx - 1, 0]
else:
for sample_idx in range(self.batch_size):
left_key, right_key = random.choice(self.pairs)
left_idx = random.choice(self.data[left_key][0])
left_target = np.random.normal(
loc=self.data[left_key][1], scale=self.data[left_key][2]
)
right_idx = random.choice(self.data[right_key][0])
right_target = np.random.normal(
loc=self.data[right_key][1], scale=self.data[right_key][2]
)
x_left[sample_idx] = self.token_indices[left_idx]
x_right[sample_idx] = self.token_indices[right_idx]
if abs(left_target - right_target) < self.median_distance_between_pairs:
batch_y[0][sample_idx, 0] = 1
else:
batch_y[0][sample_idx, 0] = 0
batch_y[1][sample_idx, 0] = left_target
batch_y[2][sample_idx, 0] = right_target
batch_x = [
x_left,
generate_attention_mask(x_left, self.pad_token_id),
x_right,
generate_attention_mask(x_right, self.pad_token_id),
]
del x_left, x_right
batch_y[1] = self.output_scaler.transform(batch_y[1])
batch_y[2] = self.output_scaler.transform(batch_y[2])
return batch_x, batch_y, None
def generate_attention_mask(token_indices: np.ndarray, padding_id: int) -> np.ndarray:
attention = np.zeros(token_indices.shape, dtype=np.int32)
for sample_idx in range(token_indices.shape[0]):
for token_idx in range(token_indices.shape[1]):
if token_indices[sample_idx, token_idx] == padding_id:
break
attention[sample_idx, token_idx] = 1
return attention
def load_data_for_training(
fname: str,
) -> List[Dict[str, Tuple[List[str], float, float]]]:
loaded_header = []
id_col_idx = -1
text_col_idx = -1
target_col_idx = -1
std_col_idx = -1
line_idx = 1
data = dict()
set_of_texts = set()
t = Wordnet(v=True, n=True, p=0.5)
with codecs.open(fname, mode="r", encoding="utf-8") as fp:
data_reader = csv.reader(fp, quotechar='"', delimiter=",")
for row in data_reader:
if len(row) > 0:
err_msg = f"File {fname}: line {line_idx} is wrong!"
if len(loaded_header) == 0:
loaded_header = copy.copy(row)
try:
text_col_idx = loaded_header.index("excerpt")
except:
text_col_idx = -1
if text_col_idx <= 0:
raise ValueError(err_msg + ' Field "excerpt" is not found!')
try:
id_col_idx = loaded_header.index("id")
except:
id_col_idx = -1
if id_col_idx < 0:
raise ValueError(err_msg + ' Field "id" is not found!')
try:
target_col_idx = loaded_header.index("target")
except:
target_col_idx = -1
if target_col_idx < 0:
raise ValueError(err_msg + ' Field "target" is not found!')
try:
std_col_idx = loaded_header.index("standard_error")
except:
std_col_idx = -1
if std_col_idx < 0:
err_msg2 = f'{err_msg} Field "standard_error" is not found!'
raise ValueError(err_msg2)
else:
sample_id = row[id_col_idx]
if sample_id != sample_id.strip():
raise ValueError(err_msg + f" {sample_id} is wrong sample ID!")
if sample_id in data:
err_msg2 = f"{err_msg} {sample_id} is not unique sample ID!"
raise ValueError(err_msg2)
text = row[text_col_idx].replace("\r", "\n")
if len(text) == 0:
raise ValueError(err_msg + f" Text {sample_id} is empty!")
sentences = []
for paragraph in map(lambda it: it.strip(), text.split("\n")):
if len(paragraph) > 0:
sentences += nltk.sent_tokenize(paragraph)
if len(sentences) == 0:
raise ValueError(err_msg + f" Text {sample_id} is empty!")
text = " ".join([cur_sent.lower() for cur_sent in sentences])
if text.lower() in set_of_texts:
raise ValueError(err_msg + f" Text {sample_id} is not unique!")
set_of_texts.add(text.lower())
try:
target_val = float(row[target_col_idx])
ok = True
except:
target_val = 0.0
ok = False
if not ok:
err_msg2 = err_msg
err_msg2 += (
f" {row[target_col_idx]} is wrong target for "
f"text {sample_id}."
)
raise ValueError(err_msg2)
try:
std_val = float(row[std_col_idx])
ok = std_val > 0.0
except:
std_val = 0.0
ok = False
if not ok:
err_msg2 = err_msg
err_msg2 += (
f" {row[std_col_idx]} is wrong standard error"
f" for text {sample_id}."
)
warnings.warn(err_msg2)
else:
augmented_texts = set()
for _ in range(5):
new_augmented_text = []
for cur_sent in sentences:
new_sent = t.augment(cur_sent.lower()).strip().lower()
if len(new_sent) > 0:
new_augmented_text.append(new_sent)
assert len(new_augmented_text) > 0
random.shuffle(new_augmented_text)
new_augmented_text = " ".join(new_augmented_text)
if (len(new_augmented_text) > 0) and (
new_augmented_text.lower() not in set_of_texts
):
set_of_texts.add(new_augmented_text)
augmented_texts.add(new_augmented_text)
del new_augmented_text
if text in augmented_texts:
added_texts = [text] + list(augmented_texts - {text})
else:
added_texts = [text] + list(augmented_texts)
data[sample_id] = (added_texts, target_val, std_val)
line_idx += 1
return data
def load_data_for_testing(fname: str, batch_size: int):
loaded_header = []
id_col_idx = -1
text_col_idx = -1
target_col_idx = -1
std_col_idx = -1
line_idx = 1
data = dict()
with codecs.open(fname, mode="r", encoding="utf-8") as fp:
data_reader = csv.reader(fp, quotechar='"', delimiter=",")
for row in data_reader:
if len(row) > 0:
err_msg = f"File {fname}: line {line_idx} is wrong!"
if len(loaded_header) == 0:
loaded_header = copy.copy(row)
try:
text_col_idx = loaded_header.index("excerpt")
except:
text_col_idx = -1
if text_col_idx <= 0:
raise ValueError(err_msg + ' Field "excerpt" is not found!')
try:
id_col_idx = loaded_header.index("id")
except:
id_col_idx = -1
if id_col_idx < 0:
raise ValueError(err_msg + ' Field "id" is not found!')
else:
sample_id = row[id_col_idx]
if sample_id != sample_id.strip():
raise ValueError(err_msg + f" {sample_id} is wrong sample ID!")
if sample_id in data:
err_msg2 = f"{err_msg} {sample_id} is not unique sample ID!"
raise ValueError(err_msg2)
text = row[text_col_idx].replace("\n", " ").replace("\r", " ")
text = " ".join(text.split()).strip()
if len(text) == 0:
raise ValueError(err_msg + f" Text {sample_id} is empty!")
data[sample_id] = text
if len(data) >= batch_size:
yield data
del data
data = dict()
line_idx += 1
if len(data) > 0:
yield data
def train_output_scaler(
data: Dict[str, Tuple[List[int], float, float]]
) -> StandardScaler:
outputs_for_training = np.empty((len(data), 1), dtype=np.float64)
for idx, sample_id in enumerate(list(data.keys())):
outputs_for_training[idx, 0] = data[sample_id][1]
return StandardScaler().fit(outputs_for_training)
def tokenize_data(
data: Union[List[Dict[str, str]], List[Dict[str, Tuple[List[str], float, float]]]],
tokenizer: AutoTokenizer,
max_seq_len: int,
) -> Tuple[
Union[Dict[str, int], Dict[str, Tuple[List[int], float, float]]], np.ndarray
]:
tokenized_data = dict()
all_tokens_matrix = []
for sample_idx, cur_ID in enumerate(sorted(list(data.keys()))):
if isinstance(data[cur_ID], str):
tokens = tokenizer.tokenize(data[cur_ID])
tokenized_data[cur_ID] = len(all_tokens_matrix)
token_ids = tokenizer.convert_tokens_to_ids(
[tokenizer.cls_token] + tokens + [tokenizer.sep_token]
)
ndiff = max_seq_len - len(token_ids)
if ndiff > 0:
token_ids += [tokenizer.pad_token_id for _ in range(ndiff)]
elif ndiff < 0:
token_ids = token_ids[:max_seq_len]
all_tokens_matrix.append(token_ids)
else:
text_idx_list = []
for cur_text in data[cur_ID][0]:
tokens = tokenizer.tokenize(cur_text)
token_ids = tokenizer.convert_tokens_to_ids(
[tokenizer.cls_token] + tokens + [tokenizer.sep_token]
)
ndiff = max_seq_len - len(token_ids)
if ndiff > 0:
token_ids += [tokenizer.pad_token_id for _ in range(ndiff)]
elif ndiff < 0:
token_ids = token_ids[:max_seq_len]
text_idx_list.append(len(all_tokens_matrix))
all_tokens_matrix.append(token_ids)
tokenized_data[cur_ID] = (text_idx_list, data[cur_ID][1], data[cur_ID][2])
return tokenized_data, np.array(all_tokens_matrix, dtype=np.int32)
def print_info_about_data(
data: Union[List[Dict[str, str]], List[Dict[str, Tuple[List[str], float, float]]]],
identifiers: List[str],
):
for_training = isinstance(data[identifiers[0]], tuple)
if for_training:
print(f"Number of samples for training is {len(data)}.")
else:
print(f"Number of samples for submission is {len(data)}.")
print("")
print(f"{len(identifiers)} random samples:")
for cur_id in identifiers:
print("")
print(f" Sample {cur_id}")
if for_training:
print(" Text:")
print(f" {data[cur_id][0][0]}")
print(f" Number of augmented texts is {len(data[cur_id][0]) - 1}.")
if (len(data[cur_id][0]) - 1) > 0:
if (len(data[cur_id][0]) - 1) > 1:
print(" 2 augmented texts:")
for augmented in data[cur_id][0][1:3]:
print(f" {augmented}")
else:
print(" Augmented text:")
for augmented in data[cur_id][0][1:2]:
print(f" {augmented}")
print(" Target:")
print(f" {data[cur_id][1]} +- {data[cur_id][2]}")
else:
print(f" {data[cur_id]}")
def print_info_about_tokenized_data(
data: Union[Dict[str, int], Dict[str, Tuple[List[int], float, float]]],
matrix: np.ndarray,
identifiers: List[str],
):
for_training = isinstance(data[identifiers[0]], tuple)
if for_training:
print(f"Number of tokenized samples for training is {len(data)}.")
else:
print(f"Number of tokenized samples for submission is {len(data)}.")
print("")
print(f"{len(identifiers)} random samples:")
for cur_id in identifiers:
print("")
print(f"Sample {cur_id}")
if for_training:
sample_idx = data[cur_id][0][0]
else:
sample_idx = data[cur_id]
print(matrix[sample_idx].tolist())
def find_median_distance_between_pairs(
data: Dict[str, Tuple[List[int], float, float]], identifiers: List[str]
) -> float:
distances = []
assert len(identifiers) == len(set(identifiers))
for idx, first_id in enumerate(identifiers):
first_target = data[first_id][1]
for second_id in identifiers[(idx + 1) :]:
second_target = data[second_id][1]
distances.append(abs(first_target - second_target))
distances.sort()
distances = np.array(distances, dtype=np.float32)
n = distances.shape[0]
print("Mean distance between training pairs is {0:.5f}.".format(np.mean(distances)))
print(
"Minimal distance between training pairs is {0:.5f}.".format(np.min(distances))
)
print(
"Maximal distance between training pairs is {0:.5f}.".format(np.max(distances))
)
print(
"Median distance between training pairs is {0:.5f}.".format(
distances[(n - 1) // 2]
)
)
return distances[(n - 1) // 2]
def tf_euclidean_distance(vects):
x, y = vects
sum_square = tf.keras.backend.sum(
tf.keras.backend.square(x - y), axis=1, keepdims=True
)
return tf.keras.backend.sqrt(
tf.keras.backend.maximum(sum_square, tf.keras.backend.epsilon())
)
def tf_eucl_dist_output_shape(shapes):
shape1, shape2 = shapes
return (shape1[0], 1)
def build_neural_network(
bert_name: str, max_seq_len: int, batch_size: int
) -> Tuple[tf.keras.Model, tf.keras.Model, tf.keras.Model]:
transformer_model = TFAutoModel.from_pretrained(
pretrained_model_name_or_path=bert_name, name="DistilTransformer"
)
transformer_config = AutoConfig.from_pretrained(bert_name)
print("Transformer Configuration")
print("=========================")
print(transformer_config)
left_tokens = tf.keras.layers.Input(
shape=(max_seq_len,), batch_size=batch_size, dtype=tf.int32, name="word_ids"
)
left_attention = tf.keras.layers.Input(
shape=(max_seq_len,),
batch_size=batch_size,
dtype=tf.int32,
name="attention_mask",
)
right_tokens = tf.keras.layers.Input(
shape=(max_seq_len,),
batch_size=batch_size,
dtype=tf.int32,
name="right_word_ids",
)
right_attention = tf.keras.layers.Input(
shape=(max_seq_len,),
batch_size=batch_size,
dtype=tf.int32,
name="right_attention_mask",
)
left_sequence_output = transformer_model([left_tokens, left_attention])[0]
left_output_mask = MaskCalculator(
output_dim=transformer_config.hidden_size,
trainable=False,
name="OutMaskCalculator",
)(left_attention)
left_masked_output = tf.keras.layers.Multiply(name="OutMaskMultiplicator")(
[left_output_mask, left_sequence_output]
)
left_masked_output = tf.keras.layers.Masking(name="OutMasking")(left_masked_output)
left_output = tf.keras.layers.GlobalAvgPool1D(name="AvePool")(left_masked_output)
left_output = tf.keras.layers.Lambda(
function=lambda x: tf.math.l2_normalize(x, axis=1), name="Emdedding"
)(left_output)
right_sequence_output = transformer_model([right_tokens, right_attention])[0]
right_output_mask = MaskCalculator(
output_dim=transformer_config.hidden_size,
trainable=False,
name="OutMaskCalculator_right",
)(right_attention)
right_masked_output = tf.keras.layers.Multiply(name="OutMaskMultiplicator_right")(
[right_output_mask, right_sequence_output]
)
right_masked_output = tf.keras.layers.Masking(name="OutMasking_right")(
right_masked_output
)
right_output = tf.keras.layers.GlobalAvgPool1D(name="AvePool_right")(
right_masked_output
)
right_output = tf.keras.layers.Lambda(
lambda x: tf.math.l2_normalize(x, axis=1), name="Emdedding_right"
)(right_output)
distance_output = tf.keras.layers.Lambda(
function=tf_euclidean_distance,
output_shape=tf_eucl_dist_output_shape,
name="L2DistLayer",
)([left_output, right_output])
regression_layer = tf.keras.layers.Dense(
units=1,
input_dim=transformer_config.hidden_size,
activation=None,
kernel_initializer=tf.keras.initializers.GlorotNormal(seed=42),
bias_initializer="zeros",
kernel_regularizer=tf.keras.regularizers.l1_l2(l1=1e-4, l2=1e-3),
name="RegressionLayer",
)
left_regression_output = regression_layer(left_output)
right_regression_output = regression_layer(right_output)
regression_model = tf.keras.Model(
inputs=[left_tokens, left_attention],
outputs=left_regression_output,
name="RegressionModel",
)
regression_model.build(
input_shape=[(batch_size, max_seq_len), (batch_size, max_seq_len)]
)
feature_extraction_model = tf.keras.Model(
inputs=[left_tokens, left_attention],
outputs=left_output,
name="FeatureExtractionModel",
)
feature_extraction_model.build(
input_shape=[(batch_size, max_seq_len), (batch_size, max_seq_len)]
)
siamese_model = tf.keras.Model(
inputs=[left_tokens, left_attention, right_tokens, right_attention],
outputs=[distance_output, left_regression_output, right_regression_output],
name="SiameseModel",
)
radam = tfa.optimizers.RectifiedAdam(learning_rate=1e-5)
ranger = tfa.optimizers.Lookahead(radam, sync_period=6, slow_step_size=0.5)
losses = [
DBLLogLoss(),
tf.keras.losses.MeanSquaredError(),
tf.keras.losses.MeanSquaredError(),
]
loss_weights = [0.7, 1.0, 1.0]
siamese_model.compile(optimizer=ranger, loss=losses, loss_weights=loss_weights)
return siamese_model, regression_model, feature_extraction_model
def show_minibatch(X: List[np.ndarray], y: List[np.ndarray]):
assert len(X) == 4
assert len(y) == 3
print("")
print("X1")
for it in X[0].tolist():
print(it)
print("")
print("X2")
for it in X[1].tolist():
print(it)
print("")
print("X3")
for it in X[2].tolist():
print(it)
print("")
print("X4")
for it in X[3].tolist():
print(it)
print("")
print("y1")
for it in y[0].tolist():
print(it)
print("")
print("y2")
for it in y[1].tolist():
print(it)
print("")
print("y3")
for it in y[2].tolist():
print(it)
def show_tsne(
fe: tf.keras.Model,
batch_size: int,
data: Dict[str, Tuple[List[int], float, float]],
token_matrix: np.ndarray,
identifiers: List[str],
pad_id: int,
title: str,
figure_id: int,
):
indices = list(map(lambda it: data[it][0][0], identifiers))
colors = np.array(list(map(lambda it: data[it][1], identifiers)), dtype=np.float64)
area = np.array(list(map(lambda it: data[it][2], identifiers)), dtype=np.float64)
area /= np.max(area)
area *= 10.0
area = np.power(area, 2)
texts = token_matrix[indices]
ndiff = texts.shape[0] % batch_size
if ndiff > 0:
last_text_idx = texts.shape[0] - 1
texts = np.vstack(
[texts]
+ [
texts[last_text_idx : (last_text_idx + 1)]
for _ in range(batch_size - ndiff)
]
)
attentions = generate_attention_mask(texts, pad_id)
assert texts.shape[0] % batch_size == 0, f"{texts.shape[0] % batch_size}"
features = fe.predict([texts, attentions], batch_size=batch_size)
features = features[: len(indices)]
projected_features = TSNE(n_components=2, n_jobs=-1).fit_transform(features)
fig = plt.figure(figure_id, figsize=(11, 11))
plt.scatter(
x=projected_features[:, 0],
y=projected_features[:, 1],
marker="o",
cmap=plt.cm.get_cmap("jet"),
s=area,
c=colors,
norm=Normalize(vmin=np.min(colors), vmax=np.max(colors)),
)
plt.title("t-SNE projections of texts " + title)
plt.colorbar()
plt.show()
def show_training_process(
history: tf.keras.callbacks.History, metric_name: str, figure_id: int
):
val_metric_name = "val_" + metric_name
possible_metrics = list(history.history.keys())
if metric_name not in history.history:
err_msg = f'The metric "{metric_name}" is not found!'
err_msg += f" Available metrics are: {possible_metrics}."
raise ValueError(err_msg)
fig = plt.figure(figure_id, figsize=(7, 7))
metric_values = history.history[metric_name]
plt.plot(
list(range(len(metric_values))),
metric_values,
label="Training {0}".format(metric_name),
)
if val_metric_name in history.history:
val_metric_values = history.history["val_" + metric_name]
assert len(metric_values) == len(val_metric_values)
plt.plot(
list(range(len(val_metric_values))),
val_metric_values,
label="Validation {0}".format(metric_name),
)
plt.xlabel("Epochs")
plt.ylabel(metric_name)
plt.title("Training process")
plt.legend(loc="best")
plt.show()
def do_predictions(
regressor: tf.keras.Model,
output_scaler: StandardScaler,
batch_size: int,
data: Union[Dict[str, int], Dict[str, Tuple[List[int], float, float]]],
token_matrix: np.ndarray,
pad_id: int,
identifiers: Union[List[str], None] = None,
) -> Dict[str, float]:
if identifiers is None:
identifiers_ = sorted(list(data.keys()))
else:
identifiers_ = sorted(identifiers)
indices = list(
map(
lambda it: data[it] if isinstance(data[it], int) else data[it][0][0],
identifiers_,
)
)
texts = token_matrix[indices]
ndiff = texts.shape[0] % batch_size
if ndiff > 0:
last_text_idx = texts.shape[0] - 1
texts = np.vstack(
[texts]
+ [
texts[last_text_idx : (last_text_idx + 1)]
for _ in range(batch_size - ndiff)
]
)
attentions = generate_attention_mask(texts, pad_id)
assert texts.shape[0] % batch_size == 0, f"{texts.shape[0] % batch_size}"
predictions = regressor.predict([texts, attentions], batch_size=batch_size)
predictions = np.reshape(predictions, newshape=(predictions.shape[0], 1))
predictions = output_scaler.inverse_transform(predictions)
return dict(
map(lambda idx: (identifiers_[idx], predictions[idx, 0]), range(len(indices)))
)
random.seed(42)
np.random.seed(42)
tf.random.set_seed(42)
MAX_TEXT_LEN = 128
PRETRAINED_BERT = "/kaggle/input/tfdistilbertbaseuncased"
MINIBATCH_SIZE = 32
DATA_DIR = "/kaggle/input/commonlitreadabilityprize"
MODEL_DIR = "/kaggle/working"
print(f"{DATA_DIR} {os.path.isdir(DATA_DIR)}")
print(f"{MODEL_DIR} {os.path.isdir(MODEL_DIR)}")
trainset_name = os.path.join(DATA_DIR, "train.csv")
print(f"{trainset_name} {os.path.isfile(trainset_name)}")
testset_name = os.path.join(DATA_DIR, "test.csv")
print(f"{testset_name} {os.path.isfile(testset_name)}")
submission_name = os.path.join(MODEL_DIR, "submission.csv")
print(f"{submission_name} {os.path.isfile(submission_name)}")
siamese_model_name = os.path.join(MODEL_DIR, "siamese_nn.h5")
regression_model_name = os.path.join(MODEL_DIR, "regression_nn.h5")
scaler_name = os.path.join(MODEL_DIR, "output_scaler.pkl")
figure_identifier = 1
data_for_training = load_data_for_training(trainset_name)
assert len(data_for_training) > 100
all_IDs = sorted(list(data_for_training.keys()))
selected_IDs_for_training = random.sample(population=all_IDs, k=3)
print_info_about_data(data_for_training, selected_IDs_for_training)
pretrained_tokenizer = AutoTokenizer.from_pretrained(PRETRAINED_BERT)
print(f"Vocabulary size is {pretrained_tokenizer.vocab_size}.")
labels_for_training, tokens_for_training = tokenize_data(
data=data_for_training, tokenizer=pretrained_tokenizer, max_seq_len=MAX_TEXT_LEN
)
print_info_about_tokenized_data(
data=labels_for_training,
matrix=tokens_for_training,
identifiers=selected_IDs_for_training,
)
label_scaler = train_output_scaler(labels_for_training)
with open(scaler_name, "wb") as scaler_fp:
pickle.dump(label_scaler, scaler_fp)
random.shuffle(all_IDs)
n_train_size = int(round(len(all_IDs) * 0.85))
n_val_size = int(round(len(all_IDs) * 0.04))
IDs_for_training = all_IDs[:n_train_size]
IDs_for_validation = all_IDs[n_train_size : (n_train_size + n_val_size)]
IDs_for_final_testing = all_IDs[(n_train_size + n_val_size) :]
median_dist = find_median_distance_between_pairs(
data=labels_for_training, identifiers=IDs_for_training
)
datagen_for_validation = DatasetGen(
data=labels_for_training,
data_IDs=IDs_for_validation,
token_indices=tokens_for_training,
median_distance_between_pairs=median_dist,
pad_token_id=pretrained_tokenizer.pad_token_id,
batch_size=MINIBATCH_SIZE,
output_scaler=label_scaler,
)
n_batches_per_validset = len(datagen_for_validation)
print(f"Mini-batches per validation set is {n_batches_per_validset}.")
X_, y_, _ = datagen_for_validation[0]
show_minibatch(X_, y_)
n_batches_per_epoch = n_batches_per_validset * 2
datagen_for_training = DatasetGen(
data=labels_for_training,
data_IDs=IDs_for_training,
token_indices=tokens_for_training,
median_distance_between_pairs=median_dist,
pad_token_id=pretrained_tokenizer.pad_token_id,
batch_size=MINIBATCH_SIZE,
batches_per_epoch=n_batches_per_epoch,
output_scaler=label_scaler,
)
X_, y_, _ = datagen_for_training[0]
show_minibatch(X_, y_)
model_for_training, model_for_inference, model_for_fe = build_neural_network(
bert_name=PRETRAINED_BERT, max_seq_len=MAX_TEXT_LEN, batch_size=MINIBATCH_SIZE
)
model_for_training.summary()
model_for_inference.summary()
model_for_fe.summary()
show_tsne(
fe=model_for_fe,
batch_size=MINIBATCH_SIZE,
data=labels_for_training,
token_matrix=tokens_for_training,
identifiers=IDs_for_validation + IDs_for_final_testing,
pad_id=pretrained_tokenizer.pad_token_id,
title="before training",
figure_id=figure_identifier,
)
figure_identifier += 1
predictions_for_testing = do_predictions(
regressor=model_for_inference,
output_scaler=label_scaler,
batch_size=MINIBATCH_SIZE,
data=labels_for_training,
token_matrix=tokens_for_training,
pad_id=pretrained_tokenizer.pad_token_id,
identifiers=IDs_for_final_testing,
)
error = 0.0
for cur_id in IDs_for_final_testing:
difference = predictions_for_testing[cur_id] - labels_for_training[cur_id][1]
error += difference * difference
error /= float(len(IDs_for_final_testing))
error = np.sqrt(error)
print(f"RMSE before training = {error}")
del predictions_for_testing, error
callbacks = [
tf.keras.callbacks.ModelCheckpoint(
filepath=siamese_model_name,
save_best_only=True,
monitor="val_loss",
save_weights_only=True,
verbose=True,
),
tf.keras.callbacks.EarlyStopping(
monitor="val_loss", patience=7, verbose=True, restore_best_weights=True
),
tfa.callbacks.TimeStopping(seconds=int(round(3600 * 2.6)), verbose=True),
]
history = model_for_training.fit(
datagen_for_training,
validation_data=datagen_for_validation,
epochs=1000,
callbacks=callbacks,
)
model_for_training.load_weights(siamese_model_name)
model_for_inference.save_weights(regression_model_name)
os.remove(siamese_model_name)
show_training_process(history, "loss", figure_identifier)
figure_identifier += 1
show_tsne(
fe=model_for_fe,
batch_size=MINIBATCH_SIZE,
data=labels_for_training,
token_matrix=tokens_for_training,
identifiers=IDs_for_validation + IDs_for_final_testing,
pad_id=pretrained_tokenizer.pad_token_id,
title="after training",
figure_id=figure_identifier,
)
figure_identifier += 1
predictions_for_testing = do_predictions(
regressor=model_for_inference,
output_scaler=label_scaler,
batch_size=MINIBATCH_SIZE,
data=labels_for_training,
token_matrix=tokens_for_training,
pad_id=pretrained_tokenizer.pad_token_id,
identifiers=IDs_for_final_testing,
)
error = 0.0
for cur_id in IDs_for_final_testing:
difference = predictions_for_testing[cur_id] - labels_for_training[cur_id][1]
error += difference * difference
error /= float(len(IDs_for_final_testing))
error = np.sqrt(error)
print(f"RMSE after training = {error}")
del datagen_for_training, datagen_for_validation
del labels_for_training, tokens_for_training
del data_for_training
del IDs_for_training, IDs_for_validation, IDs_for_final_testing
del model_for_training
gc.collect()
with codecs.open(submission_name, mode="w", encoding="utf-8") as fp:
data_writer = csv.writer(fp, quotechar='"', delimiter=",")
data_writer.writerow(["id", "target"])
for data_part in load_data_for_testing(testset_name, MINIBATCH_SIZE * 8):
labels_for_submission, tokens_for_submission = tokenize_data(
data=data_part, tokenizer=pretrained_tokenizer, max_seq_len=MAX_TEXT_LEN
)
del data_part
predictions_for_submission = do_predictions(
regressor=model_for_inference,
output_scaler=label_scaler,
batch_size=MINIBATCH_SIZE,
data=labels_for_submission,
token_matrix=tokens_for_submission,
pad_id=pretrained_tokenizer.pad_token_id,
)
for cur_id in predictions_for_submission:
data_writer.writerow([cur_id, f"{predictions_for_submission[cur_id]}"])
del predictions_for_submission
del labels_for_submission, tokens_for_submission
gc.collect()
| false | 0 | 12,260 | 0 | 12,291 | 12,260 |
||
69393950
|
<jupyter_start><jupyter_text>Cotton Disease Dataset
Kaggle dataset identifier: cotton-disease-dataset
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
import matplotlib.pyplot as plt
train_data_path = "../input/cotton-disease-dataset/Cotton Disease/train"
test_data_path = "../input/cotton-disease-dataset/Cotton Disease/test"
def plotImages(images_arr):
fig, axes = plt.subplots(1, 5, figsize=(20, 20))
axes = axes.flatten()
for img, ax in zip(images_arr, axes):
ax.imshow(img)
plt.tight_layout()
plt.show()
training_datagen = ImageDataGenerator(
rescale=1.0 / 255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode="nearest",
)
training_data = training_datagen.flow_from_directory(
train_data_path, target_size=(150, 150), batch_size=32, class_mode="categorical"
)
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
test_data = test_datagen.flow_from_directory(
test_data_path, target_size=(150, 150), batch_size=32, class_mode="categorical"
)
images = [training_data[0][0][0] for i in range(5)]
plotImages(images)
model = keras.models.Sequential(
[
keras.layers.Conv2D(filters=32, kernel_size=3, input_shape=[150, 150, 3]),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Conv2D(filters=64, kernel_size=3),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Conv2D(filters=128, kernel_size=3),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Conv2D(filters=256, kernel_size=3),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Dropout(0.5),
keras.layers.Flatten(),
keras.layers.Dense(units=128, activation="relu"),
keras.layers.Dropout(0.1),
keras.layers.Dense(units=256, activation="relu"),
keras.layers.Dropout(0.25),
keras.layers.Dense(units=4, activation="softmax"),
]
)
model.compile(
optimizer=Adam(learning_rate=0.0001),
loss="categorical_crossentropy",
metrics=["accuracy"],
)
history = model.fit(
training_data,
epochs=500,
verbose=1,
validation_data=test_data,
)
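# The fit above runs for a fixed 500 epochs with no callbacks. As a minimal,
# hedged sketch (my addition, not part of the original setup), these callbacks
# could be passed to model.fit to stop once validation accuracy stalls and to
# keep only the best weights; the file name and patience value are illustrative.
early_stop_cb = keras.callbacks.EarlyStopping(
    monitor="val_accuracy", patience=10, restore_best_weights=True
)
checkpoint_cb = keras.callbacks.ModelCheckpoint(
    "best_cotton_model.h5", monitor="val_accuracy", save_best_only=True
)
# usage: model.fit(training_data, epochs=500, validation_data=test_data,
#                  callbacks=[early_stop_cb, checkpoint_cb], verbose=1)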
plt.plot(history.history["accuracy"])
plt.plot(history.history["val_accuracy"])
plt.title("model accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["train", "test"], loc="upper left")
plt.show()
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train", "test"], loc="upper left")
plt.show()
from tensorflow.keras.preprocessing import image
import numpy as np
IMAGE_SIZE = (150, 150)
def predict_it(image_location):
test_image = image.load_img(image_location, target_size=IMAGE_SIZE)
plt.imshow(test_image)
test_image = image.img_to_array(test_image)
test_image = test_image / 255
test_image = np.expand_dims(test_image, axis=0)
preds = np.argmax(model.predict(test_image))
    if preds == 0:
        print("The image shows a diseased cotton leaf")
    elif preds == 1:
        print("The image shows a diseased cotton plant")
    elif preds == 2:
        print("The image shows a fresh cotton leaf")
    else:
        print("The image shows a fresh cotton plant")
predict_it(
"../input/cotton-disease-dataset/Cotton Disease/val/diseased cotton leaf/dis_leaf (100)_iaip.jpg"
)
predict_it(
"../input/cotton-disease-dataset/Cotton Disease/val/diseased cotton plant/dd (10)_iaip.jpg"
)
predict_it(
"../input/cotton-disease-dataset/Cotton Disease/val/fresh cotton leaf/d (106)_iaip.jpg"
)
predict_it(
"../input/cotton-disease-dataset/Cotton Disease/val/fresh cotton plant/dsd (141)_iaip.jpg"
)
validation_path = "../input/cotton-disease-dataset/Cotton Disease/val"
valid_data = test_datagen.flow_from_directory(
validation_path, target_size=(150, 150), batch_size=32, class_mode="categorical"
)
predicted = model.predict(valid_data[0][0])
actual = []
for i in valid_data[0][1]:
actual.append(np.argmax(i))
predict = []
for i in predicted:
predict.append(np.argmax(i))
from sklearn.metrics import accuracy_score
accuracy_score(actual, predict)
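# The accuracy above is computed on a single batch of 32 validation images. A short
# sketch (assuming the same valid_data generator defined above) that scores the
# whole validation split instead:
full_actual, full_predict = [], []
for batch_idx in range(len(valid_data)):
    batch_x, batch_y = valid_data[batch_idx]
    batch_pred = model.predict(batch_x)
    full_actual.extend(np.argmax(batch_y, axis=1))
    full_predict.extend(np.argmax(batch_pred, axis=1))
print("full validation accuracy:", accuracy_score(full_actual, full_predict))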
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/393/69393950.ipynb
|
cotton-disease-dataset
|
janmejaybhoi
|
[{"Id": 69393950, "ScriptId": 18941687, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7117667, "CreationDate": "07/30/2021 12:48:09", "VersionNumber": 2.0, "Title": "Cotton Plant Disease Prediction using CNN", "EvaluationDate": "07/30/2021", "IsChange": false, "TotalLines": 146.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 146.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 92504766, "KernelVersionId": 69393950, "SourceDatasetVersionId": 1512654}]
|
[{"Id": 1512654, "DatasetId": 891340, "DatasourceVersionId": 1546899, "CreatorUserId": 4077335, "LicenseName": "CC0: Public Domain", "CreationDate": "09/24/2020 14:17:11", "VersionNumber": 1.0, "Title": "Cotton Disease Dataset", "Slug": "cotton-disease-dataset", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 891340, "CreatorUserId": 4077335, "OwnerUserId": 4077335.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1512654.0, "CurrentDatasourceVersionId": 1546899.0, "ForumId": 906906, "Type": 2, "CreationDate": "09/24/2020 14:17:11", "LastActivityDate": "09/24/2020", "TotalViews": 36020, "TotalDownloads": 3604, "TotalVotes": 58, "TotalKernels": 42}]
|
[{"Id": 4077335, "UserName": "janmejaybhoi", "DisplayName": "D3v", "RegisterDate": "11/21/2019", "PerformanceTier": 1}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.optimizers import Adam
import matplotlib.pyplot as plt
train_data_path = "../input/cotton-disease-dataset/Cotton Disease/train"
test_data_path = "../input/cotton-disease-dataset/Cotton Disease/test"
def plotImages(images_arr):
fig, axes = plt.subplots(1, 5, figsize=(20, 20))
axes = axes.flatten()
for img, ax in zip(images_arr, axes):
ax.imshow(img)
plt.tight_layout()
plt.show()
training_datagen = ImageDataGenerator(
rescale=1.0 / 255,
rotation_range=40,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True,
fill_mode="nearest",
)
training_data = training_datagen.flow_from_directory(
train_data_path, target_size=(150, 150), batch_size=32, class_mode="categorical"
)
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
test_data = test_datagen.flow_from_directory(
test_data_path, target_size=(150, 150), batch_size=32, class_mode="categorical"
)
images = [training_data[0][0][0] for i in range(5)]
plotImages(images)
model = keras.models.Sequential(
[
keras.layers.Conv2D(filters=32, kernel_size=3, input_shape=[150, 150, 3]),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Conv2D(filters=64, kernel_size=3),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Conv2D(filters=128, kernel_size=3),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Conv2D(filters=256, kernel_size=3),
keras.layers.MaxPooling2D(pool_size=(2, 2)),
keras.layers.Dropout(0.5),
keras.layers.Flatten(),
keras.layers.Dense(units=128, activation="relu"),
keras.layers.Dropout(0.1),
keras.layers.Dense(units=256, activation="relu"),
keras.layers.Dropout(0.25),
keras.layers.Dense(units=4, activation="softmax"),
]
)
model.compile(
optimizer=Adam(learning_rate=0.0001),
loss="categorical_crossentropy",
metrics=["accuracy"],
)
history = model.fit(
training_data,
epochs=500,
verbose=1,
validation_data=test_data,
)
plt.plot(history.history["accuracy"])
plt.plot(history.history["val_accuracy"])
plt.title("model accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["train", "test"], loc="upper left")
plt.show()
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train", "test"], loc="upper left")
plt.show()
from tensorflow.keras.preprocessing import image
import numpy as np
IMAGE_SIZE = (150, 150)
def predict_it(image_location):
test_image = image.load_img(image_location, target_size=IMAGE_SIZE)
plt.imshow(test_image)
test_image = image.img_to_array(test_image)
test_image = test_image / 255
test_image = np.expand_dims(test_image, axis=0)
preds = np.argmax(model.predict(test_image))
    if preds == 0:
        print("The image shows a diseased cotton leaf")
    elif preds == 1:
        print("The image shows a diseased cotton plant")
    elif preds == 2:
        print("The image shows a fresh cotton leaf")
    else:
        print("The image shows a fresh cotton plant")
predict_it(
"../input/cotton-disease-dataset/Cotton Disease/val/diseased cotton leaf/dis_leaf (100)_iaip.jpg"
)
predict_it(
"../input/cotton-disease-dataset/Cotton Disease/val/diseased cotton plant/dd (10)_iaip.jpg"
)
predict_it(
"../input/cotton-disease-dataset/Cotton Disease/val/fresh cotton leaf/d (106)_iaip.jpg"
)
predict_it(
"../input/cotton-disease-dataset/Cotton Disease/val/fresh cotton plant/dsd (141)_iaip.jpg"
)
validation_path = "../input/cotton-disease-dataset/Cotton Disease/val"
valid_data = test_datagen.flow_from_directory(
validation_path, target_size=(150, 150), batch_size=32, class_mode="categorical"
)
predicted = model.predict(valid_data[0][0])
actual = []
for i in valid_data[0][1]:
actual.append(np.argmax(i))
predict = []
for i in predicted:
predict.append(np.argmax(i))
from sklearn.metrics import accuracy_score
accuracy_score(actual, predict)
| false | 0 | 1,606 | 0 | 1,634 | 1,606 |
||
69393025
|
<jupyter_start><jupyter_text>GTSRB - German Traffic Sign Recognition Benchmark
### Context
The German Traffic Sign Benchmark is a multi-class, single-image classification challenge held at the International Joint Conference on Neural Networks (IJCNN) 2011. We cordially invite researchers from relevant fields to participate: The competition is designed to allow for participation without special domain knowledge. Our benchmark has the following properties:
- Single-image, multi-class classification problem
- More than 40 classes
- More than 50,000 images in total
- Large, lifelike database
Kaggle dataset identifier: gtsrb-german-traffic-sign
<jupyter_script># Prepare all our necessary libraries
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
# pytorch libraries
import torch
from torch.utils.data import Dataset
from torchvision import datasets
from torchvision.transforms import (
ToTensor,
Pad,
Compose,
CenterCrop,
ToPILImage,
Normalize,
ConvertImageDtype,
Resize,
)
from torchvision.models import resnet50
from torch import nn
from torch.nn import init, Linear, ReLU, Softmax
from torch.nn.init import xavier_uniform_
from torch.optim import SGD, Adam
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_
from seaborn import heatmap
import datetime
# libraries for adversarial example (AE) generation
from cleverhans.torch.attacks.projected_gradient_descent import (
projected_gradient_descent,
)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("You are using Device: ", device)
# Prepare our Dataset Structure, as it has to be normalized for pytorch
from torchvision.io import read_image
class GTSRB(Dataset):
def __init__(
self, annotations_file, img_dir, transform=None, target_transform=None
):
self.img_labels = pd.read_csv(annotations_file)[["Path", "ClassId"]]
self.img_dir = img_dir
self.transform = transform
self.target_transform = target_transform
def __len__(self):
return len(self.img_labels)
def __getitem__(self, idx):
img_path = os.path.join(self.img_dir, self.img_labels.iloc[idx, 0])
label = self.img_labels.iloc[idx, 1]
image = read_image(img_path)
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
img_dir = "/kaggle/input/gtsrb-german-traffic-sign/"
test_file = "/kaggle/input/gtsrb-german-traffic-sign/Test.csv"
test_data = GTSRB(
img_dir=img_dir,
annotations_file=test_file,
transform=Compose([Resize((30, 30)), ConvertImageDtype(torch.float32)]),
)
from torch.utils.data import DataLoader
test_dataloader = DataLoader(test_data)
# Display image and label.
test_features, test_labels = next(iter(test_dataloader))
print(f"Feature batch shape: {test_features.size()}")
print(f"Labels batch shape: {test_labels.size()}")
img = test_features[0]
label = test_labels[0]
img = ToPILImage()(img).convert("RGB")
plt.imshow(img)
plt.show()
print(f"Label: {label}")
# download resnet50 pretrained on ImageNet
# adjust resnet50 to my dataset
class r50(nn.Module):
def __init__(self, pretrained_model):
super(r50, self).__init__()
self.rn50 = pretrained_model
self.fl1 = nn.Linear(1000, 256)
self.fl2 = nn.Linear(256, 43)
def forward(self, X):
X = self.rn50(X)
X = F.relu(self.fl1(X))
X = F.dropout(X, p=0.25)
X = self.fl2(X)
return X
class conv_net(nn.Module):
def __init__(self):
super(conv_net, self).__init__()
self.conv1 = nn.Conv2d(3, 32, (5, 5))
self.conv2 = nn.Conv2d(32, 64, (3, 3))
self.pool = nn.MaxPool2d((2, 2))
self.dropout1 = nn.Dropout(p=0.25)
self.conv3 = nn.Conv2d(64, 3, (3, 3))
self.linear1 = Linear(75, 256)
self.dropout2 = nn.Dropout(p=0.5)
self.linear2 = Linear(256, 43)
def forward(self, X):
X = F.relu(self.conv1(X))
X = self.pool(F.relu(self.conv2(X)))
X = self.dropout1(X)
X = self.pool(F.relu(self.conv3(X)))
X = self.dropout1(X)
X = torch.flatten(X, 1)
X = F.relu(self.linear1(X))
X = self.dropout2(X)
X = self.linear2(X)
return X
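# Quick shape sanity check (assumes the 30x30 RGB inputs produced by the
# Resize((30, 30)) transform above): 30x30 -> conv 5x5 -> 26x26 -> conv 3x3 -> 24x24
# -> pool -> 12x12 -> conv 3x3 -> 10x10 -> pool -> 5x5 with 3 channels, i.e. 75
# flattened features, which is why linear1 expects 75 inputs.
_dummy_input = torch.zeros(1, 3, 30, 30)
print(conv_net()(_dummy_input).shape)  # expected: torch.Size([1, 43])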
# SELECT YOUR MODEL HERE
alternative_model = True
if alternative_model:
model = conv_net()
PATH = "/kaggle/input/alternative-gtsrb/alt_gtsrb.pth"
else:
resn50 = resnet50(pretrained=True, progress=True)
model = r50(resn50)
PATH = "/kaggle/input/gtsrb/gtsrbX.pth"
model.load_state_dict(torch.load(PATH, map_location=torch.device("cpu")))
model.eval()
# put on cuda if possible
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("You are using Device: ", device)
model.to(device)
test_features = test_features.to(device)
# Export the model for translation purposes
batch_size = 1
torch.onnx.export(
model, # model being run
test_features, # model input (or a tuple for multiple inputs)
"gtsrb.onnx", # where to save the model (can be a file or file-like object)
export_params=True, # store the trained parameter weights inside the model file
opset_version=10, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names=["input"], # the model's input names
output_names=["output"], # the model's output names
dynamic_axes={
"input": {0: "batch_size"}, # variable length axes
"output": {0: "batch_size"},
},
)
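# A minimal verification sketch (assumption on my part: the onnxruntime package is
# available in this environment) to confirm the exported graph reproduces the
# PyTorch logits for the sample batch exported above.
try:
    import onnxruntime as ort

    ort_session = ort.InferenceSession("gtsrb.onnx", providers=["CPUExecutionProvider"])
    onnx_logits = ort_session.run(None, {"input": test_features.cpu().numpy()})[0]
    torch_logits = model(test_features).detach().cpu().numpy()
    print("max abs difference (onnx vs torch):", np.abs(onnx_logits - torch_logits).max())
except ImportError:
    print("onnxruntime is not installed; skipping the ONNX output check")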
torch.save(test_data, "/kaggle/working/test_data.pt")
torch.save(test_data, "kaggle/working/test_data.h5")
torch.save(test_data, "kaggle/working/test_data.csv")
# AE Generator
# look at some AEs
ae_generator = (
(
projected_gradient_descent(
model.to(device),
image.to(device),
0.05,
0.0075,
10,
np.inf,
clip_min=0,
clip_max=1,
)
.detach()
.to(device),
label,
)
for image, label in test_dataloader
)
for i in range(3):
# print(next(ae_generator))
ae = next(ae_generator)
image = ToPILImage()(ae[0][0]).convert("RGB")
plt.imshow(image)
plt.show()
print(ae[1])
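# A small follow-up sketch (same model, test_dataloader and PGD settings as above)
# that visualizes the perturbation itself for one test image, rescaled to [0, 1]
# purely for display.
clean_img, _ = next(iter(test_dataloader))
adv_img = (
    projected_gradient_descent(
        model.to(device),
        clean_img.to(device),
        0.05,
        0.0075,
        10,
        np.inf,
        clip_min=0,
        clip_max=1,
    )
    .detach()
    .cpu()
)
perturbation = (adv_img[0] - clean_img[0]).abs()
plt.imshow(ToPILImage()(perturbation / perturbation.max()))
plt.title("scaled |adversarial - clean| for one test image")
plt.show()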
def heatmapping(cm):
precision = []
recall = []
class_count = []
    for i in range(43):
        # rows of cm hold true labels, columns hold predicted labels
        predicted_as_i = cm[:, i].sum()
        actual_i = cm[i].sum()
        precision.append(round(cm[i][i] / predicted_as_i, 2) if predicted_as_i > 0 else 0.0)
        recall.append(round(cm[i][i] / actual_i, 2) if actual_i > 0 else 0.0)
        class_count.append(actual_i)
metrics = pd.DataFrame(
{"precision": precision, "recall": recall, "class_count": class_count}
)
plt.subplots(figsize=(20, 15))
heatmap(cm)
return metrics
cm = np.zeros((43, 43))
predictions = []
truth = []
ae_count = len(test_dataloader) - 10
epsilon = [0.2, 0.1, 0.0925, 0.085, 0.075, 0.065, 0.05, 0.025]
metrics = []
for eps in epsilon:
    correct = 0
    # reset the confusion matrix for each perturbation budget so that the
    # metrics appended below describe this epsilon only, not an accumulation
    cm = np.zeros((43, 43))
ae_generator = (
(
projected_gradient_descent(
model.to(device),
image.to(device),
eps,
eps / 10,
15,
np.inf,
clip_min=0,
clip_max=1,
)
.detach()
.to(device),
label,
)
for image, label in test_dataloader
)
for i in range(ae_count):
ae = next(ae_generator)
image, label = ae[0].to(device), ae[1].to(device)
predicted = F.softmax(model(image), dim=1).argmax()
predictions.append(predicted)
truth.append(label)
correct += predicted == label
cm[int(label)][int(predicted)] += 1
print(
"Accuracy of the network on the {tst_len} adversarial images with perturbation {e}: {acc}".format(
acc=int(correct) / ae_count, tst_len=ae_count, e=eps
)
)
metrics.append(heatmapping(cm))
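# A small plotting helper (my addition): given one adversarial accuracy per entry of
# `epsilon` (for example, int(correct) / ae_count recorded inside the loop above,
# which is currently only printed), draw the robustness curve of accuracy versus
# perturbation budget.
def plot_robustness_curve(eps_values, acc_per_eps):
    plt.figure(figsize=(6, 4))
    plt.plot(eps_values, acc_per_eps, marker="o")
    plt.xlabel("L-inf perturbation budget (epsilon)")
    plt.ylabel("adversarial accuracy")
    plt.title("Model robustness vs. PGD perturbation size")
    plt.show()
# hypothetical usage: plot_robustness_curve(epsilon, acc_per_eps)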
# check accuracy on test set
correct = 0
total = 0
cm = np.zeros((43, 43))
predictions = []
# since we're not training, we don't need to calculate the gradients for our outputs
with torch.no_grad():
for data in test_dataloader:
images, labels = data
images, labels = images.to(device), labels.to(device)
output = model(images)
predicted = output.argmax()
predictions.append(predicted)
total += 1
correct += predicted == labels
cm[int(labels)][int(predicted)] += 1
print(
"Accuracy of the network on the {tst_len} test images: {acc}".format(
acc=int(correct) / int(total), tst_len=len(test_data)
)
)
f = open("/kaggle/working/metrics.csv", "a")
for df in metrics:
df.to_csv(f, decimal=",")
f.close()
"""precision: correctly predicted class A/predicted class A
recall: number of correctly predicted class A photos out of the number of actual class A
precision = []
recall = []
class_count = []
for i in range(43):
precision.append(round(cm[i][i]/cm[i].sum(),2))
recall.append(round(cm[i][i]/cm[i].sum()))
class_count.append(cm[i].sum())
metrics = pd.DataFrame({"precision" : precision, "recall" : recall, "class_count": class_count})"""
"""from seaborn import heatmap
plt.subplots(figsize=(20,15))
heatmap(cm)"""
"""metrics"""
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/393/69393025.ipynb
|
gtsrb-german-traffic-sign
|
meowmeowmeowmeowmeow
|
[{"Id": 69393025, "ScriptId": 17630110, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4838292, "CreationDate": "07/30/2021 12:34:56", "VersionNumber": 31.0, "Title": "GTSRB TestPerformance", "EvaluationDate": "07/30/2021", "IsChange": true, "TotalLines": 254.0, "LinesInsertedFromPrevious": 2.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 252.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 92502497, "KernelVersionId": 69393025, "SourceDatasetVersionId": 191501}]
|
[{"Id": 191501, "DatasetId": 82373, "DatasourceVersionId": 202552, "CreatorUserId": 1996737, "LicenseName": "CC0: Public Domain", "CreationDate": "11/25/2018 18:12:34", "VersionNumber": 1.0, "Title": "GTSRB - German Traffic Sign Recognition Benchmark", "Slug": "gtsrb-german-traffic-sign", "Subtitle": "Multi-class, single-image classification challenge", "Description": "### Context\n\nThe German Traffic Sign Benchmark is a multi-class, single-image classification challenge held at the International Joint Conference on Neural Networks (IJCNN) 2011. We cordially invite researchers from relevant fields to participate: The competition is designed to allow for participation without special domain knowledge. Our benchmark has the following properties:\n\n - Single-image, multi-class classification problem \n - More than 40 classes\n - More than 50,000 images in total \n - Large, lifelike database\n\n### Acknowledgements\n\n\n[INI Benchmark Website][1]\n\n\n\n [1]: http://benchmark.ini.rub.de/", "VersionNotes": "Initial release", "TotalCompressedBytes": 354216226.0, "TotalUncompressedBytes": 352579659.0}]
|
[{"Id": 82373, "CreatorUserId": 1996737, "OwnerUserId": 1996737.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 191501.0, "CurrentDatasourceVersionId": 202552.0, "ForumId": 91803, "Type": 2, "CreationDate": "11/25/2018 18:12:34", "LastActivityDate": "11/25/2018", "TotalViews": 453753, "TotalDownloads": 89254, "TotalVotes": 998, "TotalKernels": 281}]
|
[{"Id": 1996737, "UserName": "meowmeowmeowmeowmeow", "DisplayName": "Mykola", "RegisterDate": "06/15/2018", "PerformanceTier": 2}]
|
# Prepare all our necessary libraries
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
# pytorch libraries
import torch
from torch.utils.data import Dataset
from torchvision import datasets
from torchvision.transforms import (
ToTensor,
Pad,
Compose,
CenterCrop,
ToPILImage,
Normalize,
ConvertImageDtype,
Resize,
)
from torchvision.models import resnet50
from torch import nn
from torch.nn import init, Linear, ReLU, Softmax
from torch.nn.init import xavier_uniform_
from torch.optim import SGD, Adam
import torch.nn.functional as F
from torch.nn.init import xavier_uniform_
from seaborn import heatmap
import datetime
# libraries for adversarial example (AE) generation
from cleverhans.torch.attacks.projected_gradient_descent import (
projected_gradient_descent,
)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("You are using Device: ", device)
# Prepare our Dataset Structure, as it has to be normalized for pytorch
from torchvision.io import read_image
class GTSRB(Dataset):
def __init__(
self, annotations_file, img_dir, transform=None, target_transform=None
):
self.img_labels = pd.read_csv(annotations_file)[["Path", "ClassId"]]
self.img_dir = img_dir
self.transform = transform
self.target_transform = target_transform
def __len__(self):
return len(self.img_labels)
def __getitem__(self, idx):
img_path = os.path.join(self.img_dir, self.img_labels.iloc[idx, 0])
label = self.img_labels.iloc[idx, 1]
image = read_image(img_path)
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
img_dir = "/kaggle/input/gtsrb-german-traffic-sign/"
test_file = "/kaggle/input/gtsrb-german-traffic-sign/Test.csv"
test_data = GTSRB(
img_dir=img_dir,
annotations_file=test_file,
transform=Compose([Resize((30, 30)), ConvertImageDtype(torch.float32)]),
)
from torch.utils.data import DataLoader
test_dataloader = DataLoader(test_data)
# Display image and label.
test_features, test_labels = next(iter(test_dataloader))
print(f"Feature batch shape: {test_features.size()}")
print(f"Labels batch shape: {test_labels.size()}")
img = test_features[0]
label = test_labels[0]
img = ToPILImage()(img).convert("RGB")
plt.imshow(img)
plt.show()
print(f"Label: {label}")
# download resnet50 pretrained on ImageNet
# adjust resnet50 to my dataset
class r50(nn.Module):
def __init__(self, pretrained_model):
super(r50, self).__init__()
self.rn50 = pretrained_model
self.fl1 = nn.Linear(1000, 256)
self.fl2 = nn.Linear(256, 43)
def forward(self, X):
X = self.rn50(X)
X = F.relu(self.fl1(X))
X = F.dropout(X, p=0.25)
X = self.fl2(X)
return X
class conv_net(nn.Module):
def __init__(self):
super(conv_net, self).__init__()
self.conv1 = nn.Conv2d(3, 32, (5, 5))
self.conv2 = nn.Conv2d(32, 64, (3, 3))
self.pool = nn.MaxPool2d((2, 2))
self.dropout1 = nn.Dropout(p=0.25)
self.conv3 = nn.Conv2d(64, 3, (3, 3))
self.linear1 = Linear(75, 256)
self.dropout2 = nn.Dropout(p=0.5)
self.linear2 = Linear(256, 43)
def forward(self, X):
X = F.relu(self.conv1(X))
X = self.pool(F.relu(self.conv2(X)))
X = self.dropout1(X)
X = self.pool(F.relu(self.conv3(X)))
X = self.dropout1(X)
X = torch.flatten(X, 1)
X = F.relu(self.linear1(X))
X = self.dropout2(X)
X = self.linear2(X)
return X
# SELECT YOUR MODEL HERE
alternative_model = True
if alternative_model:
model = conv_net()
PATH = "/kaggle/input/alternative-gtsrb/alt_gtsrb.pth"
else:
resn50 = resnet50(pretrained=True, progress=True)
model = r50(resn50)
PATH = "/kaggle/input/gtsrb/gtsrbX.pth"
model.load_state_dict(torch.load(PATH, map_location=torch.device("cpu")))
model.eval()
# put on cuda if possible
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print("You are using Device: ", device)
model.to(device)
test_features = test_features.to(device)
# Export the model for translation purposes
batch_size = 1
torch.onnx.export(
model, # model being run
test_features, # model input (or a tuple for multiple inputs)
"gtsrb.onnx", # where to save the model (can be a file or file-like object)
export_params=True, # store the trained parameter weights inside the model file
opset_version=10, # the ONNX version to export the model to
do_constant_folding=True, # whether to execute constant folding for optimization
input_names=["input"], # the model's input names
output_names=["output"], # the model's output names
dynamic_axes={
"input": {0: "batch_size"}, # variable length axes
"output": {0: "batch_size"},
},
)
torch.save(test_data, "/kaggle/working/test_data.pt")
torch.save(test_data, "kaggle/working/test_data.h5")
torch.save(test_data, "kaggle/working/test_data.csv")
# AE Generator
# look at some AEs
ae_generator = (
(
projected_gradient_descent(
model.to(device),
image.to(device),
0.05,
0.0075,
10,
np.inf,
clip_min=0,
clip_max=1,
)
.detach()
.to(device),
label,
)
for image, label in test_dataloader
)
for i in range(3):
# print(next(ae_generator))
ae = next(ae_generator)
image = ToPILImage()(ae[0][0]).convert("RGB")
plt.imshow(image)
plt.show()
print(ae[1])
def heatmapping(cm):
precision = []
recall = []
class_count = []
    for i in range(43):
        # rows of cm hold true labels, columns hold predicted labels
        predicted_as_i = cm[:, i].sum()
        actual_i = cm[i].sum()
        precision.append(round(cm[i][i] / predicted_as_i, 2) if predicted_as_i > 0 else 0.0)
        recall.append(round(cm[i][i] / actual_i, 2) if actual_i > 0 else 0.0)
        class_count.append(actual_i)
metrics = pd.DataFrame(
{"precision": precision, "recall": recall, "class_count": class_count}
)
plt.subplots(figsize=(20, 15))
heatmap(cm)
return metrics
cm = np.zeros((43, 43))
predictions = []
truth = []
ae_count = len(test_dataloader) - 10
epsilon = [0.2, 0.1, 0.0925, 0.085, 0.075, 0.065, 0.05, 0.025]
metrics = []
for eps in epsilon:
    correct = 0
    # reset the confusion matrix for each perturbation budget so that the
    # metrics appended below describe this epsilon only, not an accumulation
    cm = np.zeros((43, 43))
ae_generator = (
(
projected_gradient_descent(
model.to(device),
image.to(device),
eps,
eps / 10,
15,
np.inf,
clip_min=0,
clip_max=1,
)
.detach()
.to(device),
label,
)
for image, label in test_dataloader
)
for i in range(ae_count):
ae = next(ae_generator)
image, label = ae[0].to(device), ae[1].to(device)
predicted = F.softmax(model(image), dim=1).argmax()
predictions.append(predicted)
truth.append(label)
correct += predicted == label
cm[int(label)][int(predicted)] += 1
print(
"Accuracy of the network on the {tst_len} adversarial images with perturbation {e}: {acc}".format(
acc=int(correct) / ae_count, tst_len=ae_count, e=eps
)
)
metrics.append(heatmapping(cm))
# check accuracy on test set
correct = 0
total = 0
cm = np.zeros((43, 43))
predictions = []
# since we're not training, we don't need to calculate the gradients for our outputs
with torch.no_grad():
for data in test_dataloader:
images, labels = data
images, labels = images.to(device), labels.to(device)
output = model(images)
predicted = output.argmax()
predictions.append(predicted)
total += 1
correct += predicted == labels
cm[int(labels)][int(predicted)] += 1
print(
"Accuracy of the network on the {tst_len} test images: {acc}".format(
acc=int(correct) / int(total), tst_len=len(test_data)
)
)
f = open("/kaggle/working/metrics.csv", "a")
for df in metrics:
df.to_csv(f, decimal=",")
f.close()
"""precision: correctly predicted class A/predicted class A
recall: number of correctly predicted class A photos out of the number of actual class A
precision = []
recall = []
class_count = []
for i in range(43):
precision.append(round(cm[i][i]/cm[i].sum(),2))
recall.append(round(cm[i][i]/cm[i].sum()))
class_count.append(cm[i].sum())
metrics = pd.DataFrame({"precision" : precision, "recall" : recall, "class_count": class_count})"""
"""from seaborn import heatmap
plt.subplots(figsize=(20,15))
heatmap(cm)"""
"""metrics"""
| false | 0 | 2,703 | 0 | 2,852 | 2,703 |
||
69393429
|
<jupyter_start><jupyter_text>progresbar2-local
Kaggle dataset identifier: progresbar2local
<jupyter_script># # The Bernstein Bears CRP Submission 1
# install necessary libraries from input
# import progressbar library for offline usage
# import the textstat library for additional ML data prep
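# A brief, illustrative example (mine, not necessarily the feature set used further
# below) of the kind of readability statistics textstat can contribute as extra
# regression features:
import textstat
_demo_excerpt = "The quick brown fox jumps over the lazy dog."
print(
    "flesch_reading_ease:",
    textstat.flesch_reading_ease(_demo_excerpt),
    "| syllable_count:",
    textstat.syllable_count(_demo_excerpt),
)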
FAST_DEV_RUN = False
USE_CHECKPOINT = True
USE_HIDDEN_IN_RGR = False
N_FEATURES_TO_USE_HEAD = 1
N_FEATURES_TO_USE_TAIL = None
# in this kernel, run train on all data to maximize score on held out data but use what we learned about optimal parameters
# set to 16 bit precision to cut compute requirements/increase batch size capacity
USE_16_BIT_PRECISION = True
# set a seed value for consistent experimentation; optional, else leave as None
SEED_VAL = 42
# set a train-validation split, .7 means 70% of train data and 30% to validation set
TRAIN_VALID_SPLIT = 0.8 # if None, then don't split
# set hyperparameters learned from tuning: https://www.kaggle.com/justinchae/tune-roberta-pytorch-lightning-optuna
MAX_EPOCHS = 4
BATCH_SIZE = 16
GRADIENT_CLIP_VAL = 0.18318092164684585
LEARNING_RATE = 3.613894271216525e-05
TOKENIZER_MAX_LEN = 363
WARMUP_STEPS = 292
WEIGHT_DECAY = 0.004560699842170359
import kaggle_config
from kaggle_config import (
WORKFLOW_ROOT,
DATA_PATH,
CACHE_PATH,
FIG_PATH,
MODEL_PATH,
ANALYSIS_PATH,
KAGGLE_INPUT,
CHECKPOINTS_PATH,
LOGS_PATH,
)
INPUTS, DEVICE = kaggle_config.run()
KAGGLE_TRAIN_PATH = kaggle_config.get_train_path(INPUTS)
KAGGLE_TEST_PATH = kaggle_config.get_test_path(INPUTS)
import pytorch_lightning as pl
from pytorch_lightning import loggers as pl_loggers
from pytorch_lightning import seed_everything
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
from pytorch_lightning.tuner.batch_size_scaling import scale_batch_size
from pytorch_lightning.tuner.lr_finder import _LRFinder, lr_find
import torchmetrics
import optuna
from optuna.integration import PyTorchLightningPruningCallback
from optuna.samplers import TPESampler, RandomSampler, CmaEsSampler
from optuna.visualization import (
plot_intermediate_values,
plot_optimization_history,
plot_param_importances,
)
import optuna.integration.lightgbm as lgb
import lightgbm as lgm
from sklearn.model_selection import (
KFold,
cross_val_score,
RepeatedKFold,
train_test_split,
)
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.decomposition import PCA
from sklearn.feature_selection import (
RFE,
f_regression,
mutual_info_regression,
SequentialFeatureSelector,
)
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
from sklearn.metrics import mean_squared_error
import math
import textstat
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.dataset import random_split
import tensorflow as tf
from transformers import (
RobertaForSequenceClassification,
RobertaTokenizer,
AdamW,
get_linear_schedule_with_warmup,
)
import os
import pandas as pd
import numpy as np
import gc
from functools import partial
from typing import List, Dict
from typing import Optional
from argparse import ArgumentParser
import random
if SEED_VAL:
random.seed(SEED_VAL)
np.random.seed(SEED_VAL)
seed_everything(SEED_VAL)
NUM_DATALOADER_WORKERS = os.cpu_count()
try:
resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu="")
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
n_tpus = len(tf.config.list_logical_devices("TPU"))
except ValueError:
n_tpus = 0
ACCELERATOR_TYPE = {}
ACCELERATOR_TYPE.update(
{"gpus": torch.cuda.device_count() if torch.cuda.is_available() else None}
)
ACCELERATOR_TYPE.update({"tpu_cores": n_tpus if n_tpus > 0 else None})
# still debugging how to best toggle between tpu and gpu; there's too much code to configure to work simply
print("ACCELERATOR_TYPE:\n", ACCELERATOR_TYPE)
PRETTRAINED_ROBERTA_BASE_MODEL_PATH = "/kaggle/input/pre-trained-roberta-base"
PRETRAINED_ROBERTA_BASE_TOKENIZER_PATH = "/kaggle/input/tokenizer-roberta"
PRETRAINED_ROBERTA_BASE_TOKENIZER = RobertaTokenizer.from_pretrained(
PRETRAINED_ROBERTA_BASE_TOKENIZER_PATH
)
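# A quick illustrative check (my example) of how TOKENIZER_MAX_LEN shapes the
# encoded inputs the model will see: every excerpt is padded or truncated to a
# fixed length of 363 tokens.
_demo_encoding = PRETRAINED_ROBERTA_BASE_TOKENIZER(
    "A short example excerpt.",
    padding="max_length",
    truncation=True,
    max_length=TOKENIZER_MAX_LEN,
    return_tensors="pt",
)
print(_demo_encoding["input_ids"].shape)  # expected: torch.Size([1, 363])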
TUNED_CHECKPOINT_PATH = "/kaggle/input/best-crp-ckpt-4/crp_roberta_trial_4.ckpt"
# from: https://www.kaggle.com/justinchae/crp-regression-with-roberta-and-lightgbm
TUNED_BEST_ROBERTA_PATH = "/kaggle/input/my-best-tuned-roberta"
"""Implementing Lightning instead of torch.nn.Module
"""
class LitRobertaLogitRegressor(pl.LightningModule):
def __init__(
self,
pre_trained_path: str,
output_hidden_states: bool = False,
num_labels: int = 1,
layer_1_output_size: int = 64,
layer_2_output_size: int = 1,
learning_rate: float = 1e-5,
task_name: Optional[str] = None,
warmup_steps: int = 100,
weight_decay: float = 0.0,
adam_epsilon: float = 1e-8,
batch_size: Optional[int] = None,
train_size: Optional[int] = None,
max_epochs: Optional[int] = None,
n_gpus: Optional[int] = 0,
n_tpus: Optional[int] = 0,
accumulate_grad_batches=None,
tokenizer=None,
do_decode=False,
):
"""refactored from: https://www.kaggle.com/justinchae/my-bert-tuner and https://www.kaggle.com/justinchae/roberta-tuner"""
super(LitRobertaLogitRegressor, self).__init__()
# this saves class params as self.hparams
self.save_hyperparameters()
self.model = RobertaForSequenceClassification.from_pretrained(
self.hparams.pre_trained_path,
output_hidden_states=self.hparams.output_hidden_states,
num_labels=self.hparams.num_labels,
)
self.accelerator_multiplier = n_gpus if n_gpus > 0 else 1
self.config = self.model.config
self.parameters = self.model.parameters
self.save_pretrained = self.model.save_pretrained
# these layers are not currently used, tbd in future iteration
self.layer_1 = torch.nn.Linear(768, layer_1_output_size)
self.layer_2 = torch.nn.Linear(layer_1_output_size, layer_2_output_size)
self.tokenizer = tokenizer
self.do_decode = do_decode
self.output_hidden_states = output_hidden_states
def rmse_loss(x, y):
criterion = F.mse_loss
loss = torch.sqrt(criterion(x, y))
return loss
# TODO: enable toggle for various loss funcs and torchmetrics package
self.loss_func = rmse_loss
# self.eval_func = rmse_loss
def setup(self, stage=None) -> None:
if stage == "fit":
# when this class is called by trainer.fit, this stage runs and so on
# Calculate total steps
tb_size = self.hparams.batch_size * self.accelerator_multiplier
ab_size = self.hparams.accumulate_grad_batches * float(
self.hparams.max_epochs
)
self.total_steps = (self.hparams.train_size // tb_size) // ab_size
def extract_logit_only(self, input_ids, attention_mask) -> float:
output = self.model(input_ids=input_ids, attention_mask=attention_mask)
logit = output.logits
logit = logit.cpu().numpy().astype(float)
return logit
def extract_hidden_only(self, input_ids, attention_mask) -> np.array:
        output = self.model(input_ids=input_ids, attention_mask=attention_mask)
hidden_states = output.hidden_states
x = torch.stack(hidden_states[-4:]).sum(0)
m1 = torch.nn.Sequential(self.layer_1, self.layer_2, torch.nn.Flatten())
x = m1(x)
x = torch.squeeze(x).cpu().numpy()
return x
def forward(self, input_ids, attention_mask) -> torch.Tensor:
output = self.model(input_ids=input_ids, attention_mask=attention_mask)
x = output.logits
return x
def training_step(self, batch, batch_idx: int) -> float:
# refactored from: https://www.kaggle.com/justinchae/epoch-utils
labels, encoded_batch, kaggle_ids = batch
input_ids = encoded_batch["input_ids"]
attention_mask = encoded_batch["attention_mask"]
# per docs, keep train step separate from forward call
output = self.model(input_ids=input_ids, attention_mask=attention_mask)
y_hat = output.logits
# quick reshape to align labels to predictions
labels = labels.view(-1, 1)
loss = self.loss_func(y_hat, labels)
self.log("train_loss", loss)
return loss
def validation_step(self, batch, batch_idx: int) -> float:
# refactored from: https://www.kaggle.com/justinchae/epoch-utils
labels, encoded_batch, kaggle_ids = batch
input_ids = encoded_batch["input_ids"]
attention_mask = encoded_batch["attention_mask"]
# this self call is calling the forward method
y_hat = self(input_ids, attention_mask)
# quick reshape to align labels to predictions
labels = labels.view(-1, 1)
loss = self.loss_func(y_hat, labels)
self.log("val_loss", loss)
return loss
def predict(self, batch, batch_idx: int, dataloader_idx: int = None):
# creating this predict method overrides the pl predict method
target, encoded_batch, kaggle_ids = batch
input_ids = encoded_batch["input_ids"]
attention_mask = encoded_batch["attention_mask"]
# this self call is calling the forward method
y_hat = self(input_ids, attention_mask)
# convert to numpy then list like struct to zip with ids
y_hat = y_hat.cpu().numpy().ravel()
# customizing the predict behavior to account for unique ids
if self.tokenizer is not None and self.do_decode:
target = target.cpu().numpy().ravel() if len(target) > 0 else None
excerpt = self.tokenizer.batch_decode(
input_ids.cpu().numpy(),
skip_special_tokens=True,
clean_up_tokenization_spaces=True,
)
if self.output_hidden_states:
hidden_states = self.extract_hidden_only(
input_ids=input_ids, attention_mask=attention_mask
)
else:
hidden_states = None
if target is not None:
predictions = list(
zip(
kaggle_ids,
target,
y_hat
# , hidden_states
)
)
predictions = pd.DataFrame(
predictions,
columns=[
"id",
"target",
"logit"
# , 'hidden_states'
],
)
else:
predictions = list(
zip(
kaggle_ids,
y_hat
# , hidden_states
)
)
predictions = pd.DataFrame(
predictions,
columns=[
"id",
"logit"
# , 'hidden_states'
],
)
else:
predictions = list(zip(kaggle_ids, y_hat))
predictions = pd.DataFrame(predictions, columns=["id", "target"])
return predictions
def configure_optimizers(self) -> torch.optim.Optimizer:
# Reference: https://pytorch-lightning.readthedocs.io/en/latest/notebooks/lightning_examples/text-transformers.html
model = self.model
no_decay = ["bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p
for n, p in model.named_parameters()
if not any(nd in n for nd in no_decay)
],
"weight_decay": self.hparams.weight_decay,
},
{
"params": [
p
for n, p in model.named_parameters()
if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
optimizer = AdamW(
optimizer_grouped_parameters,
lr=self.hparams.learning_rate,
eps=self.hparams.adam_epsilon,
)
scheduler = get_linear_schedule_with_warmup(
optimizer,
num_warmup_steps=self.hparams.warmup_steps,
num_training_steps=self.total_steps,
)
scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
return [optimizer], [scheduler]
def my_collate_fn(
batch,
tokenizer,
max_length: int = 100,
return_tensors: str = "pt",
padding: str = "max_length",
truncation: bool = True,
):
# source: https://www.kaggle.com/justinchae/nn-utils
labels = []
batch_texts = []
kaggle_ids = []
for _label, batch_text, kaggle_id in batch:
if _label is not None:
labels.append(_label)
batch_texts.append(batch_text)
kaggle_ids.append(kaggle_id)
if _label is not None:
labels = torch.tensor(labels, dtype=torch.float)
encoded_batch = tokenizer(
batch_texts,
return_tensors=return_tensors,
padding=padding,
max_length=max_length,
truncation=truncation,
)
return labels, encoded_batch, kaggle_ids
class CommonLitDataset(Dataset):
def __init__(
self,
df,
text_col: str = "excerpt",
label_col: str = "target",
kaggle_id: str = "id",
        sample_size: Optional[int] = None,
):
self.df = df if sample_size is None else df.sample(sample_size)
self.text_col = text_col
self.label_col = label_col
self.kaggle_id = kaggle_id
self.num_labels = (
len(df[label_col].unique()) if label_col in df.columns else None
)
# source: https://www.kaggle.com/justinchae/nn-utils
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
result = None
text = self.df.iloc[idx][self.text_col]
kaggle_id = self.df.iloc[idx][self.kaggle_id]
if "target" in self.df.columns:
target = self.df.iloc[idx][self.label_col]
return target, text, kaggle_id
else:
return None, text, kaggle_id
class CommonLitDataModule(pl.LightningDataModule):
def __init__(
self,
tokenizer,
train_path,
collate_fn=None,
max_length: int = 280,
batch_size: int = 16,
valid_path: Optional[str] = None,
test_path: Optional[str] = None,
train_valid_split: float = 0.6,
dtypes=None,
shuffle_dataloader: bool = True,
num_dataloader_workers: int = NUM_DATALOADER_WORKERS,
kfold: Optional[dict] = None,
):
super(CommonLitDataModule, self).__init__()
self.tokenizer = tokenizer
self.train_path = train_path
self.valid_path = valid_path
self.test_path = test_path
self.train_valid_split = train_valid_split
self.dtypes = {"id": str} if dtypes is None else dtypes
self.train_size = None
self.train_df, self.train_data = None, None
self.valid_df, self.valid_data = None, None
self.test_df, self.test_data = None, None
if collate_fn is not None:
self.collate_fn = partial(
collate_fn, tokenizer=tokenizer, max_length=max_length
)
else:
            self.collate_fn = partial(
                my_collate_fn, tokenizer=tokenizer, max_length=max_length
            )
self.shuffle_dataloader = shuffle_dataloader
self.batch_size = batch_size
self.num_dataloader_workers = num_dataloader_workers
# refactored from: https://www.kaggle.com/justinchae/nn-utils
def _strip_extraneous(self, df):
strip_cols = ["url_legal", "license"]
if all(col in df.columns for col in strip_cols):
extraneous_data = strip_cols
return df.drop(columns=extraneous_data)
else:
return df
def prepare(self, prep_type=None):
if prep_type == "train":
# creates just an instance of the train data as a pandas df
self.train_df = (
self.train_path
if isinstance(self.train_path, pd.DataFrame)
else pd.read_csv(self.train_path, dtype=self.dtypes)
)
self.train_df = self._strip_extraneous(self.train_df)
if prep_type == "train_stage_2":
self.train_df = (
self.train_path
if isinstance(self.train_path, pd.DataFrame)
else pd.read_csv(self.train_path, dtype=self.dtypes)
)
self.train_df = self._strip_extraneous(self.train_df)
self.train_size = int(len(self.train_df))
self.train_data = CommonLitDataset(df=self.train_df)
def setup(self, stage: Optional[str] = None) -> None:
if stage == "fit":
# when this class is called by trainer.fit, this stage runs and so on
self.train_df = (
self.train_path
if isinstance(self.train_path, pd.DataFrame)
else pd.read_csv(self.train_path, dtype=self.dtypes)
)
self.train_df = self._strip_extraneous(self.train_df)
self.train_size = int(len(self.train_df))
self.train_data = CommonLitDataset(df=self.train_df)
if self.train_valid_split is not None and self.valid_path is None:
self.train_size = int(len(self.train_df) * self.train_valid_split)
self.train_data, self.valid_data = random_split(
self.train_data,
[self.train_size, len(self.train_df) - self.train_size],
)
elif self.valid_path is not None:
self.valid_df = (
self.valid_path
if isinstance(self.valid_path, pd.DataFrame)
else pd.read_csv(self.valid_path, dtype=self.dtypes)
)
self.valid_data = CommonLitDataset(df=self.valid_df)
if stage == "predict":
self.test_df = (
self.test_path
if isinstance(self.test_path, pd.DataFrame)
else pd.read_csv(self.test_path, dtype=self.dtypes)
)
self.test_df = self._strip_extraneous(self.test_df)
self.test_data = CommonLitDataset(df=self.test_df)
self.train_df = (
self.train_path
if isinstance(self.train_path, pd.DataFrame)
else pd.read_csv(self.train_path, dtype=self.dtypes)
)
self.train_df = self._strip_extraneous(self.train_df)
self.train_size = int(len(self.train_df))
self.train_data = CommonLitDataset(df=self.train_df)
def kfold_data(self):
# TODO: wondering how to integrate kfolds into the datamodule
pass
def train_dataloader(self) -> DataLoader:
return DataLoader(
self.train_data,
batch_size=self.batch_size,
shuffle=self.shuffle_dataloader,
collate_fn=self.collate_fn,
num_workers=self.num_dataloader_workers,
pin_memory=True,
)
def val_dataloader(self) -> DataLoader:
if self.valid_data is None:
return None
else:
return DataLoader(
self.valid_data,
batch_size=self.batch_size,
shuffle=False,
collate_fn=self.collate_fn,
num_workers=self.num_dataloader_workers,
pin_memory=True,
)
def predict_dataloader(self) -> DataLoader:
if self.test_data is None:
return None
else:
return DataLoader(
self.test_data,
batch_size=self.batch_size,
shuffle=False,
collate_fn=self.collate_fn,
num_workers=self.num_dataloader_workers,
pin_memory=True,
)
def add_textstat_features(df):
# adding the text standard seems to boost the accuracy score a bit
df["text_standard"] = df["excerpt"].apply(lambda x: textstat.text_standard(x))
df["text_standard_category"] = df["text_standard"].astype("category").cat.codes
# counting ratio of difficult words by lexicon count
df["difficult_words_ratio"] = df["excerpt"].apply(
lambda x: textstat.difficult_words(x)
)
df["difficult_words_ratio"] = df.apply(
lambda x: x["difficult_words_ratio"] / textstat.lexicon_count(x["excerpt"]),
axis=1,
)
df["syllable_ratio"] = df["excerpt"].apply(lambda x: textstat.syllable_count(x))
df["syllable_ratio"] = df.apply(
lambda x: x["syllable_ratio"] / textstat.lexicon_count(x["excerpt"]), axis=1
)
### You can add/remove any feature below and it will be used in training and test
df["coleman_liau_index"] = df["excerpt"].apply(
lambda x: textstat.coleman_liau_index(x)
)
df["flesch_reading_ease"] = df["excerpt"].apply(
lambda x: textstat.flesch_reading_ease(x)
)
df["smog_index"] = df["excerpt"].apply(lambda x: textstat.smog_index(x))
df["gunning_fog"] = df["excerpt"].apply(lambda x: textstat.gunning_fog(x))
df["flesch_kincaid_grade"] = df["excerpt"].apply(
lambda x: textstat.flesch_kincaid_grade(x)
)
df["automated_readability_index"] = df["excerpt"].apply(
lambda x: textstat.automated_readability_index(x)
)
df["dale_chall_readability_score"] = df["excerpt"].apply(
lambda x: textstat.dale_chall_readability_score(x)
)
df["linsear_write_formula"] = df["excerpt"].apply(
lambda x: textstat.linsear_write_formula(x)
)
###
df = df.drop(columns=["excerpt", "text_standard"])
return df
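# Illustrative-only sanity check of add_textstat_features, not part of the original
# pipeline: the one-row frame and its excerpt text are made up, and the call simply
# shows the readability features the function returns (relies on the pandas/textstat
# imports this notebook already uses above).
_demo_df = pd.DataFrame(
    {"id": ["demo"], "excerpt": ["The quick brown fox jumps over the lazy dog."]}
)
print(add_textstat_features(_demo_df.copy()).T)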
def process_hidden_states(df, drop_hidden_states=False):
# for convenience, moving hidden states to the far right of the df
if drop_hidden_states:
df.drop(columns=["hidden_states"], inplace=True)
return df
elif "hidden_states" in df.columns:
df["hidden_state"] = df["hidden_states"]
df.drop(columns=["hidden_states"], inplace=True)
temp = df["hidden_state"].apply(pd.Series)
temp = temp.rename(columns=lambda x: "hidden_state_" + str(x))
df = pd.concat([df, temp], axis=1)
df.drop(columns=["hidden_state"], inplace=True)
return df
else:
print("hidden_states not found in dataframe, skipping process_hidden_states")
return df
datamodule = CommonLitDataModule(
collate_fn=my_collate_fn,
tokenizer=PRETRAINED_ROBERTA_BASE_TOKENIZER,
train_path=KAGGLE_TRAIN_PATH,
test_path=KAGGLE_TEST_PATH,
max_length=TOKENIZER_MAX_LEN,
batch_size=BATCH_SIZE,
train_valid_split=TRAIN_VALID_SPLIT,
)
# manually calling this stage since we need some params to set up model initially
datamodule.setup(stage="fit")
if USE_CHECKPOINT:
# model = LitRobertaLogitRegressor.load_from_checkpoint(TUNED_CHECKPOINT_PATH)
trainer = pl.Trainer(
gpus=ACCELERATOR_TYPE["gpus"], tpu_cores=ACCELERATOR_TYPE["tpu_cores"]
)
model = LitRobertaLogitRegressor(
pre_trained_path=TUNED_BEST_ROBERTA_PATH,
train_size=datamodule.train_size,
batch_size=datamodule.batch_size,
output_hidden_states=USE_HIDDEN_IN_RGR,
n_gpus=ACCELERATOR_TYPE["gpus"],
accumulate_grad_batches=trainer.accumulate_grad_batches,
learning_rate=LEARNING_RATE,
warmup_steps=WARMUP_STEPS,
max_epochs=MAX_EPOCHS,
tokenizer=datamodule.tokenizer,
)
trainer = pl.Trainer(
gpus=ACCELERATOR_TYPE["gpus"], tpu_cores=ACCELERATOR_TYPE["tpu_cores"]
)
else:
checkpoint_filename = f"crp_roberta_trial_main"
checkpoint_save = ModelCheckpoint(
dirpath=CHECKPOINTS_PATH, filename=checkpoint_filename
)
early_stopping_callback = EarlyStopping(monitor="val_loss", patience=2)
trainer = pl.Trainer(
max_epochs=MAX_EPOCHS,
gpus=ACCELERATOR_TYPE["gpus"],
tpu_cores=ACCELERATOR_TYPE["tpu_cores"],
precision=16 if USE_16_BIT_PRECISION else 32,
default_root_dir=CHECKPOINTS_PATH,
gradient_clip_val=GRADIENT_CLIP_VAL,
stochastic_weight_avg=True,
callbacks=[checkpoint_save, early_stopping_callback],
fast_dev_run=FAST_DEV_RUN,
)
model = LitRobertaLogitRegressor(
        pre_trained_path=PRETRAINED_ROBERTA_BASE_MODEL_PATH,
train_size=datamodule.train_size,
batch_size=datamodule.batch_size,
n_gpus=trainer.gpus,
n_tpus=trainer.tpu_cores,
max_epochs=trainer.max_epochs,
accumulate_grad_batches=trainer.accumulate_grad_batches,
learning_rate=LEARNING_RATE,
warmup_steps=WARMUP_STEPS,
tokenizer=datamodule.tokenizer,
)
trainer.fit(model, datamodule=datamodule)
# let's also save the tuned roberta state which our model wraps around
model_file_name = f"tuned_roberta_model"
model_file_path = os.path.join(MODEL_PATH, model_file_name)
model.save_pretrained(model_file_path)
# clean up memory
torch.cuda.empty_cache()
gc.collect()
# freeze the model for prediction
model.eval()
model.freeze()
datamodule.setup(stage="predict")
model.do_decode = True
# run predict over the full train set to generate stage-two features (per-excerpt RoBERTa logits)
train_data_stage_two = trainer.predict(
model=model, dataloaders=datamodule.train_dataloader()
)
train_data_stage_two = pd.concat(train_data_stage_two).reset_index(drop=True)
train_data_stage_two = pd.merge(
left=train_data_stage_two,
right=datamodule.train_df.drop(columns=["standard_error", "target"]),
left_on="id",
right_on="id",
)
print(train_data_stage_two)
# TODO: test whether we need to save and upload the fine-tuned state of roberta or if pytorch lightning checkpoints take care of it all
train_data_stage_three = add_textstat_features(train_data_stage_two)
label_data = train_data_stage_three[["id"]].copy(deep=True)
train_data = train_data_stage_three.drop(
columns=["id", "target", "text_standard_category"]
).copy(deep=True)
train_data_cols = list(train_data.columns)
target_data = train_data_stage_three[["target"]].copy(deep=True)
scaler = StandardScaler()
train_data_scaled = scaler.fit_transform(train_data)
train_data_scaled = pd.DataFrame(train_data_scaled, columns=train_data_cols)
TARGET_SCALER = StandardScaler()
target_data_scaled = TARGET_SCALER.fit_transform(target_data)
target_data_scaled = pd.DataFrame(target_data_scaled, columns=["target"])
regr = SVR(kernel="linear")
regr.fit(train_data_scaled, target_data_scaled["target"])
print(" Assessment of Features ")
print("R2 Score: ", regr.score(train_data_scaled, target_data_scaled["target"]))
print(
"RSME Score: ",
math.sqrt(
mean_squared_error(
target_data_scaled["target"], regr.predict(train_data_scaled)
)
),
)
# regr.coef_ is an array of shape (1, n_features)
feats_coef = list(zip(train_data_cols, regr.coef_[0]))
feature_analysis = pd.DataFrame(feats_coef, columns=["feature_col", "coef_val"])
feature_analysis["coef_val"] = feature_analysis["coef_val"] # .abs()
feature_analysis = feature_analysis.sort_values("coef_val", ascending=False)
feature_analysis.plot.barh(
x="feature_col", y="coef_val", title="Comparison of Features and Importance"
)
# select the top n features for use in final regression approach
best_n_features = feature_analysis.head(N_FEATURES_TO_USE_HEAD)["feature_col"].to_list()
# optionally also take the bottom-N features (largest negative coefficients)
if N_FEATURES_TO_USE_TAIL is not None:
worst_n_features = feature_analysis.tail(N_FEATURES_TO_USE_TAIL)[
"feature_col"
].to_list()
best_n_features.extend(worst_n_features)
# manually adding this categorical feature in
if "text_standard_category" not in best_n_features:
best_n_features.append("text_standard_category")
best_n_features = list(set(best_n_features))
train_data = train_data_stage_three[best_n_features]
DATASET = train_data.copy(deep=True)
DATASET["target"] = target_data_scaled["target"]
DATASET["id"] = label_data["id"]
temp_cols = list(
DATASET.drop(columns=["id", "target", "text_standard_category"]).columns
)
DATASET_scaled = DATASET[temp_cols]
scaler = StandardScaler()
DATASET_scaled = scaler.fit_transform(DATASET_scaled)
DATASET_scaled = pd.DataFrame(DATASET_scaled, columns=temp_cols)
DATASET_scaled[["id", "target", "text_standard_category"]] = DATASET[
["id", "target", "text_standard_category"]
]
print(DATASET_scaled)
# note: this DataFrame alias shadows torch.utils.data.Dataset imported above;
# it is only used by the cross-validation loop in objective() below
Dataset = DATASET_scaled
# https://medium.com/optuna/lightgbm-tuner-new-optuna-integration-for-hyperparameter-optimization-8b7095e99258
# https://www.kaggle.com/corochann/optuna-tutorial-for-hyperparameter-optimization
RGR_MODELS = []
def objective(trial: optuna.trial.Trial, n_folds=5, shuffle=True):
params = {
"metric": "rmse",
"boosting_type": "gbdt",
"verbose": -1,
"num_leaves": trial.suggest_int("num_leaves", 4, 512),
"max_depth": trial.suggest_int("max_depth", 4, 512),
"max_bin": trial.suggest_int("max_bin", 4, 512),
"min_data_in_leaf": trial.suggest_int("min_data_in_leaf", 64, 512),
"bagging_fraction": trial.suggest_uniform("bagging_fraction", 0.1, 1.0),
"bagging_freq": trial.suggest_int("max_bin", 5, 10),
"feature_fraction": trial.suggest_uniform("feature_fraction", 0.4, 1.0),
"learning_rate": trial.suggest_float("bagging_fraction", 0.0005, 0.01),
"n_estimators": trial.suggest_int("num_leaves", 10, 10000),
"lambda_l1": trial.suggest_loguniform("lambda_l1", 1e-8, 10.0),
"lambda_l2": trial.suggest_loguniform("lambda_l2", 1e-8, 10.0),
}
fold = KFold(
n_splits=n_folds, shuffle=shuffle, random_state=SEED_VAL if shuffle else None
)
valid_score = []
best_model_tracker = {}
for fold_idx, (train_idx, valid_idx) in enumerate(fold.split(range(len(DATASET)))):
train_data = (
Dataset.iloc[train_idx].drop(columns=["id", "target"]).copy(deep=True)
)
train_target = Dataset[["target"]].iloc[train_idx].copy(deep=True)
valid_data = (
Dataset.iloc[valid_idx].drop(columns=["id", "target"]).copy(deep=True)
)
valid_target = Dataset[["target"]].iloc[valid_idx].copy(deep=True)
lgbm_train = lgm.Dataset(
train_data,
label=train_target,
categorical_feature=["text_standard_category"],
)
lgbm_valid = lgm.Dataset(
valid_data,
label=valid_target,
categorical_feature=["text_standard_category"],
)
curr_model = lgm.train(
params,
train_set=lgbm_train,
valid_sets=[lgbm_train, lgbm_valid],
verbose_eval=-1,
)
valid_pred = curr_model.predict(
valid_data, num_iteration=curr_model.best_iteration
)
best_score = curr_model.best_score["valid_1"]["rmse"]
best_model_tracker.update({best_score: curr_model})
valid_score.append(best_score)
best_model_score = min([k for k, v in best_model_tracker.items()])
best_model = best_model_tracker[best_model_score]
RGR_MODELS.append(best_model)
# RGR_MODELS.append({best_model_score: best_model})
# worst_rgr_model_idx = max([d.keys[0] for d in RGR_MODELS])
# RGR_MODELS[worst_rgr_model_idx] = {best_model_score: None}
score = np.mean(valid_score)
return score
study = optuna.create_study(storage="sqlite:///lgm-study.db")
study.optimize(objective, n_trials=256)
plot_optimization_history(study).show()
print("Best Trial: ", study.best_trial, "\n")
# use the study parameters to create and train a lgbm regressor
lgm_train_data = DATASET_scaled.drop(columns=["id"]).copy(deep=True)
x_features = lgm_train_data.loc[:, lgm_train_data.columns != "target"]
y_train = lgm_train_data[["target"]]
lgm_train_set_full = lgm.Dataset(
data=x_features, categorical_feature=["text_standard_category"], label=y_train
)
gbm = lgm.train(
study.best_trial.params,
lgm_train_set_full,
)
model.do_decode = True
trainer = pl.Trainer(
gpus=ACCELERATOR_TYPE["gpus"], tpu_cores=ACCELERATOR_TYPE["tpu_cores"]
)
# run predict on the test data
submission_stage_1 = trainer.predict(
model=model, dataloaders=datamodule.predict_dataloader()
)
submission_stage_1 = pd.concat(submission_stage_1).reset_index(drop=True)
print(" Submission Stage 1: After RoBERTA\n")
print(submission_stage_1)
submission_stage_2 = pd.merge(
left=submission_stage_1,
right=datamodule.test_df,
left_on="id",
right_on="id",
how="left",
)
submission_stage_2 = add_textstat_features(submission_stage_2)
feature_cols = list(submission_stage_2.drop(columns=["id"]).copy(deep=True).columns)
predict_data = submission_stage_2.drop(columns=["id"]).copy(deep=True)
predict_data = predict_data[best_n_features]
temp_cols = list(predict_data.drop(columns=["text_standard_category"]).columns)
predict_data_scaled = predict_data[temp_cols]
predict_data_scaled = scaler.transform(predict_data_scaled)
predict_data_scaled = pd.DataFrame(predict_data_scaled, columns=temp_cols)
predict_data_scaled["text_standard_category"] = predict_data["text_standard_category"]
submission = submission_stage_2[["id"]].copy(deep=True)
submission["target"] = gbm.predict(predict_data_scaled)
submission["target"] = TARGET_SCALER.inverse_transform(submission["target"])
print(" Final Stage After LGBM\n")
print(submission)
submission.to_csv("submission.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/393/69393429.ipynb
|
progresbar2local
|
justinchae
|
[{"Id": 69393429, "ScriptId": 18638229, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4319244, "CreationDate": "07/30/2021 12:40:32", "VersionNumber": 36.0, "Title": "The Bernstein Bears CRP Submission 1", "EvaluationDate": "07/30/2021", "IsChange": true, "TotalLines": 887.0, "LinesInsertedFromPrevious": 13.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 874.0, "LinesInsertedFromFork": 409.0, "LinesDeletedFromFork": 274.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 478.0, "TotalVotes": 0}]
|
[{"Id": 92503477, "KernelVersionId": 69393429, "SourceDatasetVersionId": 2311525}, {"Id": 92503478, "KernelVersionId": 69393429, "SourceDatasetVersionId": 2312589}, {"Id": 92503476, "KernelVersionId": 69393429, "SourceDatasetVersionId": 2311499}]
|
[{"Id": 2311525, "DatasetId": 1394642, "DatasourceVersionId": 2352908, "CreatorUserId": 4319244, "LicenseName": "Unknown", "CreationDate": "06/07/2021 14:51:02", "VersionNumber": 1.0, "Title": "progresbar2-local", "Slug": "progresbar2local", "Subtitle": "Downloaded for offline use in kaggle \"no internet\" kernels", "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1394642, "CreatorUserId": 4319244, "OwnerUserId": 4319244.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2311525.0, "CurrentDatasourceVersionId": 2352908.0, "ForumId": 1413893, "Type": 2, "CreationDate": "06/07/2021 14:51:02", "LastActivityDate": "06/07/2021", "TotalViews": 934, "TotalDownloads": 4, "TotalVotes": 1, "TotalKernels": 3}]
|
[{"Id": 4319244, "UserName": "justinchae", "DisplayName": "Justin Chae", "RegisterDate": "01/12/2020", "PerformanceTier": 1}]
|
# # The Bernstein Bears CRP Submission 1
# install necessary libraries from input
# import progressbar library for offline usage
# import text stat library for additional ml data prep
FAST_DEV_RUN = False
USE_CHECKPOINT = True
USE_HIDDEN_IN_RGR = False
N_FEATURES_TO_USE_HEAD = 1
N_FEATURES_TO_USE_TAIL = None
# in this kernel, run train on all data to maximize score on held out data but use what we learned about optimal parameters
# set to 16 bit precision to cut compute requirements/increase batch size capacity
USE_16_BIT_PRECISION = True
# set a seed value for consistent experimentation; optional, else leave as None
SEED_VAL = 42
# set a train-validation split, .7 means 70% of train data and 30% to validation set
TRAIN_VALID_SPLIT = 0.8 # if None, then don't split
# set hyperparameters learned from tuning: https://www.kaggle.com/justinchae/tune-roberta-pytorch-lightning-optuna
MAX_EPOCHS = 4
BATCH_SIZE = 16
GRADIENT_CLIP_VAL = 0.18318092164684585
LEARNING_RATE = 3.613894271216525e-05
TOKENIZER_MAX_LEN = 363
WARMUP_STEPS = 292
WEIGHT_DECAY = 0.004560699842170359
import kaggle_config
from kaggle_config import (
WORKFLOW_ROOT,
DATA_PATH,
CACHE_PATH,
FIG_PATH,
MODEL_PATH,
ANALYSIS_PATH,
KAGGLE_INPUT,
CHECKPOINTS_PATH,
LOGS_PATH,
)
INPUTS, DEVICE = kaggle_config.run()
KAGGLE_TRAIN_PATH = kaggle_config.get_train_path(INPUTS)
KAGGLE_TEST_PATH = kaggle_config.get_test_path(INPUTS)
import pytorch_lightning as pl
from pytorch_lightning import loggers as pl_loggers
from pytorch_lightning import seed_everything
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
from pytorch_lightning.tuner.batch_size_scaling import scale_batch_size
from pytorch_lightning.tuner.lr_finder import _LRFinder, lr_find
import torchmetrics
import optuna
from optuna.integration import PyTorchLightningPruningCallback
from optuna.samplers import TPESampler, RandomSampler, CmaEsSampler
from optuna.visualization import (
plot_intermediate_values,
plot_optimization_history,
plot_param_importances,
)
import optuna.integration.lightgbm as lgb
import lightgbm as lgm
from sklearn.model_selection import (
KFold,
cross_val_score,
RepeatedKFold,
train_test_split,
)
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.decomposition import PCA
from sklearn.feature_selection import (
RFE,
f_regression,
mutual_info_regression,
SequentialFeatureSelector,
)
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
from sklearn.metrics import mean_squared_error
import math
import textstat
import torch
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.dataset import random_split
import tensorflow as tf
from transformers import (
RobertaForSequenceClassification,
RobertaTokenizer,
AdamW,
get_linear_schedule_with_warmup,
)
import os
import pandas as pd
import numpy as np
| false | 0 | 9,748 | 0 | 9,772 | 9,748 |
||
69393152
|
<jupyter_start><jupyter_text>cov19detection_dataset
Kaggle dataset identifier: cov19detection-dataset
<jupyter_code>import pandas as pd
df = pd.read_csv('cov19detection-dataset/imginfo.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 6334 entries, 0 to 6333
Data columns (total 23 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Unnamed: 0 6334 non-null int64
1 id_x 6334 non-null object
2 boxes 4294 non-null object
3 label 6334 non-null object
4 StudyInstanceUID 6334 non-null object
5 Negative for Pneumonia 6334 non-null int64
6 Typical Appearance 6334 non-null int64
7 Indeterminate Appearance 6334 non-null int64
8 Atypical Appearance 6334 non-null int64
9 path 6334 non-null object
10 fold 6334 non-null int64
11 mean 6334 non-null float64
12 var 6334 non-null float64
13 dim1 6334 non-null int64
14 dim2 6334 non-null int64
15 crop_xmin 6334 non-null int64
16 crop_ymin 6334 non-null int64
17 crop_xmax 6334 non-null int64
18 crop_ymax 6334 non-null int64
19 crop_fxmin 6334 non-null float64
20 crop_fymin 6334 non-null float64
21 crop_fxmax 6334 non-null float64
22 crop_fymax 6334 non-null float64
dtypes: float64(6), int64(12), object(5)
memory usage: 1.1+ MB
<jupyter_text>Examples:
{
"Unnamed: 0": 0,
"id_x": "000a312787f2_image",
"boxes": "[{'x': 789.28836, 'y': 582.43035, 'width': 1026.65662, 'height': 1917.30292}, {'x': 2245.91208, 'y': 591.20528, 'width': 1094.66162, 'height': 1761.54944}]",
"label": "opacity 1 789.28836 582.43035 1815.94498 2499.73327 opacity 1 2245.91208 591.20528 3340.5737 2352.75472",
"StudyInstanceUID": "5776db0cec75_study",
"Negative for Pneumonia": 0,
"Typical Appearance": 1,
"Indeterminate Appearance": 0,
"Atypical Appearance": 0,
"path": "../input/siim-covid19-detection/train/5776db0cec75/81456c9c5423/000a312787f2.dcm",
"fold": 0,
"mean": 128.6391528474,
"var": 67.920095367,
"dim1": 3488,
"dim2": 4256,
"crop_xmin": 0,
"crop_ymin": 260,
"crop_xmax": 3487,
"crop_ymax": 3828,
"crop_fxmin": 0.0,
"...": "and 3 more columns"
}
{
"Unnamed: 0": 1,
"id_x": "000c3a3f293f_image",
"boxes": null,
"label": "none 1 0 0 1 1",
"StudyInstanceUID": "ff0879eb20ed_study",
"Negative for Pneumonia": 1,
"Typical Appearance": 0,
"Indeterminate Appearance": 0,
"Atypical Appearance": 0,
"path": "../input/siim-covid19-detection/train/ff0879eb20ed/d8a644cc4f93/000c3a3f293f.dcm",
"fold": 1,
"mean": 161.1355225989,
"var": 50.2639362976,
"dim1": 2320,
"dim2": 2832,
"crop_xmin": 0,
"crop_ymin": 0,
"crop_xmax": 2320,
"crop_ymax": 2832,
"crop_fxmin": 0.0,
"...": "and 3 more columns"
}
{
"Unnamed: 0": 2,
"id_x": "0012ff7358bc_image",
"boxes": "[{'x': 677.42216, 'y': 197.97662, 'width': 867.79767, 'height': 999.78214}, {'x': 1792.69064, 'y': 402.5525, 'width': 617.02734, 'height': 1204.358}]",
"label": "opacity 1 677.42216 197.97662 1545.21983 1197.75876 opacity 1 1792.69064 402.5525 2409.71798 1606.9105",
"StudyInstanceUID": "9d514ce429a7_study",
"Negative for Pneumonia": 0,
"Typical Appearance": 1,
"Indeterminate Appearance": 0,
"Atypical Appearance": 0,
"path": "../input/siim-covid19-detection/train/9d514ce429a7/22897cd1daa0/0012ff7358bc.dcm",
"fold": 3,
"mean": 106.9416714773,
"var": 51.319914025,
"dim1": 2544,
"dim2": 3056,
"crop_xmin": 122,
"crop_ymin": 194,
"crop_xmax": 2544,
"crop_ymax": 3052,
"crop_fxmin": 0.0479559748,
"...": "and 3 more columns"
}
{
"Unnamed: 0": 3,
"id_x": "001398f4ff4f_image",
"boxes": "[{'x': 2729, 'y': 2181.33331, 'width': 948.00012, 'height': 604}]",
"label": "opacity 1 2729 2181.33331 3677.00012 2785.33331",
"StudyInstanceUID": "28dddc8559b2_study",
"Negative for Pneumonia": 0,
"Typical Appearance": 0,
"Indeterminate Appearance": 0,
"Atypical Appearance": 1,
"path": "../input/siim-covid19-detection/train/28dddc8559b2/4d47bc042ee6/001398f4ff4f.dcm",
"fold": 1,
"mean": 120.6609315925,
"var": 85.3619513012,
"dim1": 3520,
"dim2": 4280,
"crop_xmin": 309,
"crop_ymin": 400,
"crop_xmax": 3509,
"crop_ymax": 4280,
"crop_fxmin": 0.0877840909,
"...": "and 3 more columns"
}
<jupyter_script># # make dataframe
# #### I obtained the image information with [[siim-step1]get_imginfo](https://www.kaggle.com/kunihikofurugori/siim-step1-get-imginfo).
# #### As the next step, I build the dataframe from that image information (a small parsing sketch follows the links below).
# ## my public notebook
# ### [step1 get_imageinformation](https://www.kaggle.com/kunihikofurugori/siim-step1-get-imginfo).
# ### [step2 make_dataframe](https://www.kaggle.com/kunihikofurugori/step2-make-dataframe/edit/run/69201903).
# ### [step3-1 renew-imglev_ds](https://www.kaggle.com/kunihikofurugori/siim-step3-1-renew-imglev-ds)
# ### [step3-2 renew-studylev_ds](https://www.kaggle.com/kunihikofurugori/siim-step3-1-renew-studylev-ds)
#
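# A minimal, illustrative sketch of the core idea used below: the string-encoded
# `boxes` column is turned into a list of dicts with ast.literal_eval before the
# per-box columns are derived. The sample string is copied from the first record in
# the dataset preview above; the real pipeline applies the same parsing via df.apply.
import ast

_boxes_str = "[{'x': 789.28836, 'y': 582.43035, 'width': 1026.65662, 'height': 1917.30292}]"
_boxes = ast.literal_eval(_boxes_str)
print(len(_boxes), _boxes[0]["x"] + _boxes[0]["width"])  # 1 box, xmax ~= 1815.94498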
#!pip install python-gdcm
#!pip install pylibjpeg-libjpeg
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import cv2
# import pydicom
# import gdcm
# from pydicom.pixel_data_handlers.util import apply_voi_lut
import matplotlib.pyplot as plt
import glob
import os
from PIL import Image
import ast
ids = ["394536fc0dca_image"]
# # make df_image
df = pd.read_csv("../input/cov19detection-dataset/imginfo.csv")
df = df[~df.id_x.isin(ids)]
df["augflag"] = 0
index = df[
(df.crop_fxmax > 0.7)
& (df.crop_fymax > 0.7)
& (df.crop_fxmin < 0.3)
& (df.crop_fymin < 0.3)
].index
df.loc[index, "augflag"] = 1
df = df.dropna(how="any")  # keeps only annotated rows (boxes is NaN for the "none" images)
df["boxes"] = df.boxes.apply(lambda x: ast.literal_eval(x))  # parse the stringified list of box dicts
df["boxnum"] = df.boxes.apply(lambda x: len(x))
df = df[df.boxnum.isin([1, 2, 3])]  # keep images with at most three boxes
df["box1"] = df.boxes.apply(lambda x: x[0])
df["box2"] = df.boxes.apply(lambda x: x[1] if len(x) > 1 else 0)  # 0 marks an unused box slot
df["box3"] = df.boxes.apply(lambda x: x[2] if len(x) > 2 else 0)
df["xmin1"] = df.box1.apply(lambda x: x["x"])
df["ymin1"] = df.box1.apply(lambda x: x["y"])
df["xmax1"] = df.box1.apply(lambda x: x["x"] + x["width"])
df["ymax1"] = df.box1.apply(lambda x: x["y"] + x["height"])
df["area1"] = df.box1.apply(lambda x: (x["width"] * x["height"]))
df["xmin2"] = df.box2.apply(lambda x: x["x"] if x != 0 else 0)
df["ymin2"] = df.box2.apply(lambda x: x["y"] if x != 0 else 0)
df["xmax2"] = df.box2.apply(lambda x: x["x"] + x["width"] if x != 0 else 0)
df["ymax2"] = df.box2.apply(lambda x: x["y"] + x["height"] if x != 0 else 0)
df["area2"] = df.box2.apply(lambda x: x["width"] * x["height"] if x != 0 else 0)
df["xmin3"] = df.box3.apply(lambda x: x["x"] if x != 0 else 0)
df["ymin3"] = df.box3.apply(lambda x: x["y"] if x != 0 else 0)
df["xmax3"] = df.box3.apply(lambda x: x["x"] + x["width"] if x != 0 else 0)
df["ymax3"] = df.box3.apply(lambda x: x["y"] + x["height"] if x != 0 else 0)
df["area3"] = df.box3.apply(lambda x: x["width"] * x["height"] if x != 0 else 0)
# drop rows where any box extends beyond the image bounds or has negative coordinates
df = df[df.dim2 > df.xmax1]
df = df[df.dim1 > df.ymax1]
df = df[df.dim2 > df.xmax2]
df = df[df.dim1 > df.ymax2]
df = df[df.dim2 > df.xmax3]
df = df[df.dim1 > df.ymax3]
df = df[df.xmin1 >= 0]
df = df[df.xmin2 >= 0]
df = df[df.xmin3 >= 0]
df = df[df.ymin1 >= 0]
df = df[df.ymin2 >= 0]
df = df[df.ymin3 >= 0]
df[
["fracxmin1", "fracxmax1", "fracxmin2", "fracxmax2", "fracxmin3", "fracxmax3"]
] = df.apply(
lambda x: x[["xmin1", "xmax1", "xmin2", "xmax2", "xmin3", "xmax3"]] / x.dim2, axis=1
)
df[
["fracymin1", "fracymax1", "fracymin2", "fracymax2", "fracymin3", "fracymax3"]
] = df.apply(
lambda x: x[["ymin1", "ymax1", "ymin2", "ymax2", "ymin3", "ymax3"]] / x.dim1, axis=1
)
df["fracarea1"] = df.apply(
lambda x: (x.fracxmax1 - x.fracxmin1) * (x.fracymax1 - x.fracymin1), axis=1
)
df["fracarea2"] = df.apply(
lambda x: (x.fracxmax2 - x.fracxmin2) * (x.fracymax2 - x.fracymin2), axis=1
)
df["fracarea3"] = df.apply(
lambda x: (x.fracxmax3 - x.fracxmin3) * (x.fracymax3 - x.fracymin3), axis=1
)
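# (Optional sketch, my addition.) The row-wise .apply(axis=1) calls above are easy to
# read but slow; the same fractional coordinates can be recomputed fully vectorised.
# The values are identical, so nothing downstream changes.
for k in (1, 2, 3):
    df[f"fracxmin{k}"] = df[f"xmin{k}"] / df["dim2"]
    df[f"fracxmax{k}"] = df[f"xmax{k}"] / df["dim2"]
    df[f"fracymin{k}"] = df[f"ymin{k}"] / df["dim1"]
    df[f"fracymax{k}"] = df[f"ymax{k}"] / df["dim1"]
    df[f"fracarea{k}"] = (df[f"fracxmax{k}"] - df[f"fracxmin{k}"]) * (
        df[f"fracymax{k}"] - df[f"fracymin{k}"]
    )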
print(len(df))
# drop rows containing a box smaller than 0.5% of the image area (fracarea == 0 means the slot is unused)
df = df[(df.fracarea1 > 5e-3) | (df.fracarea1 == 0)]
df = df[(df.fracarea2 > 5e-3) | (df.fracarea2 == 0)]
df = df[(df.fracarea3 > 5e-3) | (df.fracarea3 == 0)]
print(len(df))
del df["Unnamed: 0"]
del df["label"]
del df["boxes"]
del df["StudyInstanceUID"]
df = df.rename(columns={"id_x": "id"})
df.columns
print(
df.fracxmin1.mean(), df.fracymin1.mean(), df.fracxmax1.mean(), df.fracymax1.mean()
)
print(
df.fracxmin2.mean(), df.fracymin2.mean(), df.fracxmax2.mean(), df.fracymax2.mean()
)
print(
df.fracxmin3.mean(), df.fracymin3.mean(), df.fracxmax3.mean(), df.fracymax3.mean()
)
# # safety cropping
"""
cnd1 = df.fracxmax1 > df.crop_fxmax
df.loc[cnd1,"crop_fxmax"] = df.fracxmax1*1.01
cnd2 = df.fracxmax2 > df.crop_fxmax
df.loc[cnd2,"crop_fxmax"] = df.fracxmax2*1.01
cnd3 = df.fracxmax3 > df.crop_fxmax
df.loc[cnd3,"crop_fxmax"] = df.fracxmax3*1.01
cnd4 = df.fracxmin1 < df.crop_fxmin
df.loc[cnd4,"crop_fxmin"] = df.fracxmin1*0.99
cnd5 = (df.fracxmin2 < df.crop_fxmin) | (df.fracxmin2 == 0)
df.loc[cnd5,"crop_fxmin"] = df.fracxmin2*0.99
cnd6 = (df.fracxmin3 < df.crop_fxmin) | (df.fracxmin3 == 0)
df.loc[cnd6,"crop_fxmin"] = df.fracxmin3*0.99
cnd1 = df.fracymax1 > df.crop_fymax
df.loc[cnd1,"crop_fymax"] = df.fracymax1*1.01
cnd2 = df.fracymax2 > df.crop_fymax
df.loc[cnd2,"crop_fymax"] = df.fracymax2*1.01
cnd3 = df.fracymax3 > df.crop_fymax
df.loc[cnd3,"crop_fymax"] = df.fracymax3*1.01
cnd4 = df.fracymin1 < df.crop_fymin
df.loc[cnd4,"crop_fymin"] = df.fracymin1*0.99
cnd5 = (df.fracymin2 < df.crop_fymin) | (df.fracymin2 == 0)
df.loc[cnd5,"crop_fymin"] = df.fracymin2*0.99
cnd6 = (df.fracymin3 < df.crop_fymin) | (df.fracymin3 == 0)
df.loc[cnd6,"crop_fymin"] = df.fracymin3*0.99
"""
area = []
area1 = df.fracarea1.values
area2 = df.fracarea2.values
area3 = df.fracarea3.values
area1 = area1[area1 > 0]
area2 = area2[area2 > 0]
area3 = area3[area3 > 0]
area = np.append(area, area1)
area = np.append(area, area2)
area = np.append(area, area3)
plt.hist(
area,
bins=np.logspace(np.log10(area.min()), np.log10(area.max()), 100),
range=[area.min(), area.max()],
)
plt.xscale("log")
plt.title(f"areamax:{area.max():.3g},areamin:{area.min():.3g}")
plt.show()
# df_image = df[df.columns[[0,5,6,7,8,9,10,11,30,31,32,33,34,35,36,37,38,39,40,41]]]
df_image = df[
[
"id",
"path",
"fold",
"mean",
"var",
"dim1",
"dim2",
"boxnum",
"fracxmin1",
"fracxmax1",
"fracxmin2",
"fracxmax2",
"fracxmin3",
"fracxmax3",
"fracymin1",
"fracymax1",
"fracymin2",
"fracymax2",
"fracymin3",
"fracymax3",
"crop_xmin",
"crop_ymin",
"crop_xmax",
"crop_ymax",
"crop_fxmin",
"crop_fymin",
"crop_fxmax",
"crop_fymax",
"augflag",
]
]
meanmax = df_image["mean"].values.max()
varmax = df_image["var"].values.max()
df_image["norm_mean"] = df_image["mean"].apply(lambda x: x / meanmax)
df_image["norm_var"] = df_image["var"].apply(lambda x: x / varmax)
df_image = df_image.reset_index(drop=True)
display(df_image)
from sklearn.model_selection import KFold, StratifiedKFold
RANDOM_STATE = 35
fold = 1
kfold = KFold(n_splits=5, random_state=RANDOM_STATE, shuffle=True)
df_image["test_fold"] = 0
splits = kfold.split(df_image)
train_indexs = []
test_indexs = []
for i, (train_index, test_index) in enumerate(splits):
print(train_index.shape, test_index.shape)
train_indexs.append(train_index)
test_indexs.append(test_index)
df_image.loc[test_index, "test_fold"] = i
df_image.to_csv("train_image.csv")
df_image.columns
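# (Optional sketch, my addition.) StratifiedKFold is imported above but never used;
# stratifying on the box count keeps the 1/2/3-box distribution similar across folds.
# Written to a separate column so the train_image.csv saved above is unaffected.
skf = StratifiedKFold(n_splits=5, random_state=RANDOM_STATE, shuffle=True)
df_image["strat_fold"] = 0
for i, (_, strat_test_index) in enumerate(skf.split(df_image, df_image["boxnum"])):
    df_image.loc[strat_test_index, "strat_fold"] = i
print(df_image.groupby("strat_fold")["boxnum"].value_counts().unstack(fill_value=0))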
# # make df_study
ids = [
"0b858129adb4_image",
"3d12cb6aad8b_image",
"41e9a794b342_image",
"681ed0b5dff2_image",
"0eb641cb0dcd_image",
"0f9709784c19_image",
"3b982073ec16_image",
"4f3d52d652dd_image",
"9c24e37a0ef5_image",
"1b92142f4362_image",
"6f749e2783e1_image",
"c3510a436bff_image",
"394536fc0dca_image",
]
df = pd.read_csv("../input/cov19detection-dataset/imginfo.csv")
df = df.rename(columns={"id_x": "id"})
df["augflag"] = 0
index = df[
(df.crop_fxmax > 0.7)
& (df.crop_fymax > 0.7)
& (df.crop_fxmin < 0.3)
& (df.crop_fymin < 0.3)
].index
df.loc[index, "augflag"] = 1
df_study = df # [df.columns[[1,4,5,6,7,8,9,11,12,13,14,-1]]]
meanmax = df_study["mean"].values.max()
varmax = df_study["var"].values.max()
df_study["norm_mean"] = df_study["mean"].apply(lambda x: x / meanmax)
df_study["norm_var"] = df_study["var"].apply(lambda x: x / varmax)
print(len(df_study), len(ids))
df_study = df_study[~df_study.id.isin(ids)]
print(len(df_study))
df_study = df_study[~df_study.duplicated(keep="last", subset=["mean", "var"])]
print(len(df_study))  # length after dropping images with duplicated (mean, var)
df_study.to_csv("train_study.csv")
df_study
# # make detection / non-detection dataframe (train_none.csv)
ids = [
"0b858129adb4_image",
"3d12cb6aad8b_image",
"41e9a794b342_image",
"681ed0b5dff2_image",
"0eb641cb0dcd_image",
"0f9709784c19_image",
"3b982073ec16_image",
"4f3d52d652dd_image",
"394536fc0dca_image",
]
df = pd.read_csv("../input/cov19detection-dataset/imginfo.csv")
df["augflag"] = 0
index = df[
(df.crop_fxmax > 0.7)
& (df.crop_fymax > 0.7)
& (df.crop_fxmin < 0.3)
& (df.crop_fymin < 0.3)
].index
df.loc[index, "augflag"] = 1
df["none"] = 0
index = df[df.label == "none 1 0 0 1 1"].index
df.loc[index, "none"] = 1
index = df[(df.none == 1) & (df["Negative for Pneumonia"] == 0)].index
df = df.drop(index).reset_index(drop=True)
df = df.rename(columns={"id_x": "id"})
df = df[~df.id.isin(ids)]
df = df[~df.duplicated(keep="last", subset=["mean", "var"])].reset_index(drop=True)
df.to_csv("train_none.csv")
df
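# (Sanity check, my addition.) Class balance of the detection / non-detection target
# exposed by train_none.csv, plus a check that the "none" rows are all negative.
print(df["none"].value_counts(normalize=True))
print(df.groupby("none")["Negative for Pneumonia"].mean())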
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/393/69393152.ipynb
|
cov19detection-dataset
|
kunihikofurugori
|
[{"Id": 69393152, "ScriptId": 18116571, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6220676, "CreationDate": "07/30/2021 12:36:41", "VersionNumber": 23.0, "Title": "[step2]make_dataframe", "EvaluationDate": "07/30/2021", "IsChange": true, "TotalLines": 263.0, "LinesInsertedFromPrevious": 11.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 252.0, "LinesInsertedFromFork": 252.0, "LinesDeletedFromFork": 83.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 11.0, "TotalVotes": 0}]
|
[{"Id": 92502693, "KernelVersionId": 69393152, "SourceDatasetVersionId": 2465014}]
|
[{"Id": 2465014, "DatasetId": 1387439, "DatasourceVersionId": 2507457, "CreatorUserId": 6220676, "LicenseName": "Unknown", "CreationDate": "07/26/2021 13:11:17", "VersionNumber": 10.0, "Title": "cov19detection_dataset", "Slug": "cov19detection-dataset", "Subtitle": NaN, "Description": NaN, "VersionNotes": "imginfocsvfile", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1387439, "CreatorUserId": 6220676, "OwnerUserId": 6220676.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2465014.0, "CurrentDatasourceVersionId": 2507457.0, "ForumId": 1406656, "Type": 2, "CreationDate": "06/03/2021 22:21:59", "LastActivityDate": "06/03/2021", "TotalViews": 1738, "TotalDownloads": 8, "TotalVotes": 1, "TotalKernels": 1}]
|
[{"Id": 6220676, "UserName": "kunihikofurugori", "DisplayName": "furu-nag", "RegisterDate": "11/23/2020", "PerformanceTier": 3}]
|
|
[{"cov19detection-dataset/imginfo.csv": {"column_names": "[\"Unnamed: 0\", \"id_x\", \"boxes\", \"label\", \"StudyInstanceUID\", \"Negative for Pneumonia\", \"Typical Appearance\", \"Indeterminate Appearance\", \"Atypical Appearance\", \"path\", \"fold\", \"mean\", \"var\", \"dim1\", \"dim2\", \"crop_xmin\", \"crop_ymin\", \"crop_xmax\", \"crop_ymax\", \"crop_fxmin\", \"crop_fymin\", \"crop_fxmax\", \"crop_fymax\"]", "column_data_types": "{\"Unnamed: 0\": \"int64\", \"id_x\": \"object\", \"boxes\": \"object\", \"label\": \"object\", \"StudyInstanceUID\": \"object\", \"Negative for Pneumonia\": \"int64\", \"Typical Appearance\": \"int64\", \"Indeterminate Appearance\": \"int64\", \"Atypical Appearance\": \"int64\", \"path\": \"object\", \"fold\": \"int64\", \"mean\": \"float64\", \"var\": \"float64\", \"dim1\": \"int64\", \"dim2\": \"int64\", \"crop_xmin\": \"int64\", \"crop_ymin\": \"int64\", \"crop_xmax\": \"int64\", \"crop_ymax\": \"int64\", \"crop_fxmin\": \"float64\", \"crop_fymin\": \"float64\", \"crop_fxmax\": \"float64\", \"crop_fymax\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 6334 entries, 0 to 6333\nData columns (total 23 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Unnamed: 0 6334 non-null int64 \n 1 id_x 6334 non-null object \n 2 boxes 4294 non-null object \n 3 label 6334 non-null object \n 4 StudyInstanceUID 6334 non-null object \n 5 Negative for Pneumonia 6334 non-null int64 \n 6 Typical Appearance 6334 non-null int64 \n 7 Indeterminate Appearance 6334 non-null int64 \n 8 Atypical Appearance 6334 non-null int64 \n 9 path 6334 non-null object \n 10 fold 6334 non-null int64 \n 11 mean 6334 non-null float64\n 12 var 6334 non-null float64\n 13 dim1 6334 non-null int64 \n 14 dim2 6334 non-null int64 \n 15 crop_xmin 6334 non-null int64 \n 16 crop_ymin 6334 non-null int64 \n 17 crop_xmax 6334 non-null int64 \n 18 crop_ymax 6334 non-null int64 \n 19 crop_fxmin 6334 non-null float64\n 20 crop_fymin 6334 non-null float64\n 21 crop_fxmax 6334 non-null float64\n 22 crop_fymax 6334 non-null float64\ndtypes: float64(6), int64(12), object(5)\nmemory usage: 1.1+ MB\n", "summary": "{\"Unnamed: 0\": {\"count\": 6334.0, \"mean\": 3166.5, \"std\": 1828.612634394356, \"min\": 0.0, \"25%\": 1583.25, \"50%\": 3166.5, \"75%\": 4749.75, \"max\": 6333.0}, \"Negative for Pneumonia\": {\"count\": 6334.0, \"mean\": 0.2740764130091569, \"std\": 0.44608289476268703, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 1.0}, \"Typical Appearance\": {\"count\": 6334.0, \"mean\": 0.47473950110514684, \"std\": 0.4994009233077838, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 1.0}, \"Indeterminate Appearance\": {\"count\": 6334.0, \"mean\": 0.17492895484685822, \"std\": 0.3799363177729579, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"Atypical Appearance\": {\"count\": 6334.0, \"mean\": 0.07625513103883802, \"std\": 0.2654268425855239, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"fold\": {\"count\": 6334.0, \"mean\": 1.5228923271234607, \"std\": 1.1153424262112464, \"min\": 0.0, \"25%\": 1.0, \"50%\": 2.0, \"75%\": 3.0, \"max\": 3.0}, \"mean\": {\"count\": 6334.0, \"mean\": 133.24683434723556, \"std\": 27.99183437169795, \"min\": 0.0, \"25%\": 114.73103902405234, \"50%\": 137.75916602639998, \"75%\": 152.24879457108534, \"max\": 246.2383744012128}, \"var\": {\"count\": 6334.0, \"mean\": 53.3834527090075, \"std\": 14.145883928818447, \"min\": 0.0, \"25%\": 
44.28919479340918, \"50%\": 54.132192287463674, \"75%\": 62.62725154984327, \"max\": 102.23217666919643}, \"dim1\": {\"count\": 6334.0, \"mean\": 2744.6659299021157, \"std\": 582.1675285360781, \"min\": 0.0, \"25%\": 2336.0, \"50%\": 2544.0, \"75%\": 3194.75, \"max\": 4891.0}, \"dim2\": {\"count\": 6334.0, \"mean\": 3183.23760656773, \"std\": 692.7314706859942, \"min\": 0.0, \"25%\": 2836.0, \"50%\": 3028.0, \"75%\": 3408.0, \"max\": 4891.0}, \"crop_xmin\": {\"count\": 6334.0, \"mean\": 51.592990211556675, \"std\": 127.98799432039388, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 40.0, \"max\": 1916.0}, \"crop_ymin\": {\"count\": 6334.0, \"mean\": 60.82996526681402, \"std\": 149.60847047371675, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 24.0, \"max\": 1960.0}, \"crop_xmax\": {\"count\": 6334.0, \"mean\": 2730.9295863593306, \"std\": 574.5785667644149, \"min\": 0.0, \"25%\": 2336.0, \"50%\": 2544.0, \"75%\": 3052.0, \"max\": 4891.0}, \"crop_ymax\": {\"count\": 6334.0, \"mean\": 3133.2777076097254, \"std\": 684.2540943554824, \"min\": 0.0, \"25%\": 2832.0, \"50%\": 2989.0, \"75%\": 3408.0, \"max\": 4891.0}, \"crop_fxmin\": {\"count\": 6334.0, \"mean\": 0.01857164394086423, \"std\": 0.044029450883777195, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.015126653797259724, \"max\": 0.5}, \"crop_fymin\": {\"count\": 6334.0, \"mean\": 0.018727911500273465, \"std\": 0.04392204658023329, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.007920793229031175, \"max\": 0.5}, \"crop_fxmax\": {\"count\": 6334.0, \"mean\": 0.992582782448968, \"std\": 0.04501530778074556, \"min\": 0.0, \"25%\": 0.9996505313726217, \"50%\": 1.0, \"75%\": 1.0, \"max\": 1.0}, \"crop_fymax\": {\"count\": 6334.0, \"mean\": 0.9838674481000695, \"std\": 0.04701738052266493, \"min\": 0.0, \"25%\": 0.9981176159407512, \"50%\": 1.0, \"75%\": 1.0, \"max\": 1.0}}", "examples": "{\"Unnamed: 0\":{\"0\":0,\"1\":1,\"2\":2,\"3\":3},\"id_x\":{\"0\":\"000a312787f2_image\",\"1\":\"000c3a3f293f_image\",\"2\":\"0012ff7358bc_image\",\"3\":\"001398f4ff4f_image\"},\"boxes\":{\"0\":\"[{'x': 789.28836, 'y': 582.43035, 'width': 1026.65662, 'height': 1917.30292}, {'x': 2245.91208, 'y': 591.20528, 'width': 1094.66162, 'height': 1761.54944}]\",\"1\":null,\"2\":\"[{'x': 677.42216, 'y': 197.97662, 'width': 867.79767, 'height': 999.78214}, {'x': 1792.69064, 'y': 402.5525, 'width': 617.02734, 'height': 1204.358}]\",\"3\":\"[{'x': 2729, 'y': 2181.33331, 'width': 948.00012, 'height': 604}]\"},\"label\":{\"0\":\"opacity 1 789.28836 582.43035 1815.94498 2499.73327 opacity 1 2245.91208 591.20528 3340.5737 2352.75472\",\"1\":\"none 1 0 0 1 1\",\"2\":\"opacity 1 677.42216 197.97662 1545.21983 1197.75876 opacity 1 1792.69064 402.5525 2409.71798 1606.9105\",\"3\":\"opacity 1 2729 2181.33331 3677.00012 2785.33331\"},\"StudyInstanceUID\":{\"0\":\"5776db0cec75_study\",\"1\":\"ff0879eb20ed_study\",\"2\":\"9d514ce429a7_study\",\"3\":\"28dddc8559b2_study\"},\"Negative for Pneumonia\":{\"0\":0,\"1\":1,\"2\":0,\"3\":0},\"Typical Appearance\":{\"0\":1,\"1\":0,\"2\":1,\"3\":0},\"Indeterminate Appearance\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"Atypical 
Appearance\":{\"0\":0,\"1\":0,\"2\":0,\"3\":1},\"path\":{\"0\":\"..\\/input\\/siim-covid19-detection\\/train\\/5776db0cec75\\/81456c9c5423\\/000a312787f2.dcm\",\"1\":\"..\\/input\\/siim-covid19-detection\\/train\\/ff0879eb20ed\\/d8a644cc4f93\\/000c3a3f293f.dcm\",\"2\":\"..\\/input\\/siim-covid19-detection\\/train\\/9d514ce429a7\\/22897cd1daa0\\/0012ff7358bc.dcm\",\"3\":\"..\\/input\\/siim-covid19-detection\\/train\\/28dddc8559b2\\/4d47bc042ee6\\/001398f4ff4f.dcm\"},\"fold\":{\"0\":0,\"1\":1,\"2\":3,\"3\":1},\"mean\":{\"0\":128.6391528474,\"1\":161.1355225989,\"2\":106.9416714773,\"3\":120.6609315925},\"var\":{\"0\":67.920095367,\"1\":50.2639362976,\"2\":51.319914025,\"3\":85.3619513012},\"dim1\":{\"0\":3488,\"1\":2320,\"2\":2544,\"3\":3520},\"dim2\":{\"0\":4256,\"1\":2832,\"2\":3056,\"3\":4280},\"crop_xmin\":{\"0\":0,\"1\":0,\"2\":122,\"3\":309},\"crop_ymin\":{\"0\":260,\"1\":0,\"2\":194,\"3\":400},\"crop_xmax\":{\"0\":3487,\"1\":2320,\"2\":2544,\"3\":3509},\"crop_ymax\":{\"0\":3828,\"1\":2832,\"2\":3052,\"3\":4280},\"crop_fxmin\":{\"0\":0.0,\"1\":0.0,\"2\":0.0479559748,\"3\":0.0877840909},\"crop_fymin\":{\"0\":0.0610902256,\"1\":0.0,\"2\":0.0634816754,\"3\":0.0934579439},\"crop_fxmax\":{\"0\":0.9997133028,\"1\":1.0,\"2\":1.0,\"3\":0.996875},\"crop_fymax\":{\"0\":0.8994360902,\"1\":1.0,\"2\":0.9986910995,\"3\":1.0}}"}}]
| true | 1 |
<start_data_description><data_path>cov19detection-dataset/imginfo.csv:
<column_names>
['Unnamed: 0', 'id_x', 'boxes', 'label', 'StudyInstanceUID', 'Negative for Pneumonia', 'Typical Appearance', 'Indeterminate Appearance', 'Atypical Appearance', 'path', 'fold', 'mean', 'var', 'dim1', 'dim2', 'crop_xmin', 'crop_ymin', 'crop_xmax', 'crop_ymax', 'crop_fxmin', 'crop_fymin', 'crop_fxmax', 'crop_fymax']
<column_types>
{'Unnamed: 0': 'int64', 'id_x': 'object', 'boxes': 'object', 'label': 'object', 'StudyInstanceUID': 'object', 'Negative for Pneumonia': 'int64', 'Typical Appearance': 'int64', 'Indeterminate Appearance': 'int64', 'Atypical Appearance': 'int64', 'path': 'object', 'fold': 'int64', 'mean': 'float64', 'var': 'float64', 'dim1': 'int64', 'dim2': 'int64', 'crop_xmin': 'int64', 'crop_ymin': 'int64', 'crop_xmax': 'int64', 'crop_ymax': 'int64', 'crop_fxmin': 'float64', 'crop_fymin': 'float64', 'crop_fxmax': 'float64', 'crop_fymax': 'float64'}
<dataframe_Summary>
{'Unnamed: 0': {'count': 6334.0, 'mean': 3166.5, 'std': 1828.612634394356, 'min': 0.0, '25%': 1583.25, '50%': 3166.5, '75%': 4749.75, 'max': 6333.0}, 'Negative for Pneumonia': {'count': 6334.0, 'mean': 0.2740764130091569, 'std': 0.44608289476268703, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 1.0}, 'Typical Appearance': {'count': 6334.0, 'mean': 0.47473950110514684, 'std': 0.4994009233077838, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 1.0}, 'Indeterminate Appearance': {'count': 6334.0, 'mean': 0.17492895484685822, 'std': 0.3799363177729579, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'Atypical Appearance': {'count': 6334.0, 'mean': 0.07625513103883802, 'std': 0.2654268425855239, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'fold': {'count': 6334.0, 'mean': 1.5228923271234607, 'std': 1.1153424262112464, 'min': 0.0, '25%': 1.0, '50%': 2.0, '75%': 3.0, 'max': 3.0}, 'mean': {'count': 6334.0, 'mean': 133.24683434723556, 'std': 27.99183437169795, 'min': 0.0, '25%': 114.73103902405234, '50%': 137.75916602639998, '75%': 152.24879457108534, 'max': 246.2383744012128}, 'var': {'count': 6334.0, 'mean': 53.3834527090075, 'std': 14.145883928818447, 'min': 0.0, '25%': 44.28919479340918, '50%': 54.132192287463674, '75%': 62.62725154984327, 'max': 102.23217666919643}, 'dim1': {'count': 6334.0, 'mean': 2744.6659299021157, 'std': 582.1675285360781, 'min': 0.0, '25%': 2336.0, '50%': 2544.0, '75%': 3194.75, 'max': 4891.0}, 'dim2': {'count': 6334.0, 'mean': 3183.23760656773, 'std': 692.7314706859942, 'min': 0.0, '25%': 2836.0, '50%': 3028.0, '75%': 3408.0, 'max': 4891.0}, 'crop_xmin': {'count': 6334.0, 'mean': 51.592990211556675, 'std': 127.98799432039388, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 40.0, 'max': 1916.0}, 'crop_ymin': {'count': 6334.0, 'mean': 60.82996526681402, 'std': 149.60847047371675, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 24.0, 'max': 1960.0}, 'crop_xmax': {'count': 6334.0, 'mean': 2730.9295863593306, 'std': 574.5785667644149, 'min': 0.0, '25%': 2336.0, '50%': 2544.0, '75%': 3052.0, 'max': 4891.0}, 'crop_ymax': {'count': 6334.0, 'mean': 3133.2777076097254, 'std': 684.2540943554824, 'min': 0.0, '25%': 2832.0, '50%': 2989.0, '75%': 3408.0, 'max': 4891.0}, 'crop_fxmin': {'count': 6334.0, 'mean': 0.01857164394086423, 'std': 0.044029450883777195, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.015126653797259724, 'max': 0.5}, 'crop_fymin': {'count': 6334.0, 'mean': 0.018727911500273465, 'std': 0.04392204658023329, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.007920793229031175, 'max': 0.5}, 'crop_fxmax': {'count': 6334.0, 'mean': 0.992582782448968, 'std': 0.04501530778074556, 'min': 0.0, '25%': 0.9996505313726217, '50%': 1.0, '75%': 1.0, 'max': 1.0}, 'crop_fymax': {'count': 6334.0, 'mean': 0.9838674481000695, 'std': 0.04701738052266493, 'min': 0.0, '25%': 0.9981176159407512, '50%': 1.0, '75%': 1.0, 'max': 1.0}}
<dataframe_info>
RangeIndex: 6334 entries, 0 to 6333
Data columns (total 23 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Unnamed: 0 6334 non-null int64
1 id_x 6334 non-null object
2 boxes 4294 non-null object
3 label 6334 non-null object
4 StudyInstanceUID 6334 non-null object
5 Negative for Pneumonia 6334 non-null int64
6 Typical Appearance 6334 non-null int64
7 Indeterminate Appearance 6334 non-null int64
8 Atypical Appearance 6334 non-null int64
9 path 6334 non-null object
10 fold 6334 non-null int64
11 mean 6334 non-null float64
12 var 6334 non-null float64
13 dim1 6334 non-null int64
14 dim2 6334 non-null int64
15 crop_xmin 6334 non-null int64
16 crop_ymin 6334 non-null int64
17 crop_xmax 6334 non-null int64
18 crop_ymax 6334 non-null int64
19 crop_fxmin 6334 non-null float64
20 crop_fymin 6334 non-null float64
21 crop_fxmax 6334 non-null float64
22 crop_fymax 6334 non-null float64
dtypes: float64(6), int64(12), object(5)
memory usage: 1.1+ MB
<some_examples>
{'Unnamed: 0': {'0': 0, '1': 1, '2': 2, '3': 3}, 'id_x': {'0': '000a312787f2_image', '1': '000c3a3f293f_image', '2': '0012ff7358bc_image', '3': '001398f4ff4f_image'}, 'boxes': {'0': "[{'x': 789.28836, 'y': 582.43035, 'width': 1026.65662, 'height': 1917.30292}, {'x': 2245.91208, 'y': 591.20528, 'width': 1094.66162, 'height': 1761.54944}]", '1': None, '2': "[{'x': 677.42216, 'y': 197.97662, 'width': 867.79767, 'height': 999.78214}, {'x': 1792.69064, 'y': 402.5525, 'width': 617.02734, 'height': 1204.358}]", '3': "[{'x': 2729, 'y': 2181.33331, 'width': 948.00012, 'height': 604}]"}, 'label': {'0': 'opacity 1 789.28836 582.43035 1815.94498 2499.73327 opacity 1 2245.91208 591.20528 3340.5737 2352.75472', '1': 'none 1 0 0 1 1', '2': 'opacity 1 677.42216 197.97662 1545.21983 1197.75876 opacity 1 1792.69064 402.5525 2409.71798 1606.9105', '3': 'opacity 1 2729 2181.33331 3677.00012 2785.33331'}, 'StudyInstanceUID': {'0': '5776db0cec75_study', '1': 'ff0879eb20ed_study', '2': '9d514ce429a7_study', '3': '28dddc8559b2_study'}, 'Negative for Pneumonia': {'0': 0, '1': 1, '2': 0, '3': 0}, 'Typical Appearance': {'0': 1, '1': 0, '2': 1, '3': 0}, 'Indeterminate Appearance': {'0': 0, '1': 0, '2': 0, '3': 0}, 'Atypical Appearance': {'0': 0, '1': 0, '2': 0, '3': 1}, 'path': {'0': '../input/siim-covid19-detection/train/5776db0cec75/81456c9c5423/000a312787f2.dcm', '1': '../input/siim-covid19-detection/train/ff0879eb20ed/d8a644cc4f93/000c3a3f293f.dcm', '2': '../input/siim-covid19-detection/train/9d514ce429a7/22897cd1daa0/0012ff7358bc.dcm', '3': '../input/siim-covid19-detection/train/28dddc8559b2/4d47bc042ee6/001398f4ff4f.dcm'}, 'fold': {'0': 0, '1': 1, '2': 3, '3': 1}, 'mean': {'0': 128.6391528474, '1': 161.1355225989, '2': 106.9416714773, '3': 120.6609315925}, 'var': {'0': 67.920095367, '1': 50.2639362976, '2': 51.319914025, '3': 85.3619513012}, 'dim1': {'0': 3488, '1': 2320, '2': 2544, '3': 3520}, 'dim2': {'0': 4256, '1': 2832, '2': 3056, '3': 4280}, 'crop_xmin': {'0': 0, '1': 0, '2': 122, '3': 309}, 'crop_ymin': {'0': 260, '1': 0, '2': 194, '3': 400}, 'crop_xmax': {'0': 3487, '1': 2320, '2': 2544, '3': 3509}, 'crop_ymax': {'0': 3828, '1': 2832, '2': 3052, '3': 4280}, 'crop_fxmin': {'0': 0.0, '1': 0.0, '2': 0.0479559748, '3': 0.0877840909}, 'crop_fymin': {'0': 0.0610902256, '1': 0.0, '2': 0.0634816754, '3': 0.0934579439}, 'crop_fxmax': {'0': 0.9997133028, '1': 1.0, '2': 1.0, '3': 0.996875}, 'crop_fymax': {'0': 0.8994360902, '1': 1.0, '2': 0.9986910995, '3': 1.0}}
<end_description>
| 4,223 | 0 | 6,515 | 4,223 |
69393392
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
train_data = pd.read_csv("/kaggle/input/digit-recognizer/train.csv")
final_test_data = pd.read_csv("/kaggle/input/digit-recognizer/test.csv")
train_data.head()
train_labels = train_data.label
train_data.drop(["label"], axis=1, inplace=True)
train_data.shape
from sklearn.model_selection import train_test_split
train_data, test_data, train_labels, test_labels = train_test_split(
train_data, train_labels, test_size=0.2, random_state=42
)
train_data.shape
test_data.shape
train_data.iloc[:, 1].dtype
train_data = train_data.to_numpy().reshape(train_data.shape[0], 28, 28, 1)
test_data = test_data.to_numpy().reshape(test_data.shape[0], 28, 28, 1)
final_test_data = final_test_data.to_numpy().reshape(
final_test_data.shape[0], 28, 28, 1
)
# normalization
train_data = train_data / 255.0
test_data = test_data / 255.0
final_test_data = final_test_data / 255.0
from keras.utils import to_categorical
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)
from keras.preprocessing.image import ImageDataGenerator
datagen = ImageDataGenerator(validation_split=0.2)
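# (Optional sketch, my addition.) The generator above defines only a validation split
# and no augmentation. Mild geometric jitter is a common choice for MNIST; this
# alternative generator is defined as an illustration and is NOT used by fit() below.
datagen_aug = ImageDataGenerator(
    rotation_range=10,
    width_shift_range=0.1,
    height_shift_range=0.1,
    zoom_range=0.1,
    validation_split=0.2,
)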
from keras import models
from keras import layers
from keras import optimizers
my_model = models.Sequential()
# Convolutional layers
my_model.add(
layers.Conv2D(
40, (3, 3), activation="relu", input_shape=(28, 28, 1), name="First_Conv"
)
)
my_model.add(layers.MaxPooling2D((2, 2)))
my_model.add(layers.Dropout(0.3))
# my_model.add(layers.BatchNormalization())
my_model.add(layers.Conv2D(80, (3, 3), activation="relu", name="Second_Conv"))
my_model.add(layers.MaxPooling2D((2, 2)))
my_model.add(layers.Dropout(0.3))
my_model.add(layers.Conv2D(160, (3, 3), activation="relu", name="Third_Conv"))
# Dense classificator
my_model.add(layers.Flatten())
# my_model.add(layers.BatchNormalization())
my_model.add(layers.Dropout(0.3))
my_model.add(layers.Dense(64, activation="relu", name="First_Dense"))
# my_model.add(layers.BatchNormalization())
# my_model.add(layers.Dense(64, activation = 'relu', name = 'Second_Dense'))
my_model.add(layers.Dense(10, activation="softmax", name="Output"))
my_model.summary()
from keras.callbacks import EarlyStopping
callbacks_list = [
EarlyStopping(monitor="categorical_accuracy", patience=3, restore_best_weights=True)
]
optimizer_use = optimizers.RMSprop(momentum=0.05)
my_model.compile(
optimizer=optimizer_use,
loss="categorical_crossentropy",
metrics=["categorical_accuracy"],
)
# history = my_model.fit(train_data, train_labels, epochs = 10, batch_size = 64, validation_split = 0.2, callbacks = callbacks_list)
# Pass subset= to flow() so the generator's validation_split actually takes effect;
# without it, both generators would iterate over the full training set.
history = my_model.fit(
    datagen.flow(train_data, train_labels, batch_size=64, subset="training"),
    epochs=10,
    validation_data=datagen.flow(
        train_data, train_labels, batch_size=12, subset="validation"
    ),
    callbacks=callbacks_list,
)
evaluations = my_model.evaluate(test_data, test_labels)
history.history.keys()
loss_values = history.history["loss"]
val_loss_values = history.history["val_loss"]
acc_values = history.history["categorical_accuracy"]
val_acc_values = history.history["val_categorical_accuracy"]
import matplotlib.pyplot as plt
epochs = range(1, len(loss_values) + 1)
plt.plot(epochs, loss_values, "b", label="Training loss")
plt.plot(epochs, val_loss_values, "r", label="Validation loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.legend()
plt.show()
plt.clf()
plt.plot(epochs, acc_values, "b", label="Training accuracy")
plt.plot(epochs, val_acc_values, "r", label="Validation accuracy")
plt.xlabel("Epochs")
plt.ylabel("Accuracy")
plt.legend()
plt.show()
evaluations[1]
predictions = my_model.predict(final_test_data)
predictions[2]
predictions
predictions = np.argmax(predictions, axis=1)
submission = pd.DataFrame({"ImageId": range(1, 28001), "Label": predictions})
submission.to_csv("sub.csv", index=False)
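# (Sanity check, my addition.) Distribution of predicted labels; a strongly skewed
# distribution would hint at a preprocessing or normalisation mismatch at inference.
print(submission["Label"].value_counts().sort_index())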
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/393/69393392.ipynb
| null | null |
[{"Id": 69393392, "ScriptId": 18788363, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1570180, "CreationDate": "07/30/2021 12:39:52", "VersionNumber": 4.0, "Title": "MNIST with CNN", "EvaluationDate": "07/30/2021", "IsChange": true, "TotalLines": 122.0, "LinesInsertedFromPrevious": 5.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 117.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 1,479 | 0 | 1,479 | 1,479 |
||
69393622
|
<jupyter_start><jupyter_text>Water Quality
# Context
`Access to safe drinking-water is essential to health, a basic human right and a component of effective policy for health protection. This is important as a health and development issue at a national, regional and local level. In some regions, it has been shown that investments in water supply and sanitation can yield a net economic benefit, since the reductions in adverse health effects and health care costs outweigh the costs of undertaking the interventions.`
# Content
The water_potability.csv file contains water quality metrics for 3276 different water bodies.
### 1. pH value:
```PH is an important parameter in evaluating the acid–base balance of water. It is also the indicator of acidic or alkaline condition of water status. WHO has recommended maximum permissible limit of pH from 6.5 to 8.5. The current investigation ranges were 6.52–6.83 which are in the range of WHO standards. ```
### 2. Hardness:
```Hardness is mainly caused by calcium and magnesium salts. These salts are dissolved from geologic deposits through which water travels. The length of time water is in contact with hardness producing material helps determine how much hardness there is in raw water.
Hardness was originally defined as the capacity of water to precipitate soap caused by Calcium and Magnesium.```
### 3. Solids (Total dissolved solids - TDS):
```Water has the ability to dissolve a wide range of inorganic and some organic minerals or salts such as potassium, calcium, sodium, bicarbonates, chlorides, magnesium, sulfates etc. These minerals produced un-wanted taste and diluted color in appearance of water. This is the important parameter for the use of water. The water with high TDS value indicates that water is highly mineralized. Desirable limit for TDS is 500 mg/l and maximum limit is 1000 mg/l which prescribed for drinking purpose. ```
### 4. Chloramines:
```Chlorine and chloramine are the major disinfectants used in public water systems. Chloramines are most commonly formed when ammonia is added to chlorine to treat drinking water. Chlorine levels up to 4 milligrams per liter (mg/L or 4 parts per million (ppm)) are considered safe in drinking water.```
### 5. Sulfate:
```Sulfates are naturally occurring substances that are found in minerals, soil, and rocks. They are present in ambient air, groundwater, plants, and food. The principal commercial use of sulfate is in the chemical industry. Sulfate concentration in seawater is about 2,700 milligrams per liter (mg/L). It ranges from 3 to 30 mg/L in most freshwater supplies, although much higher concentrations (1000 mg/L) are found in some geographic locations. ```
### 6. Conductivity:
```Pure water is not a good conductor of electric current rather’s a good insulator. Increase in ions concentration enhances the electrical conductivity of water. Generally, the amount of dissolved solids in water determines the electrical conductivity. Electrical conductivity (EC) actually measures the ionic process of a solution that enables it to transmit current. According to WHO standards, EC value should not exceeded 400 μS/cm. ```
### 7. Organic_carbon:
```Total Organic Carbon (TOC) in source waters comes from decaying natural organic matter (NOM) as well as synthetic sources. TOC is a measure of the total amount of carbon in organic compounds in pure water. According to US EPA < 2 mg/L as TOC in treated / drinking water, and < 4 mg/Lit in source water which is use for treatment.```
### 8. Trihalomethanes:
```THMs are chemicals which may be found in water treated with chlorine. The concentration of THMs in drinking water varies according to the level of organic material in the water, the amount of chlorine required to treat the water, and the temperature of the water that is being treated. THM levels up to 80 ppm is considered safe in drinking water.```
### 9. Turbidity:
```The turbidity of water depends on the quantity of solid matter present in the suspended state. It is a measure of light emitting properties of water and the test is used to indicate the quality of waste discharge with respect to colloidal matter. The mean turbidity value obtained for Wondo Genet Campus (0.98 NTU) is lower than the WHO recommended value of 5.00 NTU.```
### 10. Potability:
```Indicates if water is safe for human consumption where 1 means Potable and 0 means Not potable.```
Kaggle dataset identifier: water-potability
<jupyter_code>import pandas as pd
df = pd.read_csv('water-potability/water_potability.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 3276 entries, 0 to 3275
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 ph 2785 non-null float64
1 Hardness 3276 non-null float64
2 Solids 3276 non-null float64
3 Chloramines 3276 non-null float64
4 Sulfate 2495 non-null float64
5 Conductivity 3276 non-null float64
6 Organic_carbon 3276 non-null float64
7 Trihalomethanes 3114 non-null float64
8 Turbidity 3276 non-null float64
9 Potability 3276 non-null int64
dtypes: float64(9), int64(1)
memory usage: 256.1 KB
<jupyter_text>Examples:
{
"ph": NaN,
"Hardness": 204.8904554713,
"Solids": 20791.318980747,
"Chloramines": 7.3002118732,
"Sulfate": 368.5164413498,
"Conductivity": 564.3086541722,
"Organic_carbon": 10.379783078100001,
"Trihalomethanes": 86.9909704615,
"Turbidity": 2.9631353806,
"Potability": 0.0
}
{
"ph": 3.7160800754,
"Hardness": 129.4229205149,
"Solids": 18630.0578579703,
"Chloramines": 6.6352458839,
"Sulfate": NaN,
"Conductivity": 592.8853591349,
"Organic_carbon": 15.1800131164,
"Trihalomethanes": 56.3290762845,
"Turbidity": 4.5006562749,
"Potability": 0.0
}
{
"ph": 8.0991241893,
"Hardness": 224.2362593936,
"Solids": 19909.5417322924,
"Chloramines": 9.2758836027,
"Sulfate": NaN,
"Conductivity": 418.6062130645,
"Organic_carbon": 16.8686369296,
"Trihalomethanes": 66.4200925118,
"Turbidity": 3.0559337497,
"Potability": 0.0
}
{
"ph": 8.3167658842,
"Hardness": 214.3733940856,
"Solids": 22018.4174407753,
"Chloramines": 8.0593323774,
"Sulfate": 356.8861356431,
"Conductivity": 363.2665161642,
"Organic_carbon": 18.4365244955,
"Trihalomethanes": 100.3416743651,
"Turbidity": 4.6287705368,
"Potability": 0.0
}
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
sns.set()
warnings.simplefilter("ignore")
data_orig = pd.read_csv("/kaggle/input/water-potability/water_potability.csv")
print(data_orig.shape)
data_orig.head()
data = data_orig.copy()
data.info()
data.isnull().sum()
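# (Sketch, my addition.) The dataset description quotes WHO/EPA guideline values; the
# limits below are transcribed from that description (not derived from the data) and
# give a rough idea of how many samples fall outside the quoted ranges.
guideline_limits = {
    "ph": (6.5, 8.5),  # WHO recommended range
    "Solids": (None, 1000),  # maximum desirable TDS in mg/L
    "Chloramines": (None, 4),  # ppm considered safe
    "Conductivity": (None, 400),  # WHO limit in uS/cm
    "Trihalomethanes": (None, 80),  # ppm considered safe
    "Turbidity": (None, 5),  # WHO recommended NTU
}
for col, (low, high) in guideline_limits.items():
    outside = pd.Series(False, index=data.index)
    if low is not None:
        outside |= data[col] < low
    if high is not None:
        outside |= data[col] > high
    print(
        "{}: {:.1f}% of samples outside the quoted guideline".format(
            col, 100 * outside.mean()
        )
    )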
feat_cols = [
"ph",
"Hardness",
"Solids",
"Chloramines",
"Sulfate",
"Conductivity",
"Organic_carbon",
"Trihalomethanes",
"Turbidity",
]
output_col = "Potability"
fig, ax1 = plt.subplots(4, 2, figsize=(20, 20))
k = 0
skew_vals = data[feat_cols].skew()
for i in range(4):
for j in range(2):
sns.histplot(
data=data, x=feat_cols[k], ax=ax1[i][j], label="{:.4}".format(skew_vals[k])
)
ax1[i][j].legend()
k += 1
plt.show()
"""
All columns are almost normally distributed
Solids has the highest skewness value
"""
fig = plt.figure(figsize=(10, 7))
plt.pie(
data[output_col].value_counts(),
labels=["Not Potable", "Potable"],
autopct="%1.1f%%",
)
## checking for outliers
fig, ax2 = plt.subplots(4, 2, figsize=(30, 30))
k = 0
q1 = data[feat_cols].quantile(0.25)
q3 = data[feat_cols].quantile(0.75)
iqr = q3 - q1
upper_lim = dict(q3 + 1.5 * iqr)
lower_lim = dict(q1 - 1.5 * iqr)
for i in range(4):
for j in range(2):
col = feat_cols[k]
out_count = sum(data[col] > upper_lim[col]) + sum(data[col] < lower_lim[col])
out_prop = out_count / len(data)
sns.boxplot(data=data, x=feat_cols[k], ax=ax2[i][j])
ax2[i][j].set_title("Outliers Proporation : {:.2f}%".format(out_prop * 100))
k += 1
plt.show()
"""
Most columns are roughly normally distributed but contain outliers; the Solids column is the exception.
Will the outliers affect model performance? (Can they safely be removed or replaced?)
Since the columns with missing values ['ph', 'Sulfate', 'Trihalomethanes'] contain outliers
and are roughly normally distributed, missing values will be replaced with the median (or mode) rather than the mean.
"""
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/393/69393622.ipynb
|
water-potability
|
adityakadiwal
|
[{"Id": 69393622, "ScriptId": 18944569, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 970670, "CreationDate": "07/30/2021 12:43:05", "VersionNumber": 1.0, "Title": "notebookce8f56eaa5", "EvaluationDate": "07/30/2021", "IsChange": true, "TotalLines": 77.0, "LinesInsertedFromPrevious": 77.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 92503919, "KernelVersionId": 69393622, "SourceDatasetVersionId": 2157486}]
|
[{"Id": 2157486, "DatasetId": 1292407, "DatasourceVersionId": 2198621, "CreatorUserId": 5454565, "LicenseName": "CC0: Public Domain", "CreationDate": "04/25/2021 10:27:44", "VersionNumber": 3.0, "Title": "Water Quality", "Slug": "water-potability", "Subtitle": "Drinking water potability", "Description": "# Context\n\n`Access to safe drinking-water is essential to health, a basic human right and a component of effective policy for health protection. This is important as a health and development issue at a national, regional and local level. In some regions, it has been shown that investments in water supply and sanitation can yield a net economic benefit, since the reductions in adverse health effects and health care costs outweigh the costs of undertaking the interventions.`\n\n\n# Content\n\n\nThe water_potability.csv file contains water quality metrics for 3276 different water bodies. \n### 1. pH value:\n```PH is an important parameter in evaluating the acid\u2013base balance of water. It is also the indicator of acidic or alkaline condition of water status. WHO has recommended maximum permissible limit of pH from 6.5 to 8.5. The current investigation ranges were 6.52\u20136.83 which are in the range of WHO standards. ```\n\n### 2. Hardness:\n```Hardness is mainly caused by calcium and magnesium salts. These salts are dissolved from geologic deposits through which water travels. The length of time water is in contact with hardness producing material helps determine how much hardness there is in raw water.\nHardness was originally defined as the capacity of water to precipitate soap caused by Calcium and Magnesium.```\n\n### 3. Solids (Total dissolved solids - TDS): \n```Water has the ability to dissolve a wide range of inorganic and some organic minerals or salts such as potassium, calcium, sodium, bicarbonates, chlorides, magnesium, sulfates etc. These minerals produced un-wanted taste and diluted color in appearance of water. This is the important parameter for the use of water. The water with high TDS value indicates that water is highly mineralized. Desirable limit for TDS is 500 mg/l and maximum limit is 1000 mg/l which prescribed for drinking purpose. ```\n\n### 4. Chloramines: \n```Chlorine and chloramine are the major disinfectants used in public water systems. Chloramines are most commonly formed when ammonia is added to chlorine to treat drinking water. Chlorine levels up to 4 milligrams per liter (mg/L or 4 parts per million (ppm)) are considered safe in drinking water.```\n\n### 5. Sulfate: \n```Sulfates are naturally occurring substances that are found in minerals, soil, and rocks. They are present in ambient air, groundwater, plants, and food. The principal commercial use of sulfate is in the chemical industry. Sulfate concentration in seawater is about 2,700 milligrams per liter (mg/L). It ranges from 3 to 30 mg/L in most freshwater supplies, although much higher concentrations (1000 mg/L) are found in some geographic locations. ```\n\n### 6. Conductivity: \n```Pure water is not a good conductor of electric current rather\u2019s a good insulator. Increase in ions concentration enhances the electrical conductivity of water. Generally, the amount of dissolved solids in water determines the electrical conductivity. Electrical conductivity (EC) actually measures the ionic process of a solution that enables it to transmit current. According to WHO standards, EC value should not exceeded 400 \u03bcS/cm. ```\n\n### 7. 
Organic_carbon: \n ```Total Organic Carbon (TOC) in source waters comes from decaying natural organic matter (NOM) as well as synthetic sources. TOC is a measure of the total amount of carbon in organic compounds in pure water. According to US EPA < 2 mg/L as TOC in treated / drinking water, and < 4 mg/Lit in source water which is use for treatment.```\n\n### 8. Trihalomethanes: \n```THMs are chemicals which may be found in water treated with chlorine. The concentration of THMs in drinking water varies according to the level of organic material in the water, the amount of chlorine required to treat the water, and the temperature of the water that is being treated. THM levels up to 80 ppm is considered safe in drinking water.```\n\n### 9. Turbidity: \n```The turbidity of water depends on the quantity of solid matter present in the suspended state. It is a measure of light emitting properties of water and the test is used to indicate the quality of waste discharge with respect to colloidal matter. The mean turbidity value obtained for Wondo Genet Campus (0.98 NTU) is lower than the WHO recommended value of 5.00 NTU.```\n\n### 10. Potability: \n```Indicates if water is safe for human consumption where 1 means Potable and 0 means Not potable.```", "VersionNotes": "Removed garbage column", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1292407, "CreatorUserId": 5454565, "OwnerUserId": 5454565.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2157486.0, "CurrentDatasourceVersionId": 2198621.0, "ForumId": 1311077, "Type": 2, "CreationDate": "04/24/2021 07:18:57", "LastActivityDate": "04/24/2021", "TotalViews": 422520, "TotalDownloads": 61531, "TotalVotes": 1262, "TotalKernels": 437}]
|
[{"Id": 5454565, "UserName": "adityakadiwal", "DisplayName": "Aditya Kadiwal", "RegisterDate": "07/12/2020", "PerformanceTier": 2}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
sns.set()
warnings.simplefilter("ignore")
data_orig = pd.read_csv("/kaggle/input/water-potability/water_potability.csv")
print(data_orig.shape)
data_orig.head()
data = data_orig.copy()
data.info()
data.isnull().sum()
feat_cols = [
"ph",
"Hardness",
"Solids",
"Chloramines",
"Sulfate",
"Conductivity",
"Organic_carbon",
"Trihalomethanes",
"Turbidity",
]
output_col = "Potability"
fig, ax1 = plt.subplots(4, 2, figsize=(20, 20))
k = 0
skew_vals = data[feat_cols].skew()
for i in range(4):
for j in range(2):
sns.histplot(
            data=data, x=feat_cols[k], ax=ax1[i][j], label="{:.4}".format(skew_vals[feat_cols[k]])
)
ax1[i][j].legend()
k += 1
plt.show()
"""
All columns are almost normally distributed
Solids has the highest skewness value
"""
fig = plt.figure(figsize=(10, 7))
plt.pie(
data[output_col].value_counts(),
labels=["Not Potable", "Potable"],
autopct="%1.1f%%",
)
## checking for outliers
fig, ax2 = plt.subplots(4, 2, figsize=(30, 30))
k = 0
q1 = data[feat_cols].quantile(0.25)
q3 = data[feat_cols].quantile(0.75)
iqr = q3 - q1
upper_lim = dict(q3 + 1.5 * iqr)
lower_lim = dict(q1 - 1.5 * iqr)
for i in range(4):
for j in range(2):
col = feat_cols[k]
out_count = sum(data[col] > upper_lim[col]) + sum(data[col] < lower_lim[col])
out_prop = out_count / len(data)
sns.boxplot(data=data, x=feat_cols[k], ax=ax2[i][j])
        ax2[i][j].set_title("Outlier proportion: {:.2f}%".format(out_prop * 100))
k += 1
plt.show()
"""
Most columns contain outliers that are normally distributed except Solids column
Will outliers effect model performance? ,(can they be safely removed of replaced?)
Since columns ['ph','sulfates','trihalomethanes'] have outliers and are normally distributed
will replace missing values with median/mode.
"""
|
[{"water-potability/water_potability.csv": {"column_names": "[\"ph\", \"Hardness\", \"Solids\", \"Chloramines\", \"Sulfate\", \"Conductivity\", \"Organic_carbon\", \"Trihalomethanes\", \"Turbidity\", \"Potability\"]", "column_data_types": "{\"ph\": \"float64\", \"Hardness\": \"float64\", \"Solids\": \"float64\", \"Chloramines\": \"float64\", \"Sulfate\": \"float64\", \"Conductivity\": \"float64\", \"Organic_carbon\": \"float64\", \"Trihalomethanes\": \"float64\", \"Turbidity\": \"float64\", \"Potability\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 3276 entries, 0 to 3275\nData columns (total 10 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 ph 2785 non-null float64\n 1 Hardness 3276 non-null float64\n 2 Solids 3276 non-null float64\n 3 Chloramines 3276 non-null float64\n 4 Sulfate 2495 non-null float64\n 5 Conductivity 3276 non-null float64\n 6 Organic_carbon 3276 non-null float64\n 7 Trihalomethanes 3114 non-null float64\n 8 Turbidity 3276 non-null float64\n 9 Potability 3276 non-null int64 \ndtypes: float64(9), int64(1)\nmemory usage: 256.1 KB\n", "summary": "{\"ph\": {\"count\": 2785.0, \"mean\": 7.080794504276835, \"std\": 1.5943195187088104, \"min\": 0.0, \"25%\": 6.09309191422186, \"50%\": 7.036752103833548, \"75%\": 8.06206612314847, \"max\": 13.999999999999998}, \"Hardness\": {\"count\": 3276.0, \"mean\": 196.36949601730151, \"std\": 32.879761476294156, \"min\": 47.432, \"25%\": 176.85053787752437, \"50%\": 196.96762686363076, \"75%\": 216.66745621487073, \"max\": 323.124}, \"Solids\": {\"count\": 3276.0, \"mean\": 22014.092526077104, \"std\": 8768.570827785927, \"min\": 320.942611274359, \"25%\": 15666.69029696465, \"50%\": 20927.833606520187, \"75%\": 27332.762127438615, \"max\": 61227.19600771213}, \"Chloramines\": {\"count\": 3276.0, \"mean\": 7.122276793425786, \"std\": 1.5830848890397096, \"min\": 0.3520000000000003, \"25%\": 6.1274207554913, \"50%\": 7.130298973883081, \"75%\": 8.114887032109028, \"max\": 13.127000000000002}, \"Sulfate\": {\"count\": 2495.0, \"mean\": 333.7757766108135, \"std\": 41.416840461672706, \"min\": 129.00000000000003, \"25%\": 307.69949783471964, \"50%\": 333.073545745888, \"75%\": 359.9501703847443, \"max\": 481.0306423059972}, \"Conductivity\": {\"count\": 3276.0, \"mean\": 426.20511068255325, \"std\": 80.8240640511118, \"min\": 181.483753985146, \"25%\": 365.7344141184627, \"50%\": 421.8849682800544, \"75%\": 481.7923044877282, \"max\": 753.3426195583046}, \"Organic_carbon\": {\"count\": 3276.0, \"mean\": 14.284970247677318, \"std\": 3.308161999126874, \"min\": 2.1999999999999886, \"25%\": 12.065801333613067, \"50%\": 14.218337937208588, \"75%\": 16.557651543843434, \"max\": 28.30000000000001}, \"Trihalomethanes\": {\"count\": 3114.0, \"mean\": 66.39629294676803, \"std\": 16.175008422218657, \"min\": 0.7379999999999995, \"25%\": 55.844535620979954, \"50%\": 66.62248509808484, \"75%\": 77.33747290873062, \"max\": 124.0}, \"Turbidity\": {\"count\": 3276.0, \"mean\": 3.966786169791058, \"std\": 0.7803824084854124, \"min\": 1.45, \"25%\": 3.439710869612912, \"50%\": 3.955027562993039, \"75%\": 4.50031978728511, \"max\": 6.739}, \"Potability\": {\"count\": 3276.0, \"mean\": 0.3901098901098901, \"std\": 0.48784916967025516, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 1.0}}", "examples": 
"{\"ph\":{\"0\":null,\"1\":3.7160800754,\"2\":8.0991241893,\"3\":8.3167658842},\"Hardness\":{\"0\":204.8904554713,\"1\":129.4229205149,\"2\":224.2362593936,\"3\":214.3733940856},\"Solids\":{\"0\":20791.318980747,\"1\":18630.0578579703,\"2\":19909.5417322924,\"3\":22018.4174407753},\"Chloramines\":{\"0\":7.3002118732,\"1\":6.6352458839,\"2\":9.2758836027,\"3\":8.0593323774},\"Sulfate\":{\"0\":368.5164413498,\"1\":null,\"2\":null,\"3\":356.8861356431},\"Conductivity\":{\"0\":564.3086541722,\"1\":592.8853591349,\"2\":418.6062130645,\"3\":363.2665161642},\"Organic_carbon\":{\"0\":10.3797830781,\"1\":15.1800131164,\"2\":16.8686369296,\"3\":18.4365244955},\"Trihalomethanes\":{\"0\":86.9909704615,\"1\":56.3290762845,\"2\":66.4200925118,\"3\":100.3416743651},\"Turbidity\":{\"0\":2.9631353806,\"1\":4.5006562749,\"2\":3.0559337497,\"3\":4.6287705368},\"Potability\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0}}"}}]
| true | 1 |
<start_data_description><data_path>water-potability/water_potability.csv:
<column_names>
['ph', 'Hardness', 'Solids', 'Chloramines', 'Sulfate', 'Conductivity', 'Organic_carbon', 'Trihalomethanes', 'Turbidity', 'Potability']
<column_types>
{'ph': 'float64', 'Hardness': 'float64', 'Solids': 'float64', 'Chloramines': 'float64', 'Sulfate': 'float64', 'Conductivity': 'float64', 'Organic_carbon': 'float64', 'Trihalomethanes': 'float64', 'Turbidity': 'float64', 'Potability': 'int64'}
<dataframe_Summary>
{'ph': {'count': 2785.0, 'mean': 7.080794504276835, 'std': 1.5943195187088104, 'min': 0.0, '25%': 6.09309191422186, '50%': 7.036752103833548, '75%': 8.06206612314847, 'max': 13.999999999999998}, 'Hardness': {'count': 3276.0, 'mean': 196.36949601730151, 'std': 32.879761476294156, 'min': 47.432, '25%': 176.85053787752437, '50%': 196.96762686363076, '75%': 216.66745621487073, 'max': 323.124}, 'Solids': {'count': 3276.0, 'mean': 22014.092526077104, 'std': 8768.570827785927, 'min': 320.942611274359, '25%': 15666.69029696465, '50%': 20927.833606520187, '75%': 27332.762127438615, 'max': 61227.19600771213}, 'Chloramines': {'count': 3276.0, 'mean': 7.122276793425786, 'std': 1.5830848890397096, 'min': 0.3520000000000003, '25%': 6.1274207554913, '50%': 7.130298973883081, '75%': 8.114887032109028, 'max': 13.127000000000002}, 'Sulfate': {'count': 2495.0, 'mean': 333.7757766108135, 'std': 41.416840461672706, 'min': 129.00000000000003, '25%': 307.69949783471964, '50%': 333.073545745888, '75%': 359.9501703847443, 'max': 481.0306423059972}, 'Conductivity': {'count': 3276.0, 'mean': 426.20511068255325, 'std': 80.8240640511118, 'min': 181.483753985146, '25%': 365.7344141184627, '50%': 421.8849682800544, '75%': 481.7923044877282, 'max': 753.3426195583046}, 'Organic_carbon': {'count': 3276.0, 'mean': 14.284970247677318, 'std': 3.308161999126874, 'min': 2.1999999999999886, '25%': 12.065801333613067, '50%': 14.218337937208588, '75%': 16.557651543843434, 'max': 28.30000000000001}, 'Trihalomethanes': {'count': 3114.0, 'mean': 66.39629294676803, 'std': 16.175008422218657, 'min': 0.7379999999999995, '25%': 55.844535620979954, '50%': 66.62248509808484, '75%': 77.33747290873062, 'max': 124.0}, 'Turbidity': {'count': 3276.0, 'mean': 3.966786169791058, 'std': 0.7803824084854124, 'min': 1.45, '25%': 3.439710869612912, '50%': 3.955027562993039, '75%': 4.50031978728511, 'max': 6.739}, 'Potability': {'count': 3276.0, 'mean': 0.3901098901098901, 'std': 0.48784916967025516, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 1.0}}
<dataframe_info>
RangeIndex: 3276 entries, 0 to 3275
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 ph 2785 non-null float64
1 Hardness 3276 non-null float64
2 Solids 3276 non-null float64
3 Chloramines 3276 non-null float64
4 Sulfate 2495 non-null float64
5 Conductivity 3276 non-null float64
6 Organic_carbon 3276 non-null float64
7 Trihalomethanes 3114 non-null float64
8 Turbidity 3276 non-null float64
9 Potability 3276 non-null int64
dtypes: float64(9), int64(1)
memory usage: 256.1 KB
<some_examples>
{'ph': {'0': None, '1': 3.7160800754, '2': 8.0991241893, '3': 8.3167658842}, 'Hardness': {'0': 204.8904554713, '1': 129.4229205149, '2': 224.2362593936, '3': 214.3733940856}, 'Solids': {'0': 20791.318980747, '1': 18630.0578579703, '2': 19909.5417322924, '3': 22018.4174407753}, 'Chloramines': {'0': 7.3002118732, '1': 6.6352458839, '2': 9.2758836027, '3': 8.0593323774}, 'Sulfate': {'0': 368.5164413498, '1': None, '2': None, '3': 356.8861356431}, 'Conductivity': {'0': 564.3086541722, '1': 592.8853591349, '2': 418.6062130645, '3': 363.2665161642}, 'Organic_carbon': {'0': 10.3797830781, '1': 15.1800131164, '2': 16.8686369296, '3': 18.4365244955}, 'Trihalomethanes': {'0': 86.9909704615, '1': 56.3290762845, '2': 66.4200925118, '3': 100.3416743651}, 'Turbidity': {'0': 2.9631353806, '1': 4.5006562749, '2': 3.0559337497, '3': 4.6287705368}, 'Potability': {'0': 0, '1': 0, '2': 0, '3': 0}}
<end_description>
| 763 | 0 | 3,040 | 763 |
69393612
|
<jupyter_start><jupyter_text>Water Quality
# Context
`Access to safe drinking-water is essential to health, a basic human right and a component of effective policy for health protection. This is important as a health and development issue at a national, regional and local level. In some regions, it has been shown that investments in water supply and sanitation can yield a net economic benefit, since the reductions in adverse health effects and health care costs outweigh the costs of undertaking the interventions.`
# Content
The water_potability.csv file contains water quality metrics for 3276 different water bodies.
### 1. pH value:
```PH is an important parameter in evaluating the acid–base balance of water. It is also the indicator of acidic or alkaline condition of water status. WHO has recommended maximum permissible limit of pH from 6.5 to 8.5. The current investigation ranges were 6.52–6.83 which are in the range of WHO standards. ```
### 2. Hardness:
```Hardness is mainly caused by calcium and magnesium salts. These salts are dissolved from geologic deposits through which water travels. The length of time water is in contact with hardness producing material helps determine how much hardness there is in raw water.
Hardness was originally defined as the capacity of water to precipitate soap caused by Calcium and Magnesium.```
### 3. Solids (Total dissolved solids - TDS):
```Water has the ability to dissolve a wide range of inorganic and some organic minerals or salts such as potassium, calcium, sodium, bicarbonates, chlorides, magnesium, sulfates etc. These minerals can produce an unwanted taste and a diluted color in the appearance of water. This is an important parameter for the use of water. A high TDS value indicates that the water is highly mineralized. The desirable limit for TDS is 500 mg/l and the maximum limit is 1000 mg/l, as prescribed for drinking purposes. ```
### 4. Chloramines:
```Chlorine and chloramine are the major disinfectants used in public water systems. Chloramines are most commonly formed when ammonia is added to chlorine to treat drinking water. Chlorine levels up to 4 milligrams per liter (mg/L or 4 parts per million (ppm)) are considered safe in drinking water.```
### 5. Sulfate:
```Sulfates are naturally occurring substances that are found in minerals, soil, and rocks. They are present in ambient air, groundwater, plants, and food. The principal commercial use of sulfate is in the chemical industry. Sulfate concentration in seawater is about 2,700 milligrams per liter (mg/L). It ranges from 3 to 30 mg/L in most freshwater supplies, although much higher concentrations (1000 mg/L) are found in some geographic locations. ```
### 6. Conductivity:
```Pure water is not a good conductor of electric current; rather, it is a good insulator. An increase in ion concentration enhances the electrical conductivity of water. Generally, the amount of dissolved solids in water determines the electrical conductivity. Electrical conductivity (EC) actually measures the ionic process of a solution that enables it to transmit current. According to WHO standards, the EC value should not exceed 400 μS/cm. ```
### 7. Organic_carbon:
```Total Organic Carbon (TOC) in source waters comes from decaying natural organic matter (NOM) as well as synthetic sources. TOC is a measure of the total amount of carbon in organic compounds in pure water. According to the US EPA, TOC should be < 2 mg/L in treated / drinking water, and < 4 mg/L in source water that is used for treatment.```
### 8. Trihalomethanes:
```THMs are chemicals which may be found in water treated with chlorine. The concentration of THMs in drinking water varies according to the level of organic material in the water, the amount of chlorine required to treat the water, and the temperature of the water that is being treated. THM levels up to 80 ppm is considered safe in drinking water.```
### 9. Turbidity:
```The turbidity of water depends on the quantity of solid matter present in the suspended state. It is a measure of light emitting properties of water and the test is used to indicate the quality of waste discharge with respect to colloidal matter. The mean turbidity value obtained for Wondo Genet Campus (0.98 NTU) is lower than the WHO recommended value of 5.00 NTU.```
### 10. Potability:
```Indicates if water is safe for human consumption where 1 means Potable and 0 means Not potable.```
Kaggle dataset identifier: water-potability
<jupyter_code>import pandas as pd
df = pd.read_csv('water-potability/water_potability.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 3276 entries, 0 to 3275
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 ph 2785 non-null float64
1 Hardness 3276 non-null float64
2 Solids 3276 non-null float64
3 Chloramines 3276 non-null float64
4 Sulfate 2495 non-null float64
5 Conductivity 3276 non-null float64
6 Organic_carbon 3276 non-null float64
7 Trihalomethanes 3114 non-null float64
8 Turbidity 3276 non-null float64
9 Potability 3276 non-null int64
dtypes: float64(9), int64(1)
memory usage: 256.1 KB
<jupyter_text>Examples:
{
"ph": NaN,
"Hardness": 204.8904554713,
"Solids": 20791.318980747,
"Chloramines": 7.3002118732,
"Sulfate": 368.5164413498,
"Conductivity": 564.3086541722,
"Organic_carbon": 10.379783078100001,
"Trihalomethanes": 86.9909704615,
"Turbidity": 2.9631353806,
"Potability": 0.0
}
{
"ph": 3.7160800754,
"Hardness": 129.4229205149,
"Solids": 18630.0578579703,
"Chloramines": 6.6352458839,
"Sulfate": NaN,
"Conductivity": 592.8853591349,
"Organic_carbon": 15.1800131164,
"Trihalomethanes": 56.3290762845,
"Turbidity": 4.5006562749,
"Potability": 0.0
}
{
"ph": 8.0991241893,
"Hardness": 224.2362593936,
"Solids": 19909.5417322924,
"Chloramines": 9.2758836027,
"Sulfate": NaN,
"Conductivity": 418.6062130645,
"Organic_carbon": 16.8686369296,
"Trihalomethanes": 66.4200925118,
"Turbidity": 3.0559337497,
"Potability": 0.0
}
{
"ph": 8.3167658842,
"Hardness": 214.3733940856,
"Solids": 22018.4174407753,
"Chloramines": 8.0593323774,
"Sulfate": 356.8861356431,
"Conductivity": 363.2665161642,
"Organic_carbon": 18.4365244955,
"Trihalomethanes": 100.3416743651,
"Turbidity": 4.6287705368,
"Potability": 0.0
}
<jupyter_script>import numpy as np
import pandas as pd
from scipy import stats
import matplotlib.pyplot as plt
import seaborn as sns
data = pd.read_csv("../input/water-potability/water_potability.csv")
data.head()
# ## 1. pH value:
# PH is an important parameter in evaluating the acid–base balance of water. It is also the indicator of acidic or alkaline condition of water status. WHO has recommended maximum permissible limit of pH from 6.5 to 8.5. The current investigation ranges were 6.52–6.83 which are in the range of WHO standards.
# ## 2. Hardness:
# Hardness is mainly caused by calcium and magnesium salts. These salts are dissolved from geologic deposits through which water travels. The length of time water is in contact with hardness producing material helps determine how much hardness there is in raw water. Hardness was originally defined as the capacity of water to precipitate soap caused by Calcium and Magnesium.
# ## 3. Solids (Total dissolved solids - TDS):
# Water has the ability to dissolve a wide range of inorganic and some organic minerals or salts such as potassium, calcium, sodium, bicarbonates, chlorides, magnesium, sulfates etc. These minerals can produce an unwanted taste and a diluted color in the appearance of water. This is an important parameter for the use of water. A high TDS value indicates that the water is highly mineralized. The desirable limit for TDS is 500 mg/l and the maximum limit is 1000 mg/l, as prescribed for drinking purposes.
# ## 4. Chloramines:
# Chlorine and chloramine are the major disinfectants used in public water systems. Chloramines are most commonly formed when ammonia is added to chlorine to treat drinking water. Chlorine levels up to 4 milligrams per liter (mg/L or 4 parts per million (ppm)) are considered safe in drinking water.
# ## 5. Sulfate:
# Sulfates are naturally occurring substances that are found in minerals, soil, and rocks. They are present in ambient air, groundwater, plants, and food. The principal commercial use of sulfate is in the chemical industry. Sulfate concentration in seawater is about 2,700 milligrams per liter (mg/L). It ranges from 3 to 30 mg/L in most freshwater supplies, although much higher concentrations (1000 mg/L) are found in some geographic locations.
# ## 6. Conductivity:
# Pure water is not a good conductor of electric current; rather, it is a good insulator. An increase in ion concentration enhances the electrical conductivity of water. Generally, the amount of dissolved solids in water determines the electrical conductivity. Electrical conductivity (EC) actually measures the ionic process of a solution that enables it to transmit current. According to WHO standards, the EC value should not exceed 400 μS/cm.
# ## 7. Organic_carbon:
# Total Organic Carbon (TOC) in source waters comes from decaying natural organic matter (NOM) as well as synthetic sources. TOC is a measure of the total amount of carbon in organic compounds in pure water. According to the US EPA, TOC should be < 2 mg/L in treated / drinking water, and < 4 mg/L in source water that is used for treatment.
# ## 8. Trihalomethanes:
# THMs are chemicals which may be found in water treated with chlorine. The concentration of THMs in drinking water varies according to the level of organic material in the water, the amount of chlorine required to treat the water, and the temperature of the water that is being treated. THM levels up to 80 ppm is considered safe in drinking water.
# ## 9. Turbidity:
# The turbidity of water depends on the quantity of solid matter present in the suspended state. It is a measure of light emitting properties of water and the test is used to indicate the quality of waste discharge with respect to colloidal matter. The mean turbidity value obtained for Wondo Genet Campus (0.98 NTU) is lower than the WHO recommended value of 5.00 NTU.
# ## 10. Potability:
# Indicates if water is safe for human consumption where 1 means Potable and 0 means Not potable.
data.isna().sum()
data.describe().T
from sklearn.preprocessing import MinMaxScaler
minmax = MinMaxScaler()
data1 = minmax.fit_transform(data)
data1 = pd.DataFrame(data1, columns=data.columns)
data1.fillna(data1.mean(), inplace=True)
# ## Second way to handle missing values:
# ### from sklearn.impute import SimpleImputer
# ### sm = SimpleImputer(missing_values=np.nan, strategy='mean')
# ### sm.fit_transform(data1)
#
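# A runnable version of the SimpleImputer alternative sketched in the comment above. This is
# purely illustrative: data1 has already been imputed with fillna(mean), so the values match.
from sklearn.impute import SimpleImputer

imputer = SimpleImputer(missing_values=np.nan, strategy="mean")
data1_imputed = pd.DataFrame(imputer.fit_transform(data1), columns=data1.columns)
data1_imputed.isna().sum()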
data1.describe()
fig, (ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8, ax9) = plt.subplots(
ncols=9, nrows=1, figsize=(30, 15), sharey=True
)
fig.suptitle("Box Plot For Realized outlier")
ax1.set_title("ph")
ax1.boxplot(data1["ph"], notch=True, vert=True)
ax2.set_title("Hardness")
ax2.boxplot(data1["Hardness"], notch=True, vert=True)
ax3.set_title("Solids")
ax3.boxplot(data1["Solids"], notch=True, vert=True)
ax4.set_title("Chloramines")
ax4.boxplot(data1["Chloramines"], notch=True, vert=True)
ax5.set_title("Sulfate")
ax5.boxplot(data1["Sulfate"], notch=True, vert=True)
ax6.set_title("Conductivity")
ax6.boxplot(data1["Conductivity"], notch=True, vert=True)
ax7.set_title("Organic_carbon")
ax7.boxplot(data1["Organic_carbon"], notch=True, vert=True)
ax8.set_title("Trihalomethanes")
ax8.boxplot(data1["Trihalomethanes"], notch=True, vert=True)
ax9.set_title("Turbidity")
ax9.boxplot(data1["Turbidity"], notch=True, vert=True)
fig, ((ax1, ax2, ax3), (ax4, ax5, ax6), (ax7, ax8, ax9)) = plt.subplots(
ncols=3, nrows=3, figsize=(15, 30), sharey=True
)
fig.suptitle("Histogram Plot For Realized outlier")
ax1.set_title("ph")
ax1.hist(data1["ph"])
ax2.set_title("Hardness")
ax2.hist(data1["Hardness"])
ax3.set_title("Solids")
ax3.hist(data1["Solids"])
ax4.set_title("Chloramines")
ax4.hist(data1["Chloramines"])
ax5.set_title("Sulfate")
ax5.hist(data1["Sulfate"])
ax6.set_title("Conductivity")
ax6.hist(data1["Conductivity"])
ax7.set_title("Organic_carbon")
ax7.hist(data1["Organic_carbon"])
ax8.set_title("Trihalomethanes")
ax8.hist(data1["Trihalomethanes"])
ax9.set_title("Turbidity")
ax9.hist(data1["Turbidity"])
# Absolute z-scores of every (scaled) feature
z_score = np.abs(stats.zscore(data1))
# Report the positions where |z| > 3, the conventional outlier threshold
print(np.where(z_score > 3))
# Keep only rows whose features all satisfy |z| < 2, a stricter cut than the 3 reported above
df = data1[(z_score < 2).all(axis=1)]
fig, (ax1, ax2, ax3, ax4, ax5, ax6, ax7, ax8, ax9) = plt.subplots(
ncols=9, nrows=1, figsize=(30, 15), sharey=True
)
fig.suptitle("Box Plot For Realized outlier")
ax1.set_title("ph")
ax1.boxplot(df["ph"], notch=True, vert=True)
ax2.set_title("Hardness")
ax2.boxplot(df["Hardness"], notch=True, vert=True)
ax3.set_title("Solids")
ax3.boxplot(df["Solids"], notch=True, vert=True)
ax4.set_title("Chloramines")
ax4.boxplot(df["Chloramines"], notch=True, vert=True)
ax5.set_title("Sulfate")
ax5.boxplot(df["Sulfate"], notch=True, vert=True)
ax6.set_title("Conductivity")
ax6.boxplot(df["Conductivity"], notch=True, vert=True)
ax7.set_title("Organic_carbon")
ax7.boxplot(df["Organic_carbon"], notch=True, vert=True)
ax8.set_title("Trihalomethanes")
ax8.boxplot(df["Trihalomethanes"], notch=True, vert=True)
ax9.set_title("Turbidity")
ax9.boxplot(df["Turbidity"], notch=True, vert=True)
df = df.reset_index(drop=True)
sns.countplot(df["Potability"])
## The dataset is imbalanced
plt.figure(figsize=(10, 10))
sns.heatmap(df.corr(), annot=True)
# ## What's the difference between Pearson, Spearman, and Kendall's tau?
# ### Pearson: measures linear correlation; values range from -1 to 1
# ### Spearman: converts the data to ranks first, then computes the correlation on the ranks rather than the raw values
# ### Kendall's tau: also rank-based, but compares pairs of observations: (nc - nd) / (nc + nd), where nc and nd are the numbers of concordant and discordant pairs
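# The three correlation measures described above can be compared directly with pandas; a
# quick illustrative sketch (not in the original notebook) of each feature's correlation
# with the target under each method:
for method in ["pearson", "spearman", "kendall"]:
    corr_with_target = df.corr(method=method)["Potability"].drop("Potability")
    print(method, corr_with_target.round(3).to_dict())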
from imblearn.combine import SMOTETomek
balancer = SMOTETomek(n_jobs=-1)
x, y = balancer.fit_resample(df.iloc[:, :-1], df.iloc[:, -1])
y.value_counts()
from sklearn.feature_selection import SelectKBest, chi2
effective_feature = SelectKBest(chi2, k=5)
x_new = effective_feature.fit_transform(x, y)
effective_feature.get_support()
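# get_support() returns a boolean mask over the input columns; mapping it back to the column
# names makes the chi2 selection easier to read. This is a small convenience not in the
# original notebook, and it relies on x preserving the column order of df.iloc[:, :-1].
selected_features = df.columns[:-1][effective_feature.get_support()]
print("Features kept by SelectKBest(chi2, k=5):", list(selected_features))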
from sklearn import svm
from sklearn.model_selection import cross_val_score, cross_val_predict
from sklearn.metrics import accuracy_score, mean_squared_error
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier
svc = svm.SVC(gamma="auto", C=10)
y_predict = cross_val_predict(svc, x_new, y)
accuracy_score(y_pred=y_predict, y_true=y)
rf = RandomForestClassifier(max_depth=4, n_jobs=-1, n_estimators=400)
y_predict_rf = cross_val_predict(rf, x, y)
accuracy_score(y_pred=y_predict_rf, y_true=y)
# import random
# random.shuffle(y)
rf = RandomForestClassifier(max_depth=2, n_jobs=-1, n_estimators=300)
y_predict_rf = cross_val_predict(rf, x, y)
accuracy_score(y_pred=y_predict_rf, y_true=y)
from sklearn.preprocessing import StandardScaler
std = StandardScaler()
x_std = std.fit_transform(x)
from sklearn.decomposition import PCA
pca = PCA()
x_pca_scale = pca.fit_transform(x_std)
gbc = GradientBoostingClassifier(n_estimators=1500, max_depth=7, learning_rate=0.3)
y_predict_gbc = cross_val_predict(gbc, x_pca_scale, y)
accuracy_score(y_pred=y_predict_gbc, y_true=y)
import tensorflow as tf
from keras.models import Sequential
from keras import layers
model = Sequential()
model.add(layers.Dense(32, activation="relu", input_shape=(9,)))
model.add(layers.Dense(16, activation="relu"))
model.add(layers.Dense(8, activation="relu"))
model.add(layers.Dropout(0.2))
model.add(layers.Dense(4, activation="relu"))
model.add(layers.Dense(1, activation="sigmoid"))
model.summary()
model.compile(loss="binary_crossentropy", metrics="accuracy", optimizer="adam")
history = model.fit(x_pca_scale, y, batch_size=32, epochs=100, validation_split=0.3)
pd.DataFrame(history.history).plot()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/393/69393612.ipynb
|
water-potability
|
adityakadiwal
|
[{"Id": 69393612, "ScriptId": 18916319, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5771810, "CreationDate": "07/30/2021 12:42:53", "VersionNumber": 2.0, "Title": "Statistics, Feature_selection, EDA, Model 84%ACC", "EvaluationDate": "07/30/2021", "IsChange": true, "TotalLines": 237.0, "LinesInsertedFromPrevious": 59.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 178.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
|
[{"Id": 92503910, "KernelVersionId": 69393612, "SourceDatasetVersionId": 2157486}]
|
[{"Id": 2157486, "DatasetId": 1292407, "DatasourceVersionId": 2198621, "CreatorUserId": 5454565, "LicenseName": "CC0: Public Domain", "CreationDate": "04/25/2021 10:27:44", "VersionNumber": 3.0, "Title": "Water Quality", "Slug": "water-potability", "Subtitle": "Drinking water potability", "Description": "# Context\n\n`Access to safe drinking-water is essential to health, a basic human right and a component of effective policy for health protection. This is important as a health and development issue at a national, regional and local level. In some regions, it has been shown that investments in water supply and sanitation can yield a net economic benefit, since the reductions in adverse health effects and health care costs outweigh the costs of undertaking the interventions.`\n\n\n# Content\n\n\nThe water_potability.csv file contains water quality metrics for 3276 different water bodies. \n### 1. pH value:\n```PH is an important parameter in evaluating the acid\u2013base balance of water. It is also the indicator of acidic or alkaline condition of water status. WHO has recommended maximum permissible limit of pH from 6.5 to 8.5. The current investigation ranges were 6.52\u20136.83 which are in the range of WHO standards. ```\n\n### 2. Hardness:\n```Hardness is mainly caused by calcium and magnesium salts. These salts are dissolved from geologic deposits through which water travels. The length of time water is in contact with hardness producing material helps determine how much hardness there is in raw water.\nHardness was originally defined as the capacity of water to precipitate soap caused by Calcium and Magnesium.```\n\n### 3. Solids (Total dissolved solids - TDS): \n```Water has the ability to dissolve a wide range of inorganic and some organic minerals or salts such as potassium, calcium, sodium, bicarbonates, chlorides, magnesium, sulfates etc. These minerals produced un-wanted taste and diluted color in appearance of water. This is the important parameter for the use of water. The water with high TDS value indicates that water is highly mineralized. Desirable limit for TDS is 500 mg/l and maximum limit is 1000 mg/l which prescribed for drinking purpose. ```\n\n### 4. Chloramines: \n```Chlorine and chloramine are the major disinfectants used in public water systems. Chloramines are most commonly formed when ammonia is added to chlorine to treat drinking water. Chlorine levels up to 4 milligrams per liter (mg/L or 4 parts per million (ppm)) are considered safe in drinking water.```\n\n### 5. Sulfate: \n```Sulfates are naturally occurring substances that are found in minerals, soil, and rocks. They are present in ambient air, groundwater, plants, and food. The principal commercial use of sulfate is in the chemical industry. Sulfate concentration in seawater is about 2,700 milligrams per liter (mg/L). It ranges from 3 to 30 mg/L in most freshwater supplies, although much higher concentrations (1000 mg/L) are found in some geographic locations. ```\n\n### 6. Conductivity: \n```Pure water is not a good conductor of electric current rather\u2019s a good insulator. Increase in ions concentration enhances the electrical conductivity of water. Generally, the amount of dissolved solids in water determines the electrical conductivity. Electrical conductivity (EC) actually measures the ionic process of a solution that enables it to transmit current. According to WHO standards, EC value should not exceeded 400 \u03bcS/cm. ```\n\n### 7. 
Organic_carbon: \n ```Total Organic Carbon (TOC) in source waters comes from decaying natural organic matter (NOM) as well as synthetic sources. TOC is a measure of the total amount of carbon in organic compounds in pure water. According to US EPA < 2 mg/L as TOC in treated / drinking water, and < 4 mg/Lit in source water which is use for treatment.```\n\n### 8. Trihalomethanes: \n```THMs are chemicals which may be found in water treated with chlorine. The concentration of THMs in drinking water varies according to the level of organic material in the water, the amount of chlorine required to treat the water, and the temperature of the water that is being treated. THM levels up to 80 ppm is considered safe in drinking water.```\n\n### 9. Turbidity: \n```The turbidity of water depends on the quantity of solid matter present in the suspended state. It is a measure of light emitting properties of water and the test is used to indicate the quality of waste discharge with respect to colloidal matter. The mean turbidity value obtained for Wondo Genet Campus (0.98 NTU) is lower than the WHO recommended value of 5.00 NTU.```\n\n### 10. Potability: \n```Indicates if water is safe for human consumption where 1 means Potable and 0 means Not potable.```", "VersionNotes": "Removed garbage column", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1292407, "CreatorUserId": 5454565, "OwnerUserId": 5454565.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2157486.0, "CurrentDatasourceVersionId": 2198621.0, "ForumId": 1311077, "Type": 2, "CreationDate": "04/24/2021 07:18:57", "LastActivityDate": "04/24/2021", "TotalViews": 422520, "TotalDownloads": 61531, "TotalVotes": 1262, "TotalKernels": 437}]
|
[{"Id": 5454565, "UserName": "adityakadiwal", "DisplayName": "Aditya Kadiwal", "RegisterDate": "07/12/2020", "PerformanceTier": 2}]
|
|
[{"water-potability/water_potability.csv": {"column_names": "[\"ph\", \"Hardness\", \"Solids\", \"Chloramines\", \"Sulfate\", \"Conductivity\", \"Organic_carbon\", \"Trihalomethanes\", \"Turbidity\", \"Potability\"]", "column_data_types": "{\"ph\": \"float64\", \"Hardness\": \"float64\", \"Solids\": \"float64\", \"Chloramines\": \"float64\", \"Sulfate\": \"float64\", \"Conductivity\": \"float64\", \"Organic_carbon\": \"float64\", \"Trihalomethanes\": \"float64\", \"Turbidity\": \"float64\", \"Potability\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 3276 entries, 0 to 3275\nData columns (total 10 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 ph 2785 non-null float64\n 1 Hardness 3276 non-null float64\n 2 Solids 3276 non-null float64\n 3 Chloramines 3276 non-null float64\n 4 Sulfate 2495 non-null float64\n 5 Conductivity 3276 non-null float64\n 6 Organic_carbon 3276 non-null float64\n 7 Trihalomethanes 3114 non-null float64\n 8 Turbidity 3276 non-null float64\n 9 Potability 3276 non-null int64 \ndtypes: float64(9), int64(1)\nmemory usage: 256.1 KB\n", "summary": "{\"ph\": {\"count\": 2785.0, \"mean\": 7.080794504276835, \"std\": 1.5943195187088104, \"min\": 0.0, \"25%\": 6.09309191422186, \"50%\": 7.036752103833548, \"75%\": 8.06206612314847, \"max\": 13.999999999999998}, \"Hardness\": {\"count\": 3276.0, \"mean\": 196.36949601730151, \"std\": 32.879761476294156, \"min\": 47.432, \"25%\": 176.85053787752437, \"50%\": 196.96762686363076, \"75%\": 216.66745621487073, \"max\": 323.124}, \"Solids\": {\"count\": 3276.0, \"mean\": 22014.092526077104, \"std\": 8768.570827785927, \"min\": 320.942611274359, \"25%\": 15666.69029696465, \"50%\": 20927.833606520187, \"75%\": 27332.762127438615, \"max\": 61227.19600771213}, \"Chloramines\": {\"count\": 3276.0, \"mean\": 7.122276793425786, \"std\": 1.5830848890397096, \"min\": 0.3520000000000003, \"25%\": 6.1274207554913, \"50%\": 7.130298973883081, \"75%\": 8.114887032109028, \"max\": 13.127000000000002}, \"Sulfate\": {\"count\": 2495.0, \"mean\": 333.7757766108135, \"std\": 41.416840461672706, \"min\": 129.00000000000003, \"25%\": 307.69949783471964, \"50%\": 333.073545745888, \"75%\": 359.9501703847443, \"max\": 481.0306423059972}, \"Conductivity\": {\"count\": 3276.0, \"mean\": 426.20511068255325, \"std\": 80.8240640511118, \"min\": 181.483753985146, \"25%\": 365.7344141184627, \"50%\": 421.8849682800544, \"75%\": 481.7923044877282, \"max\": 753.3426195583046}, \"Organic_carbon\": {\"count\": 3276.0, \"mean\": 14.284970247677318, \"std\": 3.308161999126874, \"min\": 2.1999999999999886, \"25%\": 12.065801333613067, \"50%\": 14.218337937208588, \"75%\": 16.557651543843434, \"max\": 28.30000000000001}, \"Trihalomethanes\": {\"count\": 3114.0, \"mean\": 66.39629294676803, \"std\": 16.175008422218657, \"min\": 0.7379999999999995, \"25%\": 55.844535620979954, \"50%\": 66.62248509808484, \"75%\": 77.33747290873062, \"max\": 124.0}, \"Turbidity\": {\"count\": 3276.0, \"mean\": 3.966786169791058, \"std\": 0.7803824084854124, \"min\": 1.45, \"25%\": 3.439710869612912, \"50%\": 3.955027562993039, \"75%\": 4.50031978728511, \"max\": 6.739}, \"Potability\": {\"count\": 3276.0, \"mean\": 0.3901098901098901, \"std\": 0.48784916967025516, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 1.0, \"max\": 1.0}}", "examples": 
"{\"ph\":{\"0\":null,\"1\":3.7160800754,\"2\":8.0991241893,\"3\":8.3167658842},\"Hardness\":{\"0\":204.8904554713,\"1\":129.4229205149,\"2\":224.2362593936,\"3\":214.3733940856},\"Solids\":{\"0\":20791.318980747,\"1\":18630.0578579703,\"2\":19909.5417322924,\"3\":22018.4174407753},\"Chloramines\":{\"0\":7.3002118732,\"1\":6.6352458839,\"2\":9.2758836027,\"3\":8.0593323774},\"Sulfate\":{\"0\":368.5164413498,\"1\":null,\"2\":null,\"3\":356.8861356431},\"Conductivity\":{\"0\":564.3086541722,\"1\":592.8853591349,\"2\":418.6062130645,\"3\":363.2665161642},\"Organic_carbon\":{\"0\":10.3797830781,\"1\":15.1800131164,\"2\":16.8686369296,\"3\":18.4365244955},\"Trihalomethanes\":{\"0\":86.9909704615,\"1\":56.3290762845,\"2\":66.4200925118,\"3\":100.3416743651},\"Turbidity\":{\"0\":2.9631353806,\"1\":4.5006562749,\"2\":3.0559337497,\"3\":4.6287705368},\"Potability\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0}}"}}]
| true | 1 |
<start_data_description><data_path>water-potability/water_potability.csv:
<column_names>
['ph', 'Hardness', 'Solids', 'Chloramines', 'Sulfate', 'Conductivity', 'Organic_carbon', 'Trihalomethanes', 'Turbidity', 'Potability']
<column_types>
{'ph': 'float64', 'Hardness': 'float64', 'Solids': 'float64', 'Chloramines': 'float64', 'Sulfate': 'float64', 'Conductivity': 'float64', 'Organic_carbon': 'float64', 'Trihalomethanes': 'float64', 'Turbidity': 'float64', 'Potability': 'int64'}
<dataframe_Summary>
{'ph': {'count': 2785.0, 'mean': 7.080794504276835, 'std': 1.5943195187088104, 'min': 0.0, '25%': 6.09309191422186, '50%': 7.036752103833548, '75%': 8.06206612314847, 'max': 13.999999999999998}, 'Hardness': {'count': 3276.0, 'mean': 196.36949601730151, 'std': 32.879761476294156, 'min': 47.432, '25%': 176.85053787752437, '50%': 196.96762686363076, '75%': 216.66745621487073, 'max': 323.124}, 'Solids': {'count': 3276.0, 'mean': 22014.092526077104, 'std': 8768.570827785927, 'min': 320.942611274359, '25%': 15666.69029696465, '50%': 20927.833606520187, '75%': 27332.762127438615, 'max': 61227.19600771213}, 'Chloramines': {'count': 3276.0, 'mean': 7.122276793425786, 'std': 1.5830848890397096, 'min': 0.3520000000000003, '25%': 6.1274207554913, '50%': 7.130298973883081, '75%': 8.114887032109028, 'max': 13.127000000000002}, 'Sulfate': {'count': 2495.0, 'mean': 333.7757766108135, 'std': 41.416840461672706, 'min': 129.00000000000003, '25%': 307.69949783471964, '50%': 333.073545745888, '75%': 359.9501703847443, 'max': 481.0306423059972}, 'Conductivity': {'count': 3276.0, 'mean': 426.20511068255325, 'std': 80.8240640511118, 'min': 181.483753985146, '25%': 365.7344141184627, '50%': 421.8849682800544, '75%': 481.7923044877282, 'max': 753.3426195583046}, 'Organic_carbon': {'count': 3276.0, 'mean': 14.284970247677318, 'std': 3.308161999126874, 'min': 2.1999999999999886, '25%': 12.065801333613067, '50%': 14.218337937208588, '75%': 16.557651543843434, 'max': 28.30000000000001}, 'Trihalomethanes': {'count': 3114.0, 'mean': 66.39629294676803, 'std': 16.175008422218657, 'min': 0.7379999999999995, '25%': 55.844535620979954, '50%': 66.62248509808484, '75%': 77.33747290873062, 'max': 124.0}, 'Turbidity': {'count': 3276.0, 'mean': 3.966786169791058, 'std': 0.7803824084854124, 'min': 1.45, '25%': 3.439710869612912, '50%': 3.955027562993039, '75%': 4.50031978728511, 'max': 6.739}, 'Potability': {'count': 3276.0, 'mean': 0.3901098901098901, 'std': 0.48784916967025516, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 1.0, 'max': 1.0}}
<dataframe_info>
RangeIndex: 3276 entries, 0 to 3275
Data columns (total 10 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 ph 2785 non-null float64
1 Hardness 3276 non-null float64
2 Solids 3276 non-null float64
3 Chloramines 3276 non-null float64
4 Sulfate 2495 non-null float64
5 Conductivity 3276 non-null float64
6 Organic_carbon 3276 non-null float64
7 Trihalomethanes 3114 non-null float64
8 Turbidity 3276 non-null float64
9 Potability 3276 non-null int64
dtypes: float64(9), int64(1)
memory usage: 256.1 KB
<some_examples>
{'ph': {'0': None, '1': 3.7160800754, '2': 8.0991241893, '3': 8.3167658842}, 'Hardness': {'0': 204.8904554713, '1': 129.4229205149, '2': 224.2362593936, '3': 214.3733940856}, 'Solids': {'0': 20791.318980747, '1': 18630.0578579703, '2': 19909.5417322924, '3': 22018.4174407753}, 'Chloramines': {'0': 7.3002118732, '1': 6.6352458839, '2': 9.2758836027, '3': 8.0593323774}, 'Sulfate': {'0': 368.5164413498, '1': None, '2': None, '3': 356.8861356431}, 'Conductivity': {'0': 564.3086541722, '1': 592.8853591349, '2': 418.6062130645, '3': 363.2665161642}, 'Organic_carbon': {'0': 10.3797830781, '1': 15.1800131164, '2': 16.8686369296, '3': 18.4365244955}, 'Trihalomethanes': {'0': 86.9909704615, '1': 56.3290762845, '2': 66.4200925118, '3': 100.3416743651}, 'Turbidity': {'0': 2.9631353806, '1': 4.5006562749, '2': 3.0559337497, '3': 4.6287705368}, 'Potability': {'0': 0, '1': 0, '2': 0, '3': 0}}
<end_description>
| 3,184 | 3 | 5,461 | 3,184 |
69393959
|
import torch
import pandas as pd
import numpy as np
from transformers import AutoModelForSequenceClassification, AutoTokenizer
class Dataset:
def __init__(self, text, tokenizer, max_len):
self.text = text
self.tokenizer = tokenizer
self.max_len = max_len
def __len__(self):
return len(self.text)
def __getitem__(self, item):
text = str(self.text[item])
inputs = self.tokenizer(
text, max_length=self.max_len, padding="max_length", truncation=True
)
ids = inputs["input_ids"]
mask = inputs["attention_mask"]
# token_type_ids = inputs["token_type_ids"]
return {
"input_ids": torch.tensor(ids, dtype=torch.long),
"attention_mask": torch.tensor(mask, dtype=torch.long),
# "token_type_ids": torch.tensor(token_type_ids, dtype=torch.long),
}
def generate_predictions(model_path, max_len):
model = AutoModelForSequenceClassification.from_pretrained(model_path)
tokenizer = AutoTokenizer.from_pretrained(model_path)
model.to("cuda")
model.eval()
df = pd.read_csv("../input/nlp-getting-started/test.csv")
dataset = Dataset(text=df.text.values, tokenizer=tokenizer, max_len=max_len)
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=32, num_workers=4, pin_memory=True, shuffle=False
)
final_output = []
for b_idx, data in enumerate(data_loader):
with torch.no_grad():
for key, value in data.items():
data[key] = value.to("cuda")
output = model(**data)
            output = torch.nn.functional.softmax(output.logits, dim=1)
            # Probability of the positive class (column 1)
            output = output.detach().cpu().numpy()[:, 1]
            # Binarize at 0.7 rather than the usual 0.5, biasing predictions toward class 0
            output = (output >= 0.7).astype(int).tolist()
final_output.extend(output)
torch.cuda.empty_cache()
return np.array(final_output)
preds = generate_predictions("abhishek/autonlp-ferd1-2652021", max_len=128)
sample = pd.read_csv("../input/nlp-getting-started/sample_submission.csv")
sample.target = preds
sample.to_csv("submission.csv", index=False)
sample.target.value_counts()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/393/69393959.ipynb
| null | null |
[{"Id": 69393959, "ScriptId": 18946719, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5309, "CreationDate": "07/30/2021 12:48:21", "VersionNumber": 2.0, "Title": "notebook160a4ac0b8", "EvaluationDate": "07/30/2021", "IsChange": true, "TotalLines": 71.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 70.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 605 | 0 | 605 | 605 |
||
69393162
|
<jupyter_start><jupyter_text>Chess Game Dataset (Lichess)
**General Info**
This is a set of just over 20,000 games collected from a selection of users on the site Lichess.org, along with notes on how to collect more. I will also upload more games in the future as I collect them. This set contains the:
- Game ID;
- Rated (T/F);
- Start Time;
- End Time;
- Number of Turns;
- Game Status;
- Winner;
- Time Increment;
- White Player ID;
- White Player Rating;
- Black Player ID;
- Black Player Rating;
- All Moves in Standard Chess Notation;
- Opening Eco (Standardised Code for any given opening, [list here][1]);
- Opening Name;
- Opening Ply (Number of moves in the opening phase)
All of these fields are recorded for each of the separate games from Lichess. I collected this data using the [Lichess API][2], which enables collection of any given user's game history. The difficult part was collecting usernames to use; however, the API also enables dumping of all users in a Lichess team. There are several teams on Lichess with over 1,500 players, so this proved an effective way to find users to collect games from.
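As a rough, hedged illustration of that collection step (this is not the author's actual script; the endpoint path and parameters are assumptions based on the public Lichess API documentation), a single user's recent games can be pulled as newline-delimited JSON along these lines:

import json
import requests

# "some_username" is a hypothetical placeholder; the real collection looped over members
# dumped from large Lichess teams.
resp = requests.get(
    "https://lichess.org/api/games/user/some_username",
    params={"max": 10, "moves": "true", "opening": "true"},
    headers={"Accept": "application/x-ndjson"},
)
games = [json.loads(line) for line in resp.text.splitlines() if line]
print(len(games), "games fetched")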
**Possible Uses**
Lots of information is contained within a single chess game, let alone a full dataset of multiple games. Chess is primarily a game of patterns, and data science is all about detecting patterns in data, which is why chess has historically been one of the most heavily invested-in areas of AI. This dataset collects all of the information available from 20,000 games and presents it in a format that is easy to process for analysis of, for example, what allows a player to win as black or white, how strongly meta (out-of-game) factors affect a game, the relationship between openings and victory for black and white, and more.
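As a taste of that kind of analysis, once games.csv is loaded into a DataFrame `df` (as in the code cell further down), questions such as which colour wins more often, and how openings relate to the result, reduce to simple group-bys. This is a hedged sketch, not part of the original dataset description:

# Share of games won by white, black, or drawn
print(df["winner"].value_counts(normalize=True))
# Win shares within each of the ten most common openings
top_openings = df["opening_name"].value_counts().head(10).index
print(
    df[df["opening_name"].isin(top_openings)]
    .groupby("opening_name")["winner"]
    .value_counts(normalize=True)
)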
[1]: https://www.365chess.com/eco.php
[2]: https://github.com/ornicar/lila
Kaggle dataset identifier: chess
<jupyter_code>import pandas as pd
df = pd.read_csv('chess/games.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 20058 entries, 0 to 20057
Data columns (total 16 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 id 20058 non-null object
1 rated 20058 non-null bool
2 created_at 20058 non-null float64
3 last_move_at 20058 non-null float64
4 turns 20058 non-null int64
5 victory_status 20058 non-null object
6 winner 20058 non-null object
7 increment_code 20058 non-null object
8 white_id 20058 non-null object
9 white_rating 20058 non-null int64
10 black_id 20058 non-null object
11 black_rating 20058 non-null int64
12 moves 20058 non-null object
13 opening_eco 20058 non-null object
14 opening_name 20058 non-null object
15 opening_ply 20058 non-null int64
dtypes: bool(1), float64(2), int64(4), object(9)
memory usage: 2.3+ MB
<jupyter_text>Examples:
{
"id": "TZJHLljE",
"rated": false,
"created_at": "2017-08-31 20:06:40",
"last_move_at": "2017-08-31 20:06:40",
"turns": 13,
"victory_status": "outoftime",
"winner": "white",
"increment_code": "15+2",
"white_id": "bourgris",
"white_rating": 1500,
"black_id": "a-00",
"black_rating": 1191,
"moves": "d4 d5 c4 c6 cxd5 e6 dxe6 fxe6 Nf3 Bb4+ Nc3 Ba5 Bf4",
"opening_eco": "D10",
"opening_name": "Slav Defense: Exchange Variation",
"opening_ply": 5
}
{
"id": "l1NXvwaE",
"rated": true,
"created_at": "2017-08-30 21:53:20",
"last_move_at": "2017-08-30 21:53:20",
"turns": 16,
"victory_status": "resign",
"winner": "black",
"increment_code": "5+10",
"white_id": "a-00",
"white_rating": 1322,
"black_id": "skinnerua",
"black_rating": 1261,
"moves": "d4 Nc6 e4 e5 f4 f6 dxe5 fxe5 fxe5 Nxe5 Qd4 Nc6 Qe5+ Nxe5 c4 Bb4+",
"opening_eco": "B00",
"opening_name": "Nimzowitsch Defense: Kennedy Variation",
"opening_ply": 4
}
{
"id": "mIICvQHh",
"rated": true,
"created_at": "2017-08-30 21:53:20",
"last_move_at": "2017-08-30 21:53:20",
"turns": 61,
"victory_status": "mate",
"winner": "white",
"increment_code": "5+10",
"white_id": "ischia",
"white_rating": 1496,
"black_id": "a-00",
"black_rating": 1500,
"moves": "e4 e5 d3 d6 Be3 c6 Be2 b5 Nd2 a5 a4 c5 axb5 Nc6 bxc6 Ra6 Nc4 a4 c3 a3 Nxa3 Rxa3 Rxa3 c4 dxc4 d5 cxd5 Qxd5 exd5 Be6 Ra8+ Ke7 Bc5+ Kf6 Bxf8 Kg6 Bxg7 Kxg7 dxe6 Kh6 exf7 Nf6 Rxh8 Nh5 Bxh5 Kg5 Rxh7 Kf5 Qf3+ Ke6 Bg4+ Kd6 Rh6+ Kc5 Qe3+ Kb5 c4+ Kb4 Qc3+ Ka4 Bd1#",
"opening_eco": "C20",
"opening_name": "King's Pawn Game: Leonardis Variation",
"opening_ply": 3
}
{
"id": "kWKvrqYL",
"rated": true,
"created_at": "2017-08-30 16:20:00",
"last_move_at": "2017-08-30 16:20:00",
"turns": 61,
"victory_status": "mate",
"winner": "white",
"increment_code": "20+0",
"white_id": "daniamurashov",
"white_rating": 1439,
"black_id": "adivanov2009",
"black_rating": 1454,
"moves": "d4 d5 Nf3 Bf5 Nc3 Nf6 Bf4 Ng4 e3 Nc6 Be2 Qd7 O-O O-O-O Nb5 Nb4 Rc1 Nxa2 Ra1 Nb4 Nxa7+ Kb8 Nb5 Bxc2 Bxc7+ Kc8 Qd2 Qc6 Na7+ Kd7 Nxc6 bxc6 Bxd8 Kxd8 Qxb4 e5 Qb8+ Ke7 dxe5 Be4 Ra7+ Ke6 Qe8+ Kf5 Qxf7+ Nf6 Nh4+ Kg5 g3 Ng4 Qf4+ Kh5 Qxg4+ Kh6 Qf4+ g5 Qf6+ Bg6 Nxg6 Bg7 Qxg7#",
"opening_eco": "D02",
"opening_name": "Queen's Pawn Game: Zukertort Variation",
"opening_ply": 3
}
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns # visualization tool
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
data = pd.read_csv("/kaggle/input/chess/games.csv")
# # 1) Getting to Know the Data
data.info()
data.columns
data.corr()
data.head(20)
f, ax = plt.subplots(figsize=(18, 18))
sns.heatmap(data.corr(), annot=True, linewidth=5, fmt=".1f", ax=ax)
plt.show()
# # 2) Matplotlib
# Line plot
# Black rating and white rating
data.black_rating.plot(
kind="line",
color="b",
label="Black Rating",
linewidth=1,
alpha=0.5,
grid=True,
linestyle=":",
)
data.white_rating.plot(
color="r", label="White Rating", linewidth=1, alpha=0.5, grid=True, linestyle="-."
)
plt.legend()
plt.xlabel("Game index")
plt.ylabel("Rating")
plt.show()
# Scatter Plot
plt.scatter(data.black_rating, data.white_rating, color="g", alpha=0.5)
plt.xlabel("Black Rating")
plt.ylabel("White Rating")
plt.title("Black Rating and White Rating Scatter Plot")
plt.show()
# Here, as black's rating increases, white's rating tends to increase as well.
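# To quantify this, a quick check of the linear relationship between the two rating columns:
print(data[["white_rating", "black_rating"]].corr())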
# histogram plot
data.white_rating.plot(kind="hist", bins=50, figsize=(18, 18))
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/393/69393162.ipynb
|
chess
|
datasnaek
|
[{"Id": 69393162, "ScriptId": 18918488, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7900128, "CreationDate": "07/30/2021 12:36:52", "VersionNumber": 1.0, "Title": "notebook40f9f9cffa", "EvaluationDate": "07/30/2021", "IsChange": true, "TotalLines": 68.0, "LinesInsertedFromPrevious": 68.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 92502734, "KernelVersionId": 69393162, "SourceDatasetVersionId": 3919}]
|
[{"Id": 3919, "DatasetId": 2321, "DatasourceVersionId": 3919, "CreatorUserId": 1236717, "LicenseName": "CC0: Public Domain", "CreationDate": "09/04/2017 03:09:09", "VersionNumber": 1.0, "Title": "Chess Game Dataset (Lichess)", "Slug": "chess", "Subtitle": "20,000+ Lichess Games, including moves, victor, rating, opening details and more", "Description": "**General Info**\n\nThis is a set of just over 20,000 games collected from a selection of users on the site Lichess.org, and how to collect more. I will also upload more games in the future as I collect them. This set contains the:\n\n - Game ID;\n - Rated (T/F);\n - Start Time;\n - End Time;\n - Number of Turns;\n - Game Status;\n - Winner;\n - Time Increment;\n - White Player ID;\n - White Player Rating;\n - Black Player ID;\n - Black Player Rating;\n - All Moves in Standard Chess Notation;\n - Opening Eco (Standardised Code for any given opening, [list here][1]);\n - Opening Name;\n - Opening Ply (Number of moves in the opening phase)\n\nFor each of these separate games from Lichess. I collected this data using the [Lichess API][2], which enables collection of any given users game history. The difficult part was collecting usernames to use, however the API also enables dumping of all users in a Lichess team. There are several teams on Lichess with over 1,500 players, so this proved an effective way to get users to collect games from.\n\n**Possible Uses**\n\nLots of information is contained within a single chess game, let alone a full dataset of multiple games. It is primarily a game of patterns, and data science is all about detecting patterns in data, which is why chess has been one of the most invested in areas of AI in the past. This dataset collects all of the information available from 20,000 games and presents it in a format that is easy to process for analysis of, for example, what allows a player to win as black or white, how much meta (out-of-game) factors affect a game, the relationship between openings and victory for black and white and more.\n\n\n [1]: https://www.365chess.com/eco.php\n [2]: https://github.com/ornicar/lila", "VersionNotes": "Initial release", "TotalCompressedBytes": 7672655.0, "TotalUncompressedBytes": 7672655.0}]
|
[{"Id": 2321, "CreatorUserId": 1236717, "OwnerUserId": 1236717.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 3919.0, "CurrentDatasourceVersionId": 3919.0, "ForumId": 6243, "Type": 2, "CreationDate": "09/04/2017 03:09:09", "LastActivityDate": "02/05/2018", "TotalViews": 331949, "TotalDownloads": 38314, "TotalVotes": 1152, "TotalKernels": 2322}]
|
[{"Id": 1236717, "UserName": "datasnaek", "DisplayName": "Mitchell J", "RegisterDate": "08/28/2017", "PerformanceTier": 3}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns # visualization tool
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
data = pd.read_csv("/kaggle/input/chess/games.csv")
# # 1) Getting to Know the Data
data.info()
data.columns
data.corr()
data.head(20)
f, ax = plt.subplots(figsize=(18, 18))
sns.heatmap(data.corr(), annot=True, linewidth=5, fmt=".1f", ax=ax)
plt.show()
# # 2) Matplotlib
# Line plot
# Black rating and white rating
data.black_rating.plot(
kind="line",
color="b",
label="Black Rating",
linewidth=1,
alpha=0.5,
grid=True,
linestyle=":",
)
data.white_rating.plot(
color="r", label="White Rating", linewidth=1, alpha=0.5, grid=True, linestyle="-."
)
plt.legend()
plt.xlabel("Game index")
plt.ylabel("Rating")
plt.show()
# Scatter Plot
plt.scatter(data.black_rating, data.white_rating, color="g", alpha=0.5)
plt.xlabel("Black Rating")
plt.ylabel("White Rating")
plt.title("Black Rating and White Rating Scatter Plot")
plt.show()
# Here, as black's rating increases, white's rating tends to increase as well.
# histogram plot
data.white_rating.plot(kind="hist", bins=50, figsize=(18, 18))
plt.show()
|
[{"chess/games.csv": {"column_names": "[\"id\", \"rated\", \"created_at\", \"last_move_at\", \"turns\", \"victory_status\", \"winner\", \"increment_code\", \"white_id\", \"white_rating\", \"black_id\", \"black_rating\", \"moves\", \"opening_eco\", \"opening_name\", \"opening_ply\"]", "column_data_types": "{\"id\": \"object\", \"rated\": \"bool\", \"created_at\": \"float64\", \"last_move_at\": \"float64\", \"turns\": \"int64\", \"victory_status\": \"object\", \"winner\": \"object\", \"increment_code\": \"object\", \"white_id\": \"object\", \"white_rating\": \"int64\", \"black_id\": \"object\", \"black_rating\": \"int64\", \"moves\": \"object\", \"opening_eco\": \"object\", \"opening_name\": \"object\", \"opening_ply\": \"int64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 20058 entries, 0 to 20057\nData columns (total 16 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 id 20058 non-null object \n 1 rated 20058 non-null bool \n 2 created_at 20058 non-null float64\n 3 last_move_at 20058 non-null float64\n 4 turns 20058 non-null int64 \n 5 victory_status 20058 non-null object \n 6 winner 20058 non-null object \n 7 increment_code 20058 non-null object \n 8 white_id 20058 non-null object \n 9 white_rating 20058 non-null int64 \n 10 black_id 20058 non-null object \n 11 black_rating 20058 non-null int64 \n 12 moves 20058 non-null object \n 13 opening_eco 20058 non-null object \n 14 opening_name 20058 non-null object \n 15 opening_ply 20058 non-null int64 \ndtypes: bool(1), float64(2), int64(4), object(9)\nmemory usage: 2.3+ MB\n", "summary": "{\"created_at\": {\"count\": 20058.0, \"mean\": 1483616852629.0925, \"std\": 28501509421.00479, \"min\": 1376771633173.0, \"25%\": 1477547500000.0, \"50%\": 1496010000000.0, \"75%\": 1503170000000.0, \"max\": 1504493143790.0}, \"last_move_at\": {\"count\": 20058.0, \"mean\": 1483617722336.142, \"std\": 28501400588.89002, \"min\": 1376771863841.0, \"25%\": 1477547500000.0, \"50%\": 1496010000000.0, \"75%\": 1503170000000.0, \"max\": 1504493827262.0}, \"turns\": {\"count\": 20058.0, \"mean\": 60.46599860404826, \"std\": 33.570584753537126, \"min\": 1.0, \"25%\": 37.0, \"50%\": 55.0, \"75%\": 79.0, \"max\": 349.0}, \"white_rating\": {\"count\": 20058.0, \"mean\": 1596.6318675840064, \"std\": 291.2533757370189, \"min\": 784.0, \"25%\": 1398.0, \"50%\": 1567.0, \"75%\": 1793.0, \"max\": 2700.0}, \"black_rating\": {\"count\": 20058.0, \"mean\": 1588.8319872370128, \"std\": 291.03612596033423, \"min\": 789.0, \"25%\": 1391.0, \"50%\": 1562.0, \"75%\": 1784.0, \"max\": 2723.0}, \"opening_ply\": {\"count\": 20058.0, \"mean\": 4.8169807558081565, \"std\": 2.797151810630093, \"min\": 1.0, \"25%\": 3.0, \"50%\": 4.0, \"75%\": 6.0, \"max\": 28.0}}", "examples": 
"{\"id\":{\"0\":\"TZJHLljE\",\"1\":\"l1NXvwaE\",\"2\":\"mIICvQHh\",\"3\":\"kWKvrqYL\"},\"rated\":{\"0\":false,\"1\":true,\"2\":true,\"3\":true},\"created_at\":{\"0\":1504210000000.0,\"1\":1504130000000.0,\"2\":1504130000000.0,\"3\":1504110000000.0},\"last_move_at\":{\"0\":1504210000000.0,\"1\":1504130000000.0,\"2\":1504130000000.0,\"3\":1504110000000.0},\"turns\":{\"0\":13,\"1\":16,\"2\":61,\"3\":61},\"victory_status\":{\"0\":\"outoftime\",\"1\":\"resign\",\"2\":\"mate\",\"3\":\"mate\"},\"winner\":{\"0\":\"white\",\"1\":\"black\",\"2\":\"white\",\"3\":\"white\"},\"increment_code\":{\"0\":\"15+2\",\"1\":\"5+10\",\"2\":\"5+10\",\"3\":\"20+0\"},\"white_id\":{\"0\":\"bourgris\",\"1\":\"a-00\",\"2\":\"ischia\",\"3\":\"daniamurashov\"},\"white_rating\":{\"0\":1500,\"1\":1322,\"2\":1496,\"3\":1439},\"black_id\":{\"0\":\"a-00\",\"1\":\"skinnerua\",\"2\":\"a-00\",\"3\":\"adivanov2009\"},\"black_rating\":{\"0\":1191,\"1\":1261,\"2\":1500,\"3\":1454},\"moves\":{\"0\":\"d4 d5 c4 c6 cxd5 e6 dxe6 fxe6 Nf3 Bb4+ Nc3 Ba5 Bf4\",\"1\":\"d4 Nc6 e4 e5 f4 f6 dxe5 fxe5 fxe5 Nxe5 Qd4 Nc6 Qe5+ Nxe5 c4 Bb4+\",\"2\":\"e4 e5 d3 d6 Be3 c6 Be2 b5 Nd2 a5 a4 c5 axb5 Nc6 bxc6 Ra6 Nc4 a4 c3 a3 Nxa3 Rxa3 Rxa3 c4 dxc4 d5 cxd5 Qxd5 exd5 Be6 Ra8+ Ke7 Bc5+ Kf6 Bxf8 Kg6 Bxg7 Kxg7 dxe6 Kh6 exf7 Nf6 Rxh8 Nh5 Bxh5 Kg5 Rxh7 Kf5 Qf3+ Ke6 Bg4+ Kd6 Rh6+ Kc5 Qe3+ Kb5 c4+ Kb4 Qc3+ Ka4 Bd1#\",\"3\":\"d4 d5 Nf3 Bf5 Nc3 Nf6 Bf4 Ng4 e3 Nc6 Be2 Qd7 O-O O-O-O Nb5 Nb4 Rc1 Nxa2 Ra1 Nb4 Nxa7+ Kb8 Nb5 Bxc2 Bxc7+ Kc8 Qd2 Qc6 Na7+ Kd7 Nxc6 bxc6 Bxd8 Kxd8 Qxb4 e5 Qb8+ Ke7 dxe5 Be4 Ra7+ Ke6 Qe8+ Kf5 Qxf7+ Nf6 Nh4+ Kg5 g3 Ng4 Qf4+ Kh5 Qxg4+ Kh6 Qf4+ g5 Qf6+ Bg6 Nxg6 Bg7 Qxg7#\"},\"opening_eco\":{\"0\":\"D10\",\"1\":\"B00\",\"2\":\"C20\",\"3\":\"D02\"},\"opening_name\":{\"0\":\"Slav Defense: Exchange Variation\",\"1\":\"Nimzowitsch Defense: Kennedy Variation\",\"2\":\"King's Pawn Game: Leonardis Variation\",\"3\":\"Queen's Pawn Game: Zukertort Variation\"},\"opening_ply\":{\"0\":5,\"1\":4,\"2\":3,\"3\":3}}"}}]
| true | 1 |
<start_data_description><data_path>chess/games.csv:
<column_names>
['id', 'rated', 'created_at', 'last_move_at', 'turns', 'victory_status', 'winner', 'increment_code', 'white_id', 'white_rating', 'black_id', 'black_rating', 'moves', 'opening_eco', 'opening_name', 'opening_ply']
<column_types>
{'id': 'object', 'rated': 'bool', 'created_at': 'float64', 'last_move_at': 'float64', 'turns': 'int64', 'victory_status': 'object', 'winner': 'object', 'increment_code': 'object', 'white_id': 'object', 'white_rating': 'int64', 'black_id': 'object', 'black_rating': 'int64', 'moves': 'object', 'opening_eco': 'object', 'opening_name': 'object', 'opening_ply': 'int64'}
<dataframe_Summary>
{'created_at': {'count': 20058.0, 'mean': 1483616852629.0925, 'std': 28501509421.00479, 'min': 1376771633173.0, '25%': 1477547500000.0, '50%': 1496010000000.0, '75%': 1503170000000.0, 'max': 1504493143790.0}, 'last_move_at': {'count': 20058.0, 'mean': 1483617722336.142, 'std': 28501400588.89002, 'min': 1376771863841.0, '25%': 1477547500000.0, '50%': 1496010000000.0, '75%': 1503170000000.0, 'max': 1504493827262.0}, 'turns': {'count': 20058.0, 'mean': 60.46599860404826, 'std': 33.570584753537126, 'min': 1.0, '25%': 37.0, '50%': 55.0, '75%': 79.0, 'max': 349.0}, 'white_rating': {'count': 20058.0, 'mean': 1596.6318675840064, 'std': 291.2533757370189, 'min': 784.0, '25%': 1398.0, '50%': 1567.0, '75%': 1793.0, 'max': 2700.0}, 'black_rating': {'count': 20058.0, 'mean': 1588.8319872370128, 'std': 291.03612596033423, 'min': 789.0, '25%': 1391.0, '50%': 1562.0, '75%': 1784.0, 'max': 2723.0}, 'opening_ply': {'count': 20058.0, 'mean': 4.8169807558081565, 'std': 2.797151810630093, 'min': 1.0, '25%': 3.0, '50%': 4.0, '75%': 6.0, 'max': 28.0}}
<dataframe_info>
RangeIndex: 20058 entries, 0 to 20057
Data columns (total 16 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 id 20058 non-null object
1 rated 20058 non-null bool
2 created_at 20058 non-null float64
3 last_move_at 20058 non-null float64
4 turns 20058 non-null int64
5 victory_status 20058 non-null object
6 winner 20058 non-null object
7 increment_code 20058 non-null object
8 white_id 20058 non-null object
9 white_rating 20058 non-null int64
10 black_id 20058 non-null object
11 black_rating 20058 non-null int64
12 moves 20058 non-null object
13 opening_eco 20058 non-null object
14 opening_name 20058 non-null object
15 opening_ply 20058 non-null int64
dtypes: bool(1), float64(2), int64(4), object(9)
memory usage: 2.3+ MB
<some_examples>
{'id': {'0': 'TZJHLljE', '1': 'l1NXvwaE', '2': 'mIICvQHh', '3': 'kWKvrqYL'}, 'rated': {'0': False, '1': True, '2': True, '3': True}, 'created_at': {'0': 1504210000000.0, '1': 1504130000000.0, '2': 1504130000000.0, '3': 1504110000000.0}, 'last_move_at': {'0': 1504210000000.0, '1': 1504130000000.0, '2': 1504130000000.0, '3': 1504110000000.0}, 'turns': {'0': 13, '1': 16, '2': 61, '3': 61}, 'victory_status': {'0': 'outoftime', '1': 'resign', '2': 'mate', '3': 'mate'}, 'winner': {'0': 'white', '1': 'black', '2': 'white', '3': 'white'}, 'increment_code': {'0': '15+2', '1': '5+10', '2': '5+10', '3': '20+0'}, 'white_id': {'0': 'bourgris', '1': 'a-00', '2': 'ischia', '3': 'daniamurashov'}, 'white_rating': {'0': 1500, '1': 1322, '2': 1496, '3': 1439}, 'black_id': {'0': 'a-00', '1': 'skinnerua', '2': 'a-00', '3': 'adivanov2009'}, 'black_rating': {'0': 1191, '1': 1261, '2': 1500, '3': 1454}, 'moves': {'0': 'd4 d5 c4 c6 cxd5 e6 dxe6 fxe6 Nf3 Bb4+ Nc3 Ba5 Bf4', '1': 'd4 Nc6 e4 e5 f4 f6 dxe5 fxe5 fxe5 Nxe5 Qd4 Nc6 Qe5+ Nxe5 c4 Bb4+', '2': 'e4 e5 d3 d6 Be3 c6 Be2 b5 Nd2 a5 a4 c5 axb5 Nc6 bxc6 Ra6 Nc4 a4 c3 a3 Nxa3 Rxa3 Rxa3 c4 dxc4 d5 cxd5 Qxd5 exd5 Be6 Ra8+ Ke7 Bc5+ Kf6 Bxf8 Kg6 Bxg7 Kxg7 dxe6 Kh6 exf7 Nf6 Rxh8 Nh5 Bxh5 Kg5 Rxh7 Kf5 Qf3+ Ke6 Bg4+ Kd6 Rh6+ Kc5 Qe3+ Kb5 c4+ Kb4 Qc3+ Ka4 Bd1#', '3': 'd4 d5 Nf3 Bf5 Nc3 Nf6 Bf4 Ng4 e3 Nc6 Be2 Qd7 O-O O-O-O Nb5 Nb4 Rc1 Nxa2 Ra1 Nb4 Nxa7+ Kb8 Nb5 Bxc2 Bxc7+ Kc8 Qd2 Qc6 Na7+ Kd7 Nxc6 bxc6 Bxd8 Kxd8 Qxb4 e5 Qb8+ Ke7 dxe5 Be4 Ra7+ Ke6 Qe8+ Kf5 Qxf7+ Nf6 Nh4+ Kg5 g3 Ng4 Qf4+ Kh5 Qxg4+ Kh6 Qf4+ g5 Qf6+ Bg6 Nxg6 Bg7 Qxg7#'}, 'opening_eco': {'0': 'D10', '1': 'B00', '2': 'C20', '3': 'D02'}, 'opening_name': {'0': 'Slav Defense: Exchange Variation', '1': 'Nimzowitsch Defense: Kennedy Variation', '2': "King's Pawn Game: Leonardis Variation", '3': "Queen's Pawn Game: Zukertort Variation"}, 'opening_ply': {'0': 5, '1': 4, '2': 3, '3': 3}}
<end_description>
| 552 | 0 | 2,723 | 552 |
69393727
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler, StandardScaler
train = pd.read_csv("/kaggle/input/titanic/train.csv", index_col="PassengerId")
test = pd.read_csv("/kaggle/input/titanic/test.csv", index_col="PassengerId")
data = pd.concat([train, test])
# ----------------------------------------------------------------------------
submit = pd.DataFrame(test.index)
data["Sex"] = data["Sex"].replace({"male": 1, "female": 0})
# ----------------------------------------------------------------------------
b = []
for i in range(0, len(data)):
b.append(data["Name"].iloc[i].split(",")[1].split(".")[0][1:])
data["title"] = b
data["title"] = data["title"].replace(
[
"Capt",
"Sir",
"Dona",
"Mme",
"Jonkheer",
"the Countess",
"Don",
"Lady",
"Mlle",
"Major",
"Ms",
],
"Rare",
)
data["title"] = data["title"].replace(
{"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5, "Rev": 6, "Dr": 7, "Col": 8}
)
# ----------------------------------------------------------------------------
data["passenger"] = data["SibSp"] + data["Parch"] + 1
def family_member(size):
a = ""
if size <= 1:
a = 1
elif size <= 3:
a = 2
elif size <= 5:
a = 3
else:
a = 4
return a
data["family_no"] = data["passenger"].map(family_member)
# --------------------------------------------------------------------------
data = data.drop(["Name", "Ticket", "Cabin"], axis=1)
# -------------------------------------------------------------------------
data = pd.get_dummies(data, columns=["Embarked"], drop_first=True)
data["Fare"] = np.sqrt(data["Fare"])
# ----------------------------------------------------------------------
train = data[: len(train)].copy()
test = data[len(train) :].copy()
test = test.drop("Survived", axis=1)  # avoid chained-assignment warnings from an inplace drop on a slice
from sklearn.pipeline import Pipeline
from sklearn.model_selection import RepeatedStratifiedKFold, cross_val_score
from sklearn.impute import KNNImputer
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.feature_selection import SelectKBest, chi2
pipeline = Pipeline(
steps=[
("impute", KNNImputer(n_neighbors=12)),
("scale", MinMaxScaler()),
("s", SelectKBest(score_func=chi2, k=5)),
("model", GradientBoostingClassifier()),
]
)
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3)
x = train.drop(["Survived"], axis=1)
y = train["Survived"]
score = cross_val_score(pipeline, x, y, scoring="accuracy", cv=cv)
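# Report the cross-validation result computed above (it is otherwise never shown):
print(f"CV accuracy: {score.mean():.4f} +/- {score.std():.4f}")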
pipeline.fit(x, y)
a = pipeline.predict(test)
submit["Survived"] = a.astype(int)
submit.to_csv("ver1.csv", index=False)
x
test
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/393/69393727.ipynb
| null | null |
[{"Id": 69393727, "ScriptId": 18931438, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4875623, "CreationDate": "07/30/2021 12:44:58", "VersionNumber": 5.0, "Title": "notebook533523103f", "EvaluationDate": "07/30/2021", "IsChange": true, "TotalLines": 108.0, "LinesInsertedFromPrevious": 13.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 95.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import MinMaxScaler, StandardScaler
train = pd.read_csv("/kaggle/input/titanic/train.csv", index_col="PassengerId")
test = pd.read_csv("/kaggle/input/titanic/test.csv", index_col="PassengerId")
data = pd.concat([train, test])
# ----------------------------------------------------------------------------
submit = pd.DataFrame(test.index)
data["Sex"] = data["Sex"].replace({"male": 1, "female": 0})
# ----------------------------------------------------------------------------
b = []
for i in range(0, len(data)):
b.append(data["Name"].iloc[i].split(",")[1].split(".")[0][1:])
data["title"] = b
data["title"] = data["title"].replace(
[
"Capt",
"Sir",
"Dona",
"Mme",
"Jonkheer",
"the Countess",
"Don",
"Lady",
"Mlle",
"Major",
"Ms",
],
"Rare",
)
data["title"] = data["title"].replace(
{"Mr": 1, "Miss": 2, "Mrs": 3, "Master": 4, "Rare": 5, "Rev": 6, "Dr": 7, "Col": 8}
)
# ----------------------------------------------------------------------------
data["passenger"] = data["SibSp"] + data["Parch"] + 1
def family_member(size):
a = ""
if size <= 1:
a = 1
elif size <= 3:
a = 2
elif size <= 5:
a = 3
else:
a = 4
return a
data["family_no"] = data["passenger"].map(family_member)
# --------------------------------------------------------------------------
data = data.drop(["Name", "Ticket", "Cabin"], axis=1)
# -------------------------------------------------------------------------
data = pd.get_dummies(data, columns=["Embarked"], drop_first=True)
data["Fare"] = np.sqrt(data["Fare"])
# ----------------------------------------------------------------------
train = data[: len(train)].copy()
test = data[len(train) :].copy()
test = test.drop("Survived", axis=1)  # avoid chained-assignment warnings from an inplace drop on a slice
from sklearn.pipeline import Pipeline
from sklearn.model_selection import RepeatedStratifiedKFold, cross_val_score
from sklearn.impute import KNNImputer
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.feature_selection import SelectKBest, chi2
pipeline = Pipeline(
steps=[
("impute", KNNImputer(n_neighbors=12)),
("scale", MinMaxScaler()),
("s", SelectKBest(score_func=chi2, k=5)),
("model", GradientBoostingClassifier()),
]
)
cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3)
x = train.drop(["Survived"], axis=1)
y = train["Survived"]
score = cross_val_score(pipeline, x, y, scoring="accuracy", cv=cv)
pipeline.fit(x, y)
a = pipeline.predict(test)
submit["Survived"] = a.astype(int)
submit.to_csv("ver1.csv", index=False)
x
test
| false | 0 | 981 | 0 | 981 | 981 |
||
69393976
|
# # Introduction
# You have been transferred to the recommender-systems R&D department. Your employer wants to increase the average order value. After a quick look at the literature you realised that the key to success is high-quality recommendations on the site: the better you recommend products to a user, the more often they add items to the cart. Simple logic: if a user likes a product, they add it to the cart -> the average order value grows. We struck gold!
# Dataset
# You are given each user's rating history together with their reviews. You may use the review text as additional information. All user ratings have been normalised for binary classification: if a person rated a product higher than 3 (exclusive), we consider that they liked it; if lower than 4, that they did not.
# Metrics
# **RocAuc** is used as the metric for evaluating your recommendations.
# Competition rules
# This competition is open-ended and available to all cohorts; the deadline is set individually per cohort. The test set is fully visible on the leaderboard, so the best and winning solutions will be checked for "sanity" (to rule out fitting to the test set). No additional external data is needed for this competition, and using it is forbidden. Any ML algorithms and libraries are allowed.
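# As a minimal, self-contained illustration of the binarisation rule described above
# (a toy example with a hypothetical `overall` column of raw 1-5 ratings):
import pandas as pd

overall = pd.Series([5, 4, 3, 2, 1])
liked = (overall > 3).astype(int)  # 1 = liked (rating above 3), 0 = not liked
print(liked.tolist())  # [1, 1, 0, 0, 0]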
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import json
import keras
from keras import backend as K
from keras.models import Sequential, Model
from keras.layers import (
Embedding,
Flatten,
Dense,
Dropout,
concatenate,
Input,
BatchNormalization,
)
from keras.optimizers import Adam
from keras import metrics
from keras.utils.vis_utils import plot_model
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
# Any results you write to the current directory are saved as output.
import sys
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# **test.csv** - the dataset for which you have to make predictions. Each **userid, itemid** pair has its own id for which you must predict.
# * **overall** - the rating the user gave
# * **verified** - whether the review was verified
# * **reviewTime** - when the review was written
# * **reviewerName** - the user's name
# * **reviewText** - the review text
# * **summary** - a condensed version of the review
# * **unixReviewTime** - the review date as a unix timestamp
# * **vote** - the number of votes the review received
# * **style** - metadata
# * **image** - the product image
# * **userid** - the user id
# * **itemid** - the item id
# * **id** - the id for the prediction
# test.info()  # note: `test` is only loaded further below, so calling it here would raise a NameError
print("Python :", sys.version.split("\n")[0])
print("Pandas :", pd.__version__)
print("Numpy :", np.__version__)
print("Keras :", keras.__version__)
# # Library setup, helper functions, and data loading
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.sparse as sparse
from lightfm import LightFM
from lightfm.cross_validation import random_train_test_split
from lightfm.evaluation import auc_score, precision_at_k, recall_at_k
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.metrics import auc, roc_auc_score, roc_curve
import scipy.sparse as sparse
from scipy.sparse import csr_matrix, csc_matrix
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.metrics import make_scorer, accuracy_score
import itertools
"""
# install pandas-profiling
from pandas_profiling import ProfileReport
"""
RANDOM_STATE = 32
# default parameters for the LightFM model
NUM_THREADS = 4  # number of worker threads
NUM_COMPONENTS = 30  # size of the latent embedding vectors
NUM_EPOCHS = 20  # number of training epochs
# Helper functions
def show_roc_curve(y_true, y_pred_prob):
"""Функция отображает ROC-кривую"""
fpr, tpr, _ = roc_curve(y_true, y_pred_prob)
plt.figure()
plt.plot([0, 1], label="Случайный классификатор", linestyle="--")
plt.plot(fpr, tpr, label="LightFM")
plt.title("ROC AUC = %0.3f" % roc_auc_score(y_true, y_pred_prob))
plt.ylabel("True Positive Rate")
plt.xlabel("False Positive Rate")
plt.legend(loc="lower right")
plt.show()
def get_boxplot(column, ds, ord):
fig, ax = plt.subplots(figsize=(14, 4))
sns.boxplot(
x=column,
y=ord,
data=ds.loc[
ds.loc[:, column].isin(ds.loc[:, column].value_counts().index[:10])
],
ax=ax,
)
plt.xticks(rotation=45)
ax.set_title("Boxplot for " + column)
plt.show()
def rmse(y_true, y_pred):
return np.sqrt(np.square(y_pred - y_true).sum() / len(y_true))
def feature_init():
    # Lists of categorical, binary, and numeric features
cat_cols = ["reviewerName", "reviewText", "summary", "style", "image"]
bin_cols = ["verified"]
num_cols = ["overall", "unixReviewTime", "rating", "userid", "itemid"] # 'vote',
return cat_cols, bin_cols, num_cols
def put_submission(preds):
normalized_preds = (preds - preds.min()) / (preds - preds.min()).max()
submission["rating"] = normalized_preds
submission.to_csv("./submission_log.csv", index=False)
return submission
def create_review_tone(data):
    # Flag reviews whose text contains clearly positive / negative words.
    data["good_review"] = 0
    data["bad_review"] = 0
    ind = 0
    for review in data["reviewText"]:
        if pd.isna(review):
            ind += 1
            continue  # skip missing reviews instead of stopping the whole loop
        for word in good_words_list:
            if word in review:
                data.at[ind, "good_review"] = 1
                break
        for word in bad_words_list:
            if word in review:
                data.at[ind, "bad_review"] = 1
                break
        ind += 1
def create_summary_tone(data):
    # Flag summaries that contain clearly positive / negative words.
    data["good_summary"] = 0
    data["bad_summary"] = 0
    ind = 0
    for review in data["summary"]:
        if pd.isna(review):
            ind += 1
            continue  # skip missing summaries instead of stopping the whole loop
        for word in good_words_list:
            if word in review:
                data.at[ind, "good_summary"] = 1
                break
        for word in bad_words_list:
            if word in review:
                data.at[ind, "bad_summary"] = 1
                break
        ind += 1
def sample_hyperparameters():
"""
Yield possible hyperparameter choices.
"""
while True:
yield {
"no_components": np.random.randint(16, 64),
"learning_schedule": np.random.choice(["adagrad", "adadelta"]),
"loss": np.random.choice(["bpr", "warp", "warp-kos"]),
"learning_rate": np.random.exponential(0.05),
"item_alpha": np.random.exponential(1e-8),
"user_alpha": np.random.exponential(1e-8),
"max_sampled": np.random.randint(5, 15),
"num_epochs": np.random.randint(5, 50),
}
def random_search(
train,
test,
num_samples=10,
num_threads=1,
user_features=np.array([]),
item_features=np.array([]),
):
"""
Sample random hyperparameters, fit a LightFM model, and evaluate it
on the test set.
Parameters
----------
train: np.float32 coo_matrix of shape [n_users, n_items]
Training data.
test: np.float32 coo_matrix of shape [n_users, n_items]
Test data.
num_samples: int, optional
Number of hyperparameter choices to evaluate.
Returns
-------
generator of (auc_score, hyperparameter dict, fitted model)
"""
for hyperparams in itertools.islice(sample_hyperparameters(), num_samples):
num_epochs = hyperparams.pop("num_epochs")
model = LightFM(**hyperparams)
if (item_features.getnnz() == 0) & (user_features.getnnz() == 0):
model.fit(train, epochs=num_epochs, num_threads=num_threads)
elif (item_features.getnnz() > 0) & (user_features.getnnz() == 0):
model.fit(
train,
epochs=num_epochs,
num_threads=num_threads,
item_features=item_features,
)
elif (item_features.getnnz() == 0) & (user_features.getnnz() > 0):
model.fit(
train,
epochs=num_epochs,
num_threads=num_threads,
user_features=user_features,
)
else:
model.fit(
train,
epochs=num_epochs,
num_threads=num_threads,
user_features=user_features,
item_features=item_features,
)
score = auc_score(
model,
test_interactions=test,
train_interactions=train,
num_threads=num_threads,
check_intersections=False,
).mean()
hyperparams["num_epochs"] = num_epochs
yield (score, hyperparams, model)
train = pd.read_csv("/kaggle/input/recommendationsv4/train.csv", low_memory=False)
test = pd.read_csv("/kaggle/input/recommendationsv4/test.csv", low_memory=False)
submission = pd.read_csv("/kaggle/input/recommendationsv4/sample_submission.csv")
# # Building a naive model
"""
train_data, test_data = train_test_split(train,random_state=RANDOM_STATE, shuffle=True)
train_data.info()
"""
"""
ratings_coo = sparse.coo_matrix((train_data['rating'].astype(int),
(train_data['userid'],
train_data['itemid'])))
"""
"""
NUM_THREADS = 4 #число потоков
NUM_COMPONENTS = 30 #число параметров вектора
NUM_EPOCHS = 20 #число эпох обучения
model = LightFM(learning_rate=0.1, loss='logistic',
no_components=NUM_COMPONENTS)
model = model.fit(ratings_coo, epochs=NUM_EPOCHS,
num_threads=NUM_THREADS)
"""
"""
preds = model.predict(test_data.userid.values,
test_data.itemid.values)
"""
# rmse(test_data['rating'], preds)
# show_roc_curve(test_data.rating, preds)
# sklearn.metrics.roc_auc_score(test_data.rating,preds)
# rmse(test_data['rating'], preds)
# ## Submission
# preds = model.predict(test.userid.values,
# test.itemid.values)
# put_submission(preds)
# submission roc_auc 0.74475
# # EDA
train.info()
train.sample()
# ProfileReport(train, title="EDA")
# Findings from the preliminary EDA
# * the data contains many duplicates
# * the columns **reviewerName, reviewText, vote, style, image** have many missing values
# * the **unixReviewTime** field can be used for feature engineering, as sketched below
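# A minimal sketch of such feature engineering (assuming `unixReviewTime` holds seconds since
# the epoch, as the column description above suggests; the new column names are just illustrative):
review_time = pd.to_datetime(train["unixReviewTime"], unit="s")
train["review_year"] = review_time.dt.year
train["review_month"] = review_time.dt.month
train["review_dayofweek"] = review_time.dt.dayofweek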
# Let's check how balanced the target variable **rating** is
ax = sns.countplot(x="rating", data=train)
"""
# сбалансируем целевую переменную с помощью функции библиотеки imblearn
from imblearn.over_sampling import SMOTE
cat_cols, bin_cols, num_cols = feature_init()
y = train.rating
X = train[num_cols]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=RANDOM_STATE, shuffle=True)
sm = SMOTE(random_state=RANDOM_STATE)
X_train, y_train = sm.fit_sample(X_train, y_train)
"""
# ax = sns.countplot(x="rating", data=X_train)
"""
ratings_coo = sparse.coo_matrix((X_train['rating'].astype(int),
(X_train['userid'],
X_train['itemid'])))
"""
"""
NUM_THREADS = 4 #число потоков
NUM_COMPONENTS = 30 #число параметров вектора
NUM_EPOCHS = 20 #число эпох обучения
model = LightFM(learning_rate=0.1, loss='logistic',
no_components=NUM_COMPONENTS)
model = model.fit(ratings_coo, epochs=NUM_EPOCHS,
num_threads=NUM_THREADS)
"""
"""
preds = model.predict(X_test.userid.values,
X_test.itemid.values)
"""
# show_roc_curve(X_test.rating, preds)
# The metric got worse. We could try converting the categorical variables to numbers.
# # Embedding
"""
# user_feature
train['user_rating'] = train['overall'].apply(lambda x: 1 if x > 3 else 0)
train['verified'] = train['verified'].astype(int)
train_data, test_data = train_test_split(train,random_state=RANDOM_STATE, shuffle=True)
ratings_coo = sparse.coo_matrix((train_data['rating'].astype(int),
(train_data['userid'],
train_data['itemid'])))
total_users = max(train_data['userid'].max(), test_data['userid'].max()) + 1
identity_matrix = sparse.identity(total_users)
user_features = sparse.coo_matrix(identity_matrix, train_data[['user_rating', 'verified']])
model = LightFM(learning_rate=0.1, loss='logistic',
no_components=NUM_COMPONENTS)
model = model.fit(ratings_coo, epochs=NUM_EPOCHS,
user_features=user_features,
num_threads=NUM_THREADS)
preds = model.predict(test_data.userid.values,
test_data.itemid.values,
user_features=user_features)
show_roc_curve(test_data.rating, preds)
"""
# rmse(test_data['rating'], preds)
"""
# submission
total_users = max(train_data['userid'].max(), test_data['userid'].max()) + 1
identity_matrix = sparse.identity(total_users)
user_features = sparse.coo_matrix(identity_matrix, train_data[['user_rating', 'verified']])
preds = model.predict(test.userid.values,
test.itemid.values,
user_features=user_features)
put_submission(preds)
"""
# This improves on the naive model: 0.74522
# ## Using data from the additional metadata file
# Read the metadata json line by line and collect the result into the "meta" DataFrame
with open("/kaggle/input/recommendationsv4/meta_Grocery_and_Gourmet_Food.json") as f:
meta_list = []
for line in f.readlines():
meta_list.append(json.loads(line))
meta = pd.DataFrame(meta_list)
data = pd.merge(train, meta, on="asin")
data = pd.DataFrame(
data[["itemid", "userid", "rank", "price", "rating", "verified", "overall"]]
)
data["user_rating"] = data["overall"].apply(lambda x: 1 if x > 3 else 0)
data["verified"] = data["verified"].astype(int)
data["price"] = data["price"].apply(
lambda x: 0.0 if pd.isna(x) else float(eval(x.replace("$", "")))
)
data["rank"] = data["rank"].apply(
lambda x: 0.0
if pd.isna(x)
else float(re.search("\d+,?\d*", str(x))[0].replace(",", ""))
)
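# Quick check that the parsed numeric columns look sane after the cleaning above (inspection only):
print(data[["price", "rank"]].describe())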
# rank, price item_feature
#
train_data, test_data = train_test_split(data, random_state=RANDOM_STATE, shuffle=True)
total_users = max(train_data["userid"].max(), test_data["userid"].max()) + 1
identity_matrix = sparse.identity(total_users)
user_features = sparse.coo_matrix(
identity_matrix, train_data[["user_rating", "verified"]]
)
#
ratings_coo = sparse.coo_matrix(
(train_data["rating"].astype(int), (train_data["userid"], train_data["itemid"]))
)
total_items = max(train_data["itemid"].max(), test_data["itemid"].max()) + 1
identity_matrix = sparse.identity(total_items)
item_features = sparse.coo_matrix(identity_matrix, train_data[["rank", "price"]])
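# Note: `sparse.coo_matrix(identity_matrix, <DataFrame>)` above does not merge the extra
# columns into the matrix (the second positional argument is `shape`), so these "feature"
# matrices end up as plain identity matrices. A more conventional LightFM pattern, shown here
# only as a hedged sketch and not a drop-in replacement, is to aggregate features to one row
# per entity and hstack them onto the identity block:
user_agg = (
    data.groupby("userid")[["user_rating", "verified"]]
    .mean()
    .reindex(range(int(total_users)), fill_value=0)
)
user_features_alt = sparse.hstack(
    [sparse.identity(total_users), sparse.csr_matrix(user_agg.values)]
).tocsr()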
model = LightFM(learning_rate=0.1, loss="logistic", no_components=NUM_COMPONENTS)
model = model.fit(
ratings_coo,
epochs=NUM_EPOCHS,
item_features=item_features,
user_features=user_features,
num_threads=NUM_THREADS,
)
# item_features = csr_matrix(test_data[['rank', 'price']])
preds = model.predict(
test_data.userid.values,
test_data.itemid.values,
item_features=item_features,
user_features=user_features,
)
# evaluate the result
show_roc_curve(test_data.rating, preds)
# submission
# item_features = csr_matrix(train_data[['user_rating']])
preds = model.predict(
test.userid.values,
test.itemid.values,
item_features=item_features,
user_features=user_features,
)
put_submission(preds)
# The result is even worse. The more features we add, the worse it gets.
# Let's try using the review text instead.
# # Using reviewText
good_words_list = [
"best",
"good",
"great",
"love",
"delicious",
"nice",
"favorite",
"tasty",
"perfect",
"excellent",
"wonderful",
"enjoy",
"yummy",
"happy",
"loves",
"loved",
"amazing",
"awesome",
"yum",
"enjoyed",
"fantastic",
"perfectly",
"wow",
"lovely",
"beautiful",
"terrific",
"enjoyable",
]
bad_words_list = [
"awful",
"bad",
"disappointed",
"unfortunate",
"waste",
"weird",
"difficult",
"terrible",
"horrible",
"complaint",
"gross",
"worst",
"strange",
"fake",
"disappointing",
"complaints",
"poor",
"sucks",
]
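# A vectorized alternative to the row-by-row loops in create_review_tone / create_summary_tone
# (a sketch using plain substring matching to mirror the functions above; the *_vec column
# names are just illustrative):
good_pattern = "|".join(good_words_list)
bad_pattern = "|".join(bad_words_list)
train["good_review_vec"] = (
    train["reviewText"].fillna("").str.contains(good_pattern).astype(int)
)
train["bad_review_vec"] = (
    train["reviewText"].fillna("").str.contains(bad_pattern).astype(int)
)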
"""
create_review_tone(train)
create_summary_tone(train)
train_data, test_data = train_test_split(train,random_state=RANDOM_STATE, shuffle=True)
ratings_coo = sparse.coo_matrix((train_data['rating'].astype(int),
(train_data['userid'],
train_data['itemid'])))
total_items = max(train_data['itemid'].max(), test_data['itemid'].max()) + 1
identity_matrix = sparse.identity(total_items)
item_features = sparse.coo_matrix(identity_matrix, train_data[['good_review', 'bad_review', 'good_summary', 'bad_summary']])
model = LightFM(learning_rate=0.1, loss='logistic',
no_components=NUM_COMPONENTS)
model = model.fit(ratings_coo, epochs=NUM_EPOCHS,
item_features=item_features,
num_threads=NUM_THREADS)
preds = model.predict(test_data.userid.values,
test_data.itemid.values,
item_features=item_features)
show_roc_curve(test_data.rating, preds)
"""
# rmse(test_data['rating'], preds)
# submission
"""
total_items = max(train_data['itemid'].max(), test_data['itemid'].max()) + 1
identity_matrix = sparse.identity(total_items)
item_features = sparse.coo_matrix(identity_matrix, train_data[['good_review', 'bad_review', 'good_summary', 'bad_summary']])
preds = model.predict(test.userid.values,
test.itemid.values,
item_features=item_features)
put_submission(preds)
"""
# This improves on the naive model: 0.74522
# Using user & item embeddings together
train["user_rating"] = train["overall"].apply(lambda x: 1 if x > 3 else 0)
train["verified"] = train["verified"].astype(int)
create_review_tone(train)
create_summary_tone(train)
train_data, test_data = train_test_split(train, random_state=RANDOM_STATE, shuffle=True)
ratings_coo = sparse.coo_matrix(
(train_data["rating"].astype(int), (train_data["userid"], train_data["itemid"]))
)
total_users = max(train_data["userid"].max(), test_data["userid"].max()) + 1
total_items = max(train_data["itemid"].max(), test_data["itemid"].max()) + 1
# user_feature
identity_matrix = sparse.identity(total_users)
user_features = sparse.coo_matrix(
identity_matrix, train_data[["user_rating", "verified"]]
)
# item_feature
identity_matrix = sparse.identity(total_items)
item_features = sparse.coo_matrix(
identity_matrix,
train_data[["good_review", "bad_review", "good_summary", "bad_summary"]],
)
model = LightFM(learning_rate=0.1, loss="logistic", no_components=NUM_COMPONENTS)
model = model.fit(
ratings_coo,
epochs=NUM_EPOCHS,
user_features=user_features,
item_features=item_features,
num_threads=NUM_THREADS,
)
preds = model.predict(
test_data.userid.values,
test_data.itemid.values,
user_features=user_features,
item_features=item_features,
)
show_roc_curve(test_data.rating, preds)
rmse(test_data["rating"], preds)
# submission
total_users = max(train_data["userid"].max(), test_data["userid"].max()) + 1
total_items = max(train_data["itemid"].max(), test_data["itemid"].max()) + 1
identity_matrix = sparse.identity(total_users)
user_features = sparse.coo_matrix(
identity_matrix, train_data[["user_rating", "verified"]]
)
identity_matrix = sparse.identity(total_items)
item_features = sparse.coo_matrix(
identity_matrix,
train_data[["good_review", "bad_review", "good_summary", "bad_summary"]],
)
preds = model.predict(
test.userid.values,
test.itemid.values,
user_features=user_features,
item_features=item_features,
)
put_submission(preds)
# Using user & item features together slightly improved roc auc: 0.74530
# ## Hyperparameters tuning
# ### without features
"""
# Настройка гиперпараметров модели
train_coo = sparse.coo_matrix((train['rating'].astype(int),
(train['userid'],
train['itemid'])))
train_data, test_data = random_train_test_split(train_coo)
(score, hyperparams, model) = max(random_search(train_data, test_data, num_threads=2), key=lambda x: x[0])
print("Best score {} at {}".format(score, hyperparams))
"""
"""
EPOCHS = hyperparams['num_epochs']
del hyperparams['num_epochs']
hyperparams
"""
# submission roc_auc 0.55700
"""
train_data, test_data = train_test_split(train,random_state=RANDOM_STATE, shuffle=True)
ratings_coo = sparse.coo_matrix((train_data['rating'].astype(int),
(train_data['userid'],
train_data['itemid'])))
model = LightFM(**hyperparams)
# model.fit(train, epochs=num_epochs, num_threads=num_threads)
model = model.fit(ratings_coo, epochs=EPOCHS,
num_threads=NUM_THREADS
)
preds = model.predict(test_data.userid.values,
test_data.itemid.values,
)
show_roc_curve(test_data.rating, preds)
"""
"""
# submission
preds = model.predict(test.userid.values,
test.itemid.values,
)
put_submission(preds)
"""
# submission roc_auc 0.53664
# ### with features
"""
# подготовка user & item emdeddings
train['user_rating'] = train['overall'].apply(lambda x: 1 if x > 3 else 0)
train['verified'] = train['verified'].astype(int)
create_review_tone(train)
create_summary_tone(train)
train_data, test_data = train_test_split(train,random_state=RANDOM_STATE, shuffle=True)
total_users = max(train_data['userid'].max(), test_data['userid'].max()) + 1
total_items = max(train_data['itemid'].max(), test_data['itemid'].max()) + 1
identity_matrix = sparse.identity(total_users)
user_features = sparse.coo_matrix(identity_matrix, train_data[['user_rating', 'verified']])
identity_matrix = sparse.identity(total_items)
item_features = sparse.coo_matrix(identity_matrix, train_data[['good_review', 'bad_review', 'good_summary', 'bad_summary']])
# Настройка гиперпараметров модели
train_coo = sparse.coo_matrix((train['rating'].astype(int),
(train['userid'],
train['itemid'])))
train_data, test_data = random_train_test_split(train_coo)
(score, hyperparams, model) = max(random_search(train_data, test_data, num_samples=10, num_threads=2, user_features=user_features, item_features=item_features), key=lambda x: x[0])
print("Best score {} at {}".format(score, hyperparams))
"""
"""
EPOCHS = hyperparams['num_epochs']
del hyperparams['num_epochs']
hyperparams
"""
"""
# Модель с оптимизированными гиперпараметрами
train['user_rating'] = train['overall'].apply(lambda x: 1 if x > 3 else 0)
train['verified'] = train['verified'].astype(int)
create_review_tone(train)
create_summary_tone(train)
train_data, test_data = train_test_split(train,random_state=RANDOM_STATE, shuffle=True)
ratings_coo = sparse.coo_matrix((train_data['rating'].astype(int),
(train_data['userid'],
train_data['itemid'])))
total_users = max(train_data['userid'].max(), test_data['userid'].max()) + 1
total_items = max(train_data['itemid'].max(), test_data['itemid'].max()) + 1
# user_feature
identity_matrix = sparse.identity(total_users)
user_features = sparse.coo_matrix(identity_matrix, train_data[['user_rating', 'verified']])
#item_feature
identity_matrix = sparse.identity(total_items)
item_features = sparse.coo_matrix(identity_matrix, train_data[['good_review', 'bad_review', 'good_summary', 'bad_summary']])
model = LightFM(**hyperparams)
# model.fit(train, epochs=num_epochs, num_threads=num_threads)
model = model.fit(ratings_coo, epochs=EPOCHS,
user_features=user_features,
item_features=item_features,
num_threads=NUM_THREADS
)
preds = model.predict(test_data.userid.values,
test_data.itemid.values,
user_features=user_features,
item_features=item_features)
show_roc_curve(test_data.rating, preds)
"""
"""
# submission
train_data, test_data = train_test_split(train,random_state=RANDOM_STATE, shuffle=True)
total_users = max(train_data['userid'].max(), test_data['userid'].max()) + 1
total_items = max(train_data['itemid'].max(), test_data['itemid'].max()) + 1
identity_matrix = sparse.identity(total_users)
user_features = sparse.coo_matrix(identity_matrix, train_data[['user_rating', 'verified']])
identity_matrix = sparse.identity(total_items)
item_features = sparse.coo_matrix(identity_matrix, train_data[['good_review', 'bad_review', 'good_summary', 'bad_summary']])
preds = model.predict(test.userid.values,
test.itemid.values,
user_features=user_features,
item_features=item_features)
put_submission(preds)
"""
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/393/69393976.ipynb
| null | null |
[{"Id": 69393976, "ScriptId": 17955412, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6543233, "CreationDate": "07/30/2021 12:48:35", "VersionNumber": 21.0, "Title": "My baseline_logreg", "EvaluationDate": "07/30/2021", "IsChange": true, "TotalLines": 788.0, "LinesInsertedFromPrevious": 64.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 724.0, "LinesInsertedFromFork": 737.0, "LinesDeletedFromFork": 35.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 51.0, "TotalVotes": 0}]
| null | null | null | null |
# # Introduction
# You have been transferred to the recommender-systems R&D department. Your employer wants to increase the average order value. After a quick look at the literature you realised that the key to success is high-quality recommendations on the site: the better you recommend products to a user, the more often they add items to the cart. Simple logic: if a user likes a product, they add it to the cart -> the average order value grows. We struck gold!
# Dataset
# You are given each user's rating history together with their reviews. You may use the review text as additional information. All user ratings have been normalised for binary classification: if a person rated a product higher than 3 (exclusive), we consider that they liked it; if lower than 4, that they did not.
# Metrics
# **RocAuc** is used as the metric for evaluating your recommendations.
# Competition rules
# This competition is open-ended and available to all cohorts; the deadline is set individually per cohort. The test set is fully visible on the leaderboard, so the best and winning solutions will be checked for "sanity" (to rule out fitting to the test set). No additional external data is needed for this competition, and using it is forbidden. Any ML algorithms and libraries are allowed.
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import re
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import json
import keras
from keras import backend as K
from keras.models import Sequential, Model
from keras.layers import (
Embedding,
Flatten,
Dense,
Dropout,
concatenate,
Input,
BatchNormalization,
)
from keras.optimizers import Adam
from keras import metrics
from keras.utils.vis_utils import plot_model
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
# Any results you write to the current directory are saved as output.
import sys
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 5GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# **test.csv** - the dataset for which you have to make predictions. Each **userid, itemid** pair has its own id for which you must predict.
# * **overall** - the rating the user gave
# * **verified** - whether the review was verified
# * **reviewTime** - when the review was written
# * **reviewerName** - the user's name
# * **reviewText** - the review text
# * **summary** - a condensed version of the review
# * **unixReviewTime** - the review date as a unix timestamp
# * **vote** - the number of votes the review received
# * **style** - metadata
# * **image** - the product image
# * **userid** - the user id
# * **itemid** - the item id
# * **id** - the id for the prediction
# test.info()  # note: `test` is only loaded further below, so calling it here would raise a NameError
print("Python :", sys.version.split("\n")[0])
print("Pandas :", pd.__version__)
print("Numpy :", np.__version__)
print("Keras :", keras.__version__)
# # Library setup, helper functions, and data loading
import matplotlib.pyplot as plt
import seaborn as sns
import scipy.sparse as sparse
from lightfm import LightFM
from lightfm.cross_validation import random_train_test_split
from lightfm.evaluation import auc_score, precision_at_k, recall_at_k
import sklearn
from sklearn.model_selection import train_test_split
from sklearn.metrics import auc, roc_auc_score, roc_curve
import scipy.sparse as sparse
from scipy.sparse import csr_matrix, csc_matrix
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.metrics import make_scorer, accuracy_score
import itertools
"""
# install pandas-profiling
from pandas_profiling import ProfileReport
"""
RANDOM_STATE = 32
# default parameters for the LightFM model
NUM_THREADS = 4  # number of worker threads
NUM_COMPONENTS = 30  # size of the latent embedding vectors
NUM_EPOCHS = 20  # number of training epochs
# Helper functions
def show_roc_curve(y_true, y_pred_prob):
"""Функция отображает ROC-кривую"""
fpr, tpr, _ = roc_curve(y_true, y_pred_prob)
plt.figure()
plt.plot([0, 1], label="Случайный классификатор", linestyle="--")
plt.plot(fpr, tpr, label="LightFM")
plt.title("ROC AUC = %0.3f" % roc_auc_score(y_true, y_pred_prob))
plt.ylabel("True Positive Rate")
plt.xlabel("False Positive Rate")
plt.legend(loc="lower right")
plt.show()
def get_boxplot(column, ds, ord):
fig, ax = plt.subplots(figsize=(14, 4))
sns.boxplot(
x=column,
y=ord,
data=ds.loc[
ds.loc[:, column].isin(ds.loc[:, column].value_counts().index[:10])
],
ax=ax,
)
plt.xticks(rotation=45)
ax.set_title("Boxplot for " + column)
plt.show()
def rmse(y_true, y_pred):
return np.sqrt(np.square(y_pred - y_true).sum() / len(y_true))
def feature_init():
    # Lists of categorical, binary, and numeric features
cat_cols = ["reviewerName", "reviewText", "summary", "style", "image"]
bin_cols = ["verified"]
num_cols = ["overall", "unixReviewTime", "rating", "userid", "itemid"] # 'vote',
return cat_cols, bin_cols, num_cols
def put_submission(preds):
normalized_preds = (preds - preds.min()) / (preds - preds.min()).max()
submission["rating"] = normalized_preds
submission.to_csv("./submission_log.csv", index=False)
return submission
def create_review_tone(data):
    # Flag reviews whose text contains clearly positive / negative words.
    data["good_review"] = 0
    data["bad_review"] = 0
    ind = 0
    for review in data["reviewText"]:
        if pd.isna(review):
            ind += 1
            continue  # skip missing reviews instead of stopping the whole loop
        for word in good_words_list:
            if word in review:
                data.at[ind, "good_review"] = 1
                break
        for word in bad_words_list:
            if word in review:
                data.at[ind, "bad_review"] = 1
                break
        ind += 1
def create_summary_tone(data):
    # Flag summaries that contain clearly positive / negative words.
    data["good_summary"] = 0
    data["bad_summary"] = 0
    ind = 0
    for review in data["summary"]:
        if pd.isna(review):
            ind += 1
            continue  # skip missing summaries instead of stopping the whole loop
        for word in good_words_list:
            if word in review:
                data.at[ind, "good_summary"] = 1
                break
        for word in bad_words_list:
            if word in review:
                data.at[ind, "bad_summary"] = 1
                break
        ind += 1
def sample_hyperparameters():
"""
Yield possible hyperparameter choices.
"""
while True:
yield {
"no_components": np.random.randint(16, 64),
"learning_schedule": np.random.choice(["adagrad", "adadelta"]),
"loss": np.random.choice(["bpr", "warp", "warp-kos"]),
"learning_rate": np.random.exponential(0.05),
"item_alpha": np.random.exponential(1e-8),
"user_alpha": np.random.exponential(1e-8),
"max_sampled": np.random.randint(5, 15),
"num_epochs": np.random.randint(5, 50),
}
def random_search(
train,
test,
num_samples=10,
num_threads=1,
user_features=np.array([]),
item_features=np.array([]),
):
"""
Sample random hyperparameters, fit a LightFM model, and evaluate it
on the test set.
Parameters
----------
train: np.float32 coo_matrix of shape [n_users, n_items]
Training data.
test: np.float32 coo_matrix of shape [n_users, n_items]
Test data.
num_samples: int, optional
Number of hyperparameter choices to evaluate.
Returns
-------
generator of (auc_score, hyperparameter dict, fitted model)
"""
for hyperparams in itertools.islice(sample_hyperparameters(), num_samples):
num_epochs = hyperparams.pop("num_epochs")
model = LightFM(**hyperparams)
if (item_features.getnnz() == 0) & (user_features.getnnz() == 0):
model.fit(train, epochs=num_epochs, num_threads=num_threads)
elif (item_features.getnnz() > 0) & (user_features.getnnz() == 0):
model.fit(
train,
epochs=num_epochs,
num_threads=num_threads,
item_features=item_features,
)
elif (item_features.getnnz() == 0) & (user_features.getnnz() > 0):
model.fit(
train,
epochs=num_epochs,
num_threads=num_threads,
user_features=user_features,
)
else:
model.fit(
train,
epochs=num_epochs,
num_threads=num_threads,
user_features=user_features,
item_features=item_features,
)
score = auc_score(
model,
test_interactions=test,
train_interactions=train,
num_threads=num_threads,
check_intersections=False,
).mean()
hyperparams["num_epochs"] = num_epochs
yield (score, hyperparams, model)
train = pd.read_csv("/kaggle/input/recommendationsv4/train.csv", low_memory=False)
test = pd.read_csv("/kaggle/input/recommendationsv4/test.csv", low_memory=False)
submission = pd.read_csv("/kaggle/input/recommendationsv4/sample_submission.csv")
# # Создание наивной модели
"""
train_data, test_data = train_test_split(train,random_state=RANDOM_STATE, shuffle=True)
train_data.info()
"""
"""
ratings_coo = sparse.coo_matrix((train_data['rating'].astype(int),
(train_data['userid'],
train_data['itemid'])))
"""
"""
NUM_THREADS = 4 #число потоков
NUM_COMPONENTS = 30 #число параметров вектора
NUM_EPOCHS = 20 #число эпох обучения
model = LightFM(learning_rate=0.1, loss='logistic',
no_components=NUM_COMPONENTS)
model = model.fit(ratings_coo, epochs=NUM_EPOCHS,
num_threads=NUM_THREADS)
"""
"""
preds = model.predict(test_data.userid.values,
test_data.itemid.values)
"""
# rmse(test_data['rating'], preds)
# show_roc_curve(test_data.rating, preds)
# sklearn.metrics.roc_auc_score(test_data.rating,preds)
# rmse(test_data['rating'], preds)
# ## Submission
# preds = model.predict(test.userid.values,
# test.itemid.values)
# put_submission(preds)
# submission roc_auc 0.74475
# # EDA
train.info()
train.sample()
# ProfileReport(train, title="EDA")
# Результаты предварительного EDA
# * в данных много дублей
# * в колонках **revierName, reviewText, vote, style, image** много пропусков
# * поле **unixReviewTime** можно использовать для feature engineerig
# Посмотрим на сбалансированность целевой переменной **rating**
ax = sns.countplot(x="rating", data=train)
"""
# сбалансируем целевую переменную с помощью функции библиотеки imblearn
from imblearn.over_sampling import SMOTE
cat_cols, bin_cols, num_cols = feature_init()
y = train.rating
X = train[num_cols]
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=RANDOM_STATE, shuffle=True)
sm = SMOTE(random_state=RANDOM_STATE)
X_train, y_train = sm.fit_sample(X_train, y_train)
"""
# ax = sns.countplot(x="rating", data=X_train)
"""
ratings_coo = sparse.coo_matrix((X_train['rating'].astype(int),
(X_train['userid'],
X_train['itemid'])))
"""
"""
NUM_THREADS = 4 #число потоков
NUM_COMPONENTS = 30 #число параметров вектора
NUM_EPOCHS = 20 #число эпох обучения
model = LightFM(learning_rate=0.1, loss='logistic',
no_components=NUM_COMPONENTS)
model = model.fit(ratings_coo, epochs=NUM_EPOCHS,
num_threads=NUM_THREADS)
"""
"""
preds = model.predict(X_test.userid.values,
X_test.itemid.values)
"""
# show_roc_curve(X_test.rating, preds)
# Метрика стала хуже. Можно попытаться преобразовать категориальные переменные в цифру.
# # Embedding
"""
# user_feature
train['user_rating'] = train['overall'].apply(lambda x: 1 if x > 3 else 0)
train['verified'] = train['verified'].astype(int)
train_data, test_data = train_test_split(train,random_state=RANDOM_STATE, shuffle=True)
ratings_coo = sparse.coo_matrix((train_data['rating'].astype(int),
(train_data['userid'],
train_data['itemid'])))
total_users = max(train_data['userid'].max(), test_data['userid'].max()) + 1
identity_matrix = sparse.identity(total_users)
user_features = sparse.coo_matrix(identity_matrix, train_data[['user_rating', 'verified']])
model = LightFM(learning_rate=0.1, loss='logistic',
no_components=NUM_COMPONENTS)
model = model.fit(ratings_coo, epochs=NUM_EPOCHS,
user_features=user_features,
num_threads=NUM_THREADS)
preds = model.predict(test_data.userid.values,
test_data.itemid.values,
user_features=user_features)
show_roc_curve(test_data.rating, preds)
"""
# rmse(test_data['rating'], preds)
"""
# submission
total_users = max(train_data['userid'].max(), test_data['userid'].max()) + 1
identity_matrix = sparse.identity(total_users)
user_features = sparse.coo_matrix(identity_matrix, train_data[['user_rating', 'verified']])
preds = model.predict(test.userid.values,
test.itemid.values,
user_features=user_features)
put_submission(preds)
"""
# Получили улучшение по отношению к наивной модели 0.74522
# ## Используем данные из дополнительного файла
# Построчно прочитаем json с метаданными и положим результат в датасет "meta"
with open("/kaggle/input/recommendationsv4/meta_Grocery_and_Gourmet_Food.json") as f:
meta_list = []
for line in f.readlines():
meta_list.append(json.loads(line))
meta = pd.DataFrame(meta_list)
data = pd.merge(train, meta, on="asin")
data = pd.DataFrame(
data[["itemid", "userid", "rank", "price", "rating", "verified", "overall"]]
)
data["user_rating"] = data["overall"].apply(lambda x: 1 if x > 3 else 0)
data["verified"] = data["verified"].astype(int)
data["price"] = data["price"].apply(
lambda x: 0.0 if pd.isna(x) else float(eval(x.replace("$", "")))
)
data["rank"] = data["rank"].apply(
lambda x: 0.0
if pd.isna(x)
else float(re.search("\d+,?\d*", str(x))[0].replace(",", ""))
)
# rank, price item_feature
#
train_data, test_data = train_test_split(data, random_state=RANDOM_STATE, shuffle=True)
total_users = max(train_data["userid"].max(), test_data["userid"].max()) + 1
identity_matrix = sparse.identity(total_users)
user_features = sparse.coo_matrix(
identity_matrix, train_data[["user_rating", "verified"]]
)
#
ratings_coo = sparse.coo_matrix(
(train_data["rating"].astype(int), (train_data["userid"], train_data["itemid"]))
)
total_items = max(train_data["itemid"].max(), test_data["itemid"].max()) + 1
identity_matrix = sparse.identity(total_items)
item_features = sparse.coo_matrix(identity_matrix, train_data[["rank", "price"]])
model = LightFM(learning_rate=0.1, loss="logistic", no_components=NUM_COMPONENTS)
model = model.fit(
ratings_coo,
epochs=NUM_EPOCHS,
item_features=item_features,
user_features=user_features,
num_threads=NUM_THREADS,
)
# item_features = csr_matrix(test_data[['rank', 'price']])
preds = model.predict(
test_data.userid.values,
test_data.itemid.values,
item_features=item_features,
user_features=user_features,
)
# оцениваем результат
show_roc_curve(test_data.rating, preds)
# submission
# item_features = csr_matrix(train_data[['user_rating']])
preds = model.predict(
test.userid.values,
test.itemid.values,
item_features=item_features,
user_features=user_features,
)
put_submission(preds)
# Результат еще хуже. Чем больше фичей, тем хуже.
# Попробуем использовать текстовый файл.
# # используем reviewText
good_words_list = [
"best",
"good",
"great",
"love",
"delicious",
"nice",
"favorite",
"tasty",
"perfect",
"excellent",
"wonderful",
"enjoy",
"yummy",
"happy",
"loves",
"loved",
"amazing",
"awesome",
"yum",
"enjoyed",
"fantastic",
"perfectly",
"wow",
"lovely",
"beautiful",
"terrific",
"enjoyable",
]
bad_words_list = [
"awful",
"bad",
"disappointed",
"unfortunate",
"waste",
"weird",
"difficult",
"terrible",
"horrible",
"complaint",
"gross",
"worst",
"strange",
"fake",
"disappointing",
"complaints",
"poor",
"sucks",
]
"""
create_review_tone(train)
create_summary_tone(train)
train_data, test_data = train_test_split(train,random_state=RANDOM_STATE, shuffle=True)
ratings_coo = sparse.coo_matrix((train_data['rating'].astype(int),
(train_data['userid'],
train_data['itemid'])))
total_items = max(train_data['itemid'].max(), test_data['itemid'].max()) + 1
identity_matrix = sparse.identity(total_items)
item_features = sparse.coo_matrix(identity_matrix, train_data[['good_review', 'bad_review', 'good_summary', 'bad_summary']])
model = LightFM(learning_rate=0.1, loss='logistic',
no_components=NUM_COMPONENTS)
model = model.fit(ratings_coo, epochs=NUM_EPOCHS,
item_features=item_features,
num_threads=NUM_THREADS)
preds = model.predict(test_data.userid.values,
test_data.itemid.values,
item_features=item_features)
show_roc_curve(test_data.rating, preds)
"""
# rmse(test_data['rating'], preds)
# submission
"""
total_items = max(train_data['itemid'].max(), test_data['itemid'].max()) + 1
identity_matrix = sparse.identity(total_items)
item_features = sparse.coo_matrix(identity_matrix, train_data[['good_review', 'bad_review', 'good_summary', 'bad_summary']])
preds = model.predict(test.userid.values,
test.itemid.values,
item_features=item_features)
put_submission(preds)
"""
# Получили улучшение по отношению к наивной модели 0.74522
# Совместное использование user & item emdeddings
train["user_rating"] = train["overall"].apply(lambda x: 1 if x > 3 else 0)
train["verified"] = train["verified"].astype(int)
create_review_tone(train)
create_summary_tone(train)
train_data, test_data = train_test_split(train, random_state=RANDOM_STATE, shuffle=True)
ratings_coo = sparse.coo_matrix(
(train_data["rating"].astype(int), (train_data["userid"], train_data["itemid"]))
)
total_users = max(train_data["userid"].max(), test_data["userid"].max()) + 1
total_items = max(train_data["itemid"].max(), test_data["itemid"].max()) + 1
# user_feature
identity_matrix = sparse.identity(total_users)
user_features = sparse.coo_matrix(
identity_matrix, train_data[["user_rating", "verified"]]
)
# item_feature
identity_matrix = sparse.identity(total_items)
item_features = sparse.coo_matrix(
identity_matrix,
train_data[["good_review", "bad_review", "good_summary", "bad_summary"]],
)
model = LightFM(learning_rate=0.1, loss="logistic", no_components=NUM_COMPONENTS)
model = model.fit(
ratings_coo,
epochs=NUM_EPOCHS,
user_features=user_features,
item_features=item_features,
num_threads=NUM_THREADS,
)
preds = model.predict(
test_data.userid.values,
test_data.itemid.values,
user_features=user_features,
item_features=item_features,
)
show_roc_curve(test_data.rating, preds)
rmse(test_data["rating"], preds)
# submission
total_users = max(train_data["userid"].max(), test_data["userid"].max()) + 1
total_items = max(train_data["itemid"].max(), test_data["itemid"].max()) + 1
identity_matrix = sparse.identity(total_users)
user_features = sparse.coo_matrix(
identity_matrix, train_data[["user_rating", "verified"]]
)
identity_matrix = sparse.identity(total_items)
item_features = sparse.coo_matrix(
identity_matrix,
train_data[["good_review", "bad_review", "good_summary", "bad_summary"]],
)
preds = model.predict(
test.userid.values,
test.itemid.values,
user_features=user_features,
item_features=item_features,
)
put_submission(preds)
# Совместное использование user&item feature немного улучшило roc auc 0.74530
# ## Hyperparameters tuning
# ### without features
"""
# Настройка гиперпараметров модели
train_coo = sparse.coo_matrix((train['rating'].astype(int),
(train['userid'],
train['itemid'])))
train_data, test_data = random_train_test_split(train_coo)
(score, hyperparams, model) = max(random_search(train_data, test_data, num_threads=2), key=lambda x: x[0])
print("Best score {} at {}".format(score, hyperparams))
"""
"""
EPOCHS = hyperparams['num_epochs']
del hyperparams['num_epochs']
hyperparams
"""
# submission roc_auc 0.55700
"""
train_data, test_data = train_test_split(train,random_state=RANDOM_STATE, shuffle=True)
ratings_coo = sparse.coo_matrix((train_data['rating'].astype(int),
(train_data['userid'],
train_data['itemid'])))
model = LightFM(**hyperparams)
# model.fit(train, epochs=num_epochs, num_threads=num_threads)
model = model.fit(ratings_coo, epochs=EPOCHS,
num_threads=NUM_THREADS
)
preds = model.predict(test_data.userid.values,
test_data.itemid.values,
)
show_roc_curve(test_data.rating, preds)
"""
"""
# submission
preds = model.predict(test.userid.values,
test.itemid.values,
)
put_submission(preds)
"""
# submission roc_auc 0.53664
# ### with features
"""
# подготовка user & item emdeddings
train['user_rating'] = train['overall'].apply(lambda x: 1 if x > 3 else 0)
train['verified'] = train['verified'].astype(int)
create_review_tone(train)
create_summary_tone(train)
train_data, test_data = train_test_split(train,random_state=RANDOM_STATE, shuffle=True)
total_users = max(train_data['userid'].max(), test_data['userid'].max()) + 1
total_items = max(train_data['itemid'].max(), test_data['itemid'].max()) + 1
identity_matrix = sparse.identity(total_users)
user_features = sparse.coo_matrix(identity_matrix, train_data[['user_rating', 'verified']])
identity_matrix = sparse.identity(total_items)
item_features = sparse.coo_matrix(identity_matrix, train_data[['good_review', 'bad_review', 'good_summary', 'bad_summary']])
# Настройка гиперпараметров модели
train_coo = sparse.coo_matrix((train['rating'].astype(int),
(train['userid'],
train['itemid'])))
train_data, test_data = random_train_test_split(train_coo)
(score, hyperparams, model) = max(random_search(train_data, test_data, num_samples=10, num_threads=2, user_features=user_features, item_features=item_features), key=lambda x: x[0])
print("Best score {} at {}".format(score, hyperparams))
"""
"""
EPOCHS = hyperparams['num_epochs']
del hyperparams['num_epochs']
hyperparams
"""
"""
# Модель с оптимизированными гиперпараметрами
train['user_rating'] = train['overall'].apply(lambda x: 1 if x > 3 else 0)
train['verified'] = train['verified'].astype(int)
create_review_tone(train)
create_summary_tone(train)
train_data, test_data = train_test_split(train,random_state=RANDOM_STATE, shuffle=True)
ratings_coo = sparse.coo_matrix((train_data['rating'].astype(int),
(train_data['userid'],
train_data['itemid'])))
total_users = max(train_data['userid'].max(), test_data['userid'].max()) + 1
total_items = max(train_data['itemid'].max(), test_data['itemid'].max()) + 1
# user_feature
identity_matrix = sparse.identity(total_users)
user_features = sparse.coo_matrix(identity_matrix, train_data[['user_rating', 'verified']])
#item_feature
identity_matrix = sparse.identity(total_items)
item_features = sparse.coo_matrix(identity_matrix, train_data[['good_review', 'bad_review', 'good_summary', 'bad_summary']])
model = LightFM(**hyperparams)
# model.fit(train, epochs=num_epochs, num_threads=num_threads)
model = model.fit(ratings_coo, epochs=EPOCHS,
user_features=user_features,
item_features=item_features,
num_threads=NUM_THREADS
)
preds = model.predict(test_data.userid.values,
test_data.itemid.values,
user_features=user_features,
item_features=item_features)
show_roc_curve(test_data.rating, preds)
"""
"""
# submission
train_data, test_data = train_test_split(train,random_state=RANDOM_STATE, shuffle=True)
total_users = max(train_data['userid'].max(), test_data['userid'].max()) + 1
total_items = max(train_data['itemid'].max(), test_data['itemid'].max()) + 1
identity_matrix = sparse.identity(total_users)
user_features = sparse.coo_matrix(identity_matrix, train_data[['user_rating', 'verified']])
identity_matrix = sparse.identity(total_items)
item_features = sparse.coo_matrix(identity_matrix, train_data[['good_review', 'bad_review', 'good_summary', 'bad_summary']])
preds = model.predict(test.userid.values,
test.itemid.values,
user_features=user_features,
item_features=item_features)
put_submission(preds)
"""
| false | 0 | 8,236 | 0 | 8,236 | 8,236 |
||
69393059
|
<jupyter_start><jupyter_text>Pokemon Data
Kaggle dataset identifier: pokemon-data
<jupyter_code>import pandas as pd
df = pd.read_csv('pokemon-data/pokemon_data.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 800 entries, 0 to 799
Data columns (total 12 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 # 800 non-null int64
1 Name 800 non-null object
2 Type 1 800 non-null object
3 Type 2 414 non-null object
4 HP 800 non-null int64
5 Attack 800 non-null int64
6 Defense 800 non-null int64
7 Sp. Atk 800 non-null int64
8 Sp. Def 800 non-null int64
9 Speed 800 non-null int64
10 Generation 800 non-null int64
11 Legendary 800 non-null bool
dtypes: bool(1), int64(8), object(3)
memory usage: 69.7+ KB
<jupyter_text>Examples:
{
"#": 1,
"Name": "Bulbasaur",
"Type 1": "Grass",
"Type 2": "Poison",
"HP": 45,
"Attack": 49,
"Defense": 49,
"Sp. Atk": 65,
"Sp. Def": 65,
"Speed": 45,
"Generation": 1,
"Legendary": false
}
{
"#": 2,
"Name": "Ivysaur",
"Type 1": "Grass",
"Type 2": "Poison",
"HP": 60,
"Attack": 62,
"Defense": 63,
"Sp. Atk": 80,
"Sp. Def": 80,
"Speed": 60,
"Generation": 1,
"Legendary": false
}
{
"#": 3,
"Name": "Venusaur",
"Type 1": "Grass",
"Type 2": "Poison",
"HP": 80,
"Attack": 82,
"Defense": 83,
"Sp. Atk": 100,
"Sp. Def": 100,
"Speed": 80,
"Generation": 1,
"Legendary": false
}
{
"#": 3,
"Name": "VenusaurMega Venusaur",
"Type 1": "Grass",
"Type 2": "Poison",
"HP": 80,
"Attack": 100,
"Defense": 123,
"Sp. Atk": 122,
"Sp. Def": 120,
"Speed": 80,
"Generation": 1,
"Legendary": false
}
<jupyter_script># # Gotta Catch 'Em All!
# 1. [Introduction](#1)
# 2. [Libraries and Utilities](#2)
# 3. [Load and Check Data](#3)
# 4. [Variable Description](#4)
# 5. [Data Analysis](#5)
# * [Attack - Defense - Legendary](#6)
# * [Special Attack - Special Defense - Legendary](#21)
# * [Pokémon Stats by Generation](#23)
# * [Pokémon Stats by Legendary](#24)
# * [Pokémon Stats by Types](#25)
# * [Number of Pokémons by Types](#7)
# * [Percentages by Legendary and Generation](#8)
# * [Number of Legendary Pokémons by Types](#9)
# * [Attack - Defense - HP - Speed by Generations](#10)
# * [Attack - Defense - HP - Speed by Types](#11)
# * [TOP 5 Strongest Type](#12)
# * [TOP 10 Strongest Pokémon](#22)
# * [Pearson Correlation Map](#13)
# 6. [Models](#14)
# * [Dependent - Independent Variables](#15)
# * [Train - Test Split](#16)
# * [Standard Scaler](#17)
# * [Logistic Regression](#18)
# * [Hyperparameter Tuning - Grid Search - Cross Validation](#19)
# * [Ensemble Learning](#20)
# # Introduction
# Pokémon, also known as Pocket Monsters in Japan, is a Japanese media franchise managed by The Pokémon Company, a company founded by Nintendo, Game Freak, and Creatures. The franchise was created by Satoshi Tajiri in 1995, and is centered on fictional creatures called "Pokémon", which humans, known as Pokémon Trainers, catch and train to battle each other for sport. Games, shows and other works within the franchise are set in the Pokémon universe. The English slogan for the franchise is "Gotta Catch 'Em All".
# 
# # Libraries and Utilities
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("darkgrid")
from matplotlib import colors
import chart_studio.plotly as py
import cufflinks as cf
import plotly.express as px
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
init_notebook_mode(connected=True)
cf.go_offline()
import plotly.graph_objs as go
from IPython.display import IFrame
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, StratifiedKFold, GridSearchCV
from imblearn.over_sampling import SMOTE
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import warnings
warnings.filterwarnings("ignore")
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
#
# # Load and Check Data
df = pd.read_csv("/kaggle/input/pokemon-data/pokemon_data.csv")
df = df.drop(["#"], axis=1)
df.head(10)
print("Data Shape: {}".format(df.shape))
df.columns[df.isnull().any()]
#
# # Variable Description
# This data set includes 800 Pokémons, including their number, name, first and second type, and basic stats: HP, Attack, Defense, Special Attack, Special Defense, Speed and Legendary. It has been of great use when teaching statistics to kids. With certain types you can also give a geeky introduction to machine learning.
# 1. **Name:** Name of each pokemon
# 2. **Type 1:** Each pokemon has a type, this determines weakness/resistance to attacks
# 3. **Type 2:** Some pokemon are dual type and have 2
# 4. **HP:** hit points, or health, defines how much damage a pokemon can withstand before fainting
# 5. **Attack:** the base modifier for normal attacks (eg. Scratch, Punch)
# 6. **Defense:** the base damage resistance against normal attacks
# 7. **SP Atk:** special attack, the base modifier for special attacks (e.g. fire blast, bubble beam)
# 8. **SP Def:** the base damage resistance against special attacks
# 9. **Speed:** determines which pokemon attacks first each round
# 10. **Legendary:** indicates whether pokemon is legendary
# # Data Analysis
print("Types of Pokémons:", df["Type 1"].unique())
IFrame(
"https://public.tableau.com/views/Pokmon_16141947229120/Pokemon? :showVizHome=no&:embed=true",
width=900,
height=820,
)
#
# ## Attack - Defense - Legendary
df["Legendary"] = df["Legendary"].astype(str)
F = df[df["Legendary"] == "False"]
T = df[df["Legendary"] == "True"]
plt.figure(figsize=(7, 7))
plt.scatter(F["Attack"], F["Defense"], label="False", color="royalblue", alpha=0.75)
plt.scatter(T["Attack"], T["Defense"], label="True", color="red", alpha=0.75)
plt.xlabel("Attack", size=15)
plt.ylabel("Defense", size=15)
plt.title(" Attack - Defense - Legendary", size=16)
plt.legend(loc="best")
plt.show()
#
# ## Special Attack - Special Defense - Legendary
plt.figure(figsize=(7, 7))
plt.scatter(F["Sp. Atk"], F["Sp. Def"], label="False", color="royalblue", alpha=0.75)
plt.scatter(T["Sp. Atk"], T["Sp. Def"], label="True", color="red", alpha=0.75)
plt.xlabel("Special Attack", size=15)
plt.ylabel("Special Defense", size=15)
plt.title(" Special Attack - Special Defense - Legendary", size=16)
plt.legend(loc="best")
plt.show()
#
# ## Pokémon Stats by Generation
fig, axes = plt.subplots(2, 3, figsize=(18, 10))
fig.suptitle("Pokémon Stats by Generation", size=16)
sns.boxplot(ax=axes[0, 0], data=df, x="Generation", y="Attack")
sns.boxplot(ax=axes[0, 1], data=df, x="Generation", y="Defense")
sns.boxplot(ax=axes[0, 2], data=df, x="Generation", y="Speed")
sns.boxplot(ax=axes[1, 0], data=df, x="Generation", y="Sp. Atk")
sns.boxplot(ax=axes[1, 1], data=df, x="Generation", y="Sp. Def")
sns.boxplot(ax=axes[1, 2], data=df, x="Generation", y="HP")
plt.show()
fig, axes = plt.subplots(2, 3, figsize=(18, 10))
fig.suptitle("Pokémon Stats by Generation", size=16)
sns.violinplot(ax=axes[0, 0], data=df, x="Generation", y="Attack")
sns.violinplot(ax=axes[0, 1], data=df, x="Generation", y="Defense")
sns.violinplot(ax=axes[0, 2], data=df, x="Generation", y="Speed")
sns.violinplot(ax=axes[1, 0], data=df, x="Generation", y="Sp. Atk")
sns.violinplot(ax=axes[1, 1], data=df, x="Generation", y="Sp. Def")
sns.violinplot(ax=axes[1, 2], data=df, x="Generation", y="HP")
plt.show()
#
# ## Pokémon Stats by Legendary
fig, axes = plt.subplots(2, 3, figsize=(18, 10))
fig.suptitle("Pokémon Stats by Legendary", size=16)
sns.boxplot(ax=axes[0, 0], data=df, x="Legendary", y="Attack")
sns.boxplot(ax=axes[0, 1], data=df, x="Legendary", y="Defense")
sns.boxplot(ax=axes[0, 2], data=df, x="Legendary", y="Speed")
sns.boxplot(ax=axes[1, 0], data=df, x="Legendary", y="Sp. Atk")
sns.boxplot(ax=axes[1, 1], data=df, x="Legendary", y="Sp. Def")
sns.boxplot(ax=axes[1, 2], data=df, x="Legendary", y="HP")
plt.show()
fig, axes = plt.subplots(2, 3, figsize=(18, 10))
fig.suptitle("Pokémon Stats by Legendary", size=16)
sns.violinplot(ax=axes[0, 0], data=df, x="Legendary", y="Attack")
sns.violinplot(ax=axes[0, 1], data=df, x="Legendary", y="Defense")
sns.violinplot(ax=axes[0, 2], data=df, x="Legendary", y="Speed")
sns.violinplot(ax=axes[1, 0], data=df, x="Legendary", y="Sp. Atk")
sns.violinplot(ax=axes[1, 1], data=df, x="Legendary", y="Sp. Def")
sns.violinplot(ax=axes[1, 2], data=df, x="Legendary", y="HP")
plt.show()
#
# ## Pokémon Stats by Types
attack_avg = []
defense_avg = []
hp_avg = []
speed_avg = []
sp_def_avg = []
sp_atk_avg = []
for i in df["Type 1"].unique():
attack_avg.append(df[df["Type 1"] == i]["Attack"].mean())
defense_avg.append(df[df["Type 1"] == i]["Defense"].mean())
hp_avg.append(df[df["Type 1"] == i]["HP"].mean())
speed_avg.append(df[df["Type 1"] == i]["Speed"].mean())
sp_def_avg.append(df[df["Type 1"] == i]["Sp. Def"].mean())
sp_atk_avg.append(df[df["Type 1"] == i]["Sp. Atk"].mean())
avg_skills = pd.DataFrame(
[attack_avg, defense_avg, hp_avg, speed_avg, sp_def_avg, sp_atk_avg],
index=["Attack", "Defense", "HP", "Speed", "Sp. Def", "Sp. Atk"],
columns=df["Type 1"].unique(),
)
avg_skills = avg_skills.T
for i in range(len(avg_skills.index)):
fig = px.line_polar(
avg_skills,
r=avg_skills.iloc[i,],
theta=avg_skills.columns,
line_close=True,
title=avg_skills.index[i],
width=600,
height=400,
template="plotly_dark",
)
fig.update_traces(fill="toself")
fig.update_polars(
radialaxis_range=[
i,
round(
avg_skills.max().max(),
)
+ 4,
]
)
fig.show()
#
# ## Number of Pokémons by Types
tp1_sum = df["Type 1"].value_counts().sum()
tp2_sum = df["Type 2"].value_counts().sum()
tp1_count = []
tp2_count = []
pokemon_counts = []
for i in df["Type 1"].unique():
pokemon_counts.append(
df[df["Type 1"] == i].count()["Type 1"]
+ df[df["Type 2"] == i].count()["Type 2"]
)
tp1_count.append(df[df["Type 1"] == i].count()["Type 1"])
tp2_count.append(df[df["Type 2"] == i].count()["Type 2"])
fig, axs = plt.subplots(2, 2, figsize=(12, 10))
fig.suptitle("Number of Pokémons by Types", size=16)
axs[0, 0].barh(df["Type 1"].unique(), tp1_count, color="mediumpurple")
axs[0, 0].set_title("Type 1", size=14)
axs[0, 1].barh(df["Type 1"].unique(), tp2_count, color="mediumpurple")
axs[0, 1].set_title("Type 2", size=14)
axs[1, 0].barh(df["Type 1"].unique(), pokemon_counts, color="mediumpurple")
axs[1, 0].set_title("Sum of Type 1 and Type 2", size=14)
axs[1, 1].barh(["Type 1", "Type 2"], [tp1_sum, tp2_sum], color="mediumpurple")
axs[1, 1].set_title("Type 1 and Type 2", size=14)
#
# ## Percentages by Legendary and Generation
fig = go.Figure(
data=[
go.Pie(
labels=df["Legendary"].value_counts().keys(),
values=df["Legendary"].value_counts().values,
)
]
)
fig.update_traces(
hoverinfo="value",
textinfo="label",
textfont_size=16,
textposition="auto",
showlegend=False,
marker=dict(colors=["#337DFF", "#FF5733"], line=dict(color="black", width=0.6)),
)
fig.update_layout(
title={
"text": "Legendary",
"y": 0.9,
"x": 0.5,
"xanchor": "center",
"yanchor": "top",
},
template="simple_white",
)
iplot(fig)
fig = go.Figure(
data=[
go.Pie(
labels=df["Generation"].value_counts().keys(),
values=df["Generation"].value_counts().values,
)
]
)
fig.update_traces(
hoverinfo="value",
textinfo="label",
textfont_size=22,
textposition="auto",
showlegend=False,
marker=dict(
colors=["#28F20C", "#F7322C", "#F27F0C", "#F12CF7", "#337DFF", "#F7F72C"],
line=dict(color="black", width=0.6),
),
)
fig.update_layout(
title={
"text": "Generation",
"y": 0.9,
"x": 0.5,
"xanchor": "center",
"yanchor": "top",
},
template="simple_white",
)
iplot(fig)
#
# ## Number of Legendary Pokémons by Types
cat = sns.catplot(
y="Type 1",
hue="Legendary",
kind="count",
edgecolor=".6",
palette="Set2",
legend=True,
data=df,
)
cat.fig.set_size_inches(10, 6)
plt.yticks(size=12)
cat.set(xlabel=None)
cat.set(ylabel=None)
cat.set(title="Number of Legendary Pokémons by Types")
plt.show()
#
# ## Attack - Defense - HP - Speed by Generations
generation_df = df.groupby("Generation").agg(
{"Attack": "mean", "Defense": "mean", "HP": "mean", "Speed": "mean"}
)
trace1 = go.Scatter(
x=generation_df.index,
y=generation_df["Attack"],
mode="lines",
name="Attack",
line=dict(color="#FF2F01", width=3),
)
trace2 = go.Scatter(
x=generation_df.index,
y=generation_df["Defense"],
mode="lines",
name="Defense",
line=dict(color="#79F72C", width=3),
)
trace3 = go.Scatter(
x=generation_df.index,
y=generation_df["HP"],
mode="lines",
name="HP",
line=dict(color="#2CA4F7", width=3),
)
trace4 = go.Scatter(
x=generation_df.index,
y=generation_df["Speed"],
mode="lines",
name="Speed",
line=dict(color="#9E2CF7", width=3),
)
layout = go.Layout(
title={
"text": "Attack - Defense - HP - Speed by Generations",
"y": 0.9,
"x": 0.5,
"xanchor": "center",
"yanchor": "top",
},
xaxis=dict(title="Generation"),
template="plotly_white",
)
fig = go.Figure(data=[trace1, trace2, trace3, trace4], layout=layout)
iplot(fig)
#
# ## Attack - Defense - HP - Speed by Types
attack_avg = []
defense_avg = []
hp_avg = []
speed_avg = []
for i in df["Type 1"].unique():
attack_avg.append(df[df["Type 1"] == i]["Attack"].mean())
defense_avg.append(df[df["Type 1"] == i]["Defense"].mean())
hp_avg.append(df[df["Type 1"] == i]["HP"].mean())
speed_avg.append(df[df["Type 1"] == i]["Speed"].mean())
fig, axs = plt.subplots(2, 2, figsize=(12, 10))
fig.suptitle("Attack - Defense - HP - Speed by Types", size=16)
axs[0, 0].barh(df["Type 1"].unique(), attack_avg, color="cornflowerblue")
axs[0, 0].set_title("Attack", size=14)
axs[0, 1].barh(df["Type 1"].unique(), defense_avg, color="cornflowerblue")
axs[0, 1].set_title("Defense", size=14)
axs[1, 0].barh(df["Type 1"].unique(), hp_avg, color="cornflowerblue")
axs[1, 0].set_title("HP", size=14)
axs[1, 1].barh(df["Type 1"].unique(), speed_avg, color="cornflowerblue")
axs[1, 1].set_title("Speed", size=14)
fig.tight_layout()
#
# ## TOP 5 Strongest Type
total = (
df["HP"]
+ df["Attack"]
+ df["Defense"]
+ df["Sp. Atk"]
+ df["Sp. Def"]
+ df["Speed"]
)
top5_type = {"Type": df["Type 1"], "Overall": total}
top5_type = pd.DataFrame(top5_type)
top5_type = (
top5_type[["Overall", "Type"]]
.groupby(["Type"], as_index=False)
.mean()
.sort_values(by="Overall", ascending=False)[0:5]
)
plt.figure(figsize=(10, 6))
plt.title("TOP 5 Strongest Type", size=16)
sns.barplot(
x="Overall", y="Type", ci=None, data=top5_type, dodge=False, palette="Dark2"
)
plt.yticks(size=12)
plt.show()
#
# ## TOP 10 Strongest Pokémon
top10_pokemon = {"Name": df["Name"], "Type": df["Type 1"], "Overall": total}
top10_pokemon = pd.DataFrame(top10_pokemon)
top10_pokemon = top10_pokemon[["Name", "Type", "Overall"]].sort_values(
by="Overall", ascending=False
)[0:10]
plt.figure(figsize=(12, 8))
plt.title("TOP 10 Strongest Pokémon", size=16)
sns.barplot(x="Overall", y="Name", hue="Type", data=top10_pokemon, dodge=False)
plt.yticks(size=12)
plt.show()
#
# ## Pearson Correlation Map
sns.set_style("white")
matrix = np.triu(df.corr(method="pearson"))
f, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(
df.corr(),
annot=True,
fmt=".2f",
ax=ax,
vmin=-1,
vmax=1,
mask=matrix,
cmap="coolwarm",
linewidth=0.4,
linecolor="white",
annot_kws={"size": 14},
)
plt.xticks(rotation=60, size=14)
plt.yticks(rotation=0, size=14)
plt.title("Pearson Correlation Map", size=14)
plt.show()
#
# # Models
# ## Dependent - Independent Variables
# - Legendary is dependent variable.
# - Attack, Defense, HP, Speed, Sp. Atk and Sp. Def are independent variables.
y = df["Legendary"].values
X = df.drop(["Name", "Type 1", "Type 2", "Legendary", "Generation"], axis=1)
X.head()
#
# ## Train - Test Split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=42
)
sm = SMOTE()
X_train, y_train = sm.fit_resample(X_train, y_train)
print("X_train shape: {}".format(X_train.shape))
print("y_train shape: {}".format(y_train.shape))
#
# ## Scaling
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
#
# ## Logistic Regression
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
acc_log_train = round(logreg.score(X_train, y_train) * 100, 2)
acc_log_test = round(logreg.score(X_test, y_test) * 100, 2)
print("Training Accuracy: % {}".format(acc_log_train))
print("Testing Accuracy: % {}".format(acc_log_test))
lr_pred = logreg.predict(X_test)
cm = confusion_matrix(y_test, lr_pred)
df1 = pd.DataFrame(columns=["Not", "Legendary"], index=["Not", "Legendary"], data=cm)
f, ax = plt.subplots(figsize=(4, 4))
sns.heatmap(
df1,
annot=True,
cmap="Reds",
fmt=".0f",
ax=ax,
linewidths=5,
cbar=False,
annot_kws={"size": 16},
)
plt.xlabel("Predicted Label")
plt.xticks(size=12)
plt.yticks(size=12, rotation=0)
plt.ylabel("True Label")
plt.title("Confusion Matrix", size=12)
plt.show()
#
# ## Hyperparameter Tuning - Grid Search - Cross Validation
# We will compare 5 ml classifier and evaluate mean accuracy of each of them by stratified cross validation.
# - Decision Tree
# - SVM
# - Random Forest
# - KNN
# - Logistic Regression
random_state = 42
classifier = [
DecisionTreeClassifier(random_state=random_state),
SVC(random_state=random_state),
RandomForestClassifier(random_state=random_state),
LogisticRegression(random_state=random_state),
KNeighborsClassifier(),
]
dt_param_grid = {"min_samples_split": range(10, 500, 20), "max_depth": range(1, 20, 2)}
svc_param_grid = {
"kernel": ["rbf"],
"gamma": [0.001, 0.01, 0.1, 1],
"C": [1, 10, 50, 100, 200, 300, 1000],
}
rf_param_grid = {
"max_features": [1, 3, 10],
"min_samples_split": [2, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [False],
"n_estimators": [100, 300],
"criterion": ["gini"],
}
logreg_param_grid = {"C": np.logspace(-4, 4, 20), "penalty": ["l1", "l2"]}
knn_param_grid = {
"n_neighbors": np.linspace(1, 19, 10, dtype=int).tolist(),
"weights": ["uniform", "distance"],
"metric": ["euclidean", "manhattan"],
}
classifier_param = [
dt_param_grid,
svc_param_grid,
rf_param_grid,
logreg_param_grid,
knn_param_grid,
]
cv_result = []
best_estimators = []
for i in range(len(classifier)):
clf = GridSearchCV(
classifier[i],
param_grid=classifier_param[i],
cv=StratifiedKFold(n_splits=10),
scoring="accuracy",
n_jobs=-1,
verbose=2,
)
clf.fit(X_train, y_train)
cv_result.append(clf.best_score_)
best_estimators.append(clf.best_estimator_)
print(cv_result[i])
cv_results = pd.DataFrame(
{
"Cross Validation Means": cv_result,
"ML Models": [
"DecisionTreeClassifier",
"SVC",
"RandomForestClassifier",
"LogisticRegression",
"KNeighborsClassifier",
],
}
)
plt.figure(figsize=(8, 6))
g = sns.barplot("Cross Validation Means", "ML Models", data=cv_results, palette="Set1")
g.set_xlabel("Mean Accuracy")
plt.xlim(0.9, 1)
g.set_title("Cross Validation Scores")
#
# ## Ensemble Learning
votingC = VotingClassifier(
estimators=[
("svc", best_estimators[1]),
("rf", best_estimators[2]),
("knn", best_estimators[4]),
]
)
votingC = votingC.fit(X_train, y_train)
voting_pred = votingC.predict(X_test)
print(classification_report(y_test, voting_pred))
cm = confusion_matrix(y_test, voting_pred)
df1 = pd.DataFrame(columns=["Not", "Legendary"], index=["Not", "Legendary"], data=cm)
f, ax = plt.subplots(figsize=(4, 4))
sns.heatmap(
df1,
annot=True,
cmap="Reds",
fmt=".0f",
ax=ax,
linewidths=5,
cbar=False,
annot_kws={"size": 16},
)
plt.xlabel("Predicted Label")
plt.xticks(size=12)
plt.yticks(size=12, rotation=0)
plt.ylabel("True Label")
plt.title("Confusion Matrix", size=12)
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/393/69393059.ipynb
|
pokemon-data
|
sercanyesiloz
|
[{"Id": 69393059, "ScriptId": 15213128, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2926451, "CreationDate": "07/30/2021 12:35:24", "VersionNumber": 62.0, "Title": "Gotta Catch 'Em All!", "EvaluationDate": "07/30/2021", "IsChange": false, "TotalLines": 576.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 576.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 92502547, "KernelVersionId": 69393059, "SourceDatasetVersionId": 1983148}]
|
[{"Id": 1983148, "DatasetId": 1185421, "DatasourceVersionId": 2022295, "CreatorUserId": 2926451, "LicenseName": "Unknown", "CreationDate": "02/28/2021 14:03:19", "VersionNumber": 1.0, "Title": "Pokemon Data", "Slug": "pokemon-data", "Subtitle": "Pok\u00e9mon Dataset (800 rows)", "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1185421, "CreatorUserId": 2926451, "OwnerUserId": 2926451.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1983148.0, "CurrentDatasourceVersionId": 2022295.0, "ForumId": 1203219, "Type": 2, "CreationDate": "02/28/2021 14:03:19", "LastActivityDate": "02/28/2021", "TotalViews": 3522, "TotalDownloads": 216, "TotalVotes": 21, "TotalKernels": 1}]
|
[{"Id": 2926451, "UserName": "sercanyesiloz", "DisplayName": "Sercan Ye\u015fil\u00f6z", "RegisterDate": "03/11/2019", "PerformanceTier": 3}]
|
# # Gotta Catch 'Em All!
# 1. [Introduction](#1)
# 2. [Libraries and Utilities](#2)
# 3. [Load and Check Data](#3)
# 4. [Variable Description](#4)
# 5. [Data Analysis](#5)
# * [Attack - Defense - Legendary](#6)
# * [Special Attack - Special Defense - Legendary](#21)
# * [Pokémon Stats by Generation](#23)
# * [Pokémon Stats by Legendary](#24)
# * [Pokémon Stats by Types](#25)
# * [Number of Pokémons by Types](#7)
# * [Percentages by Legendary and Generation](#8)
# * [Number of Legendary Pokémons by Types](#9)
# * [Attack - Defense - HP - Speed by Generations](#10)
# * [Attack - Defense - HP - Speed by Types](#11)
# * [TOP 5 Strongest Type](#12)
# * [TOP 10 Strongest Pokémon](#22)
# * [Pearson Correlation Map](#13)
# 6. [Models](#14)
# * [Dependent - Independent Variables](#15)
# * [Train - Test Split](#16)
# * [Standard Scaler](#17)
# * [Logistic Regression](#18)
# * [Hyperparameter Tuning - Grid Search - Cross Validation](#19)
# * [Ensemble Learning](#20)
# # Introduction
# Pokémon, also known as Pocket Monsters in Japan, is a Japanese media franchise managed by The Pokémon Company, a company founded by Nintendo, Game Freak, and Creatures. The franchise was created by Satoshi Tajiri in 1995, and is centered on fictional creatures called "Pokémon", which humans, known as Pokémon Trainers, catch and train to battle each other for sport. Games, shows and other works within the franchise are set in the Pokémon universe. The English slogan for the franchise is "Gotta Catch 'Em All".
# 
# # Libraries and Utilities
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_style("darkgrid")
from matplotlib import colors
import chart_studio.plotly as py
import cufflinks as cf
import plotly.express as px
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
init_notebook_mode(connected=True)
cf.go_offline()
import plotly.graph_objs as go
from IPython.display import IFrame
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier, VotingClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, StratifiedKFold, GridSearchCV
from imblearn.over_sampling import SMOTE
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
import warnings
warnings.filterwarnings("ignore")
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
#
# # Load and Check Data
df = pd.read_csv("/kaggle/input/pokemon-data/pokemon_data.csv")
df = df.drop(["#"], axis=1)
df.head(10)
print("Data Shape: {}".format(df.shape))
df.columns[df.isnull().any()]
#
# # Variable Description
# This data set includes 800 Pokémons, including their number, name, first and second type, and basic stats: HP, Attack, Defense, Special Attack, Special Defense, Speed and Legendary. It has been of great use when teaching statistics to kids. With certain types you can also give a geeky introduction to machine learning.
# 1. **Name:** Name of each pokemon
# 2. **Type 1:** Each pokemon has a type, this determines weakness/resistance to attacks
# 3. **Type 2:** Some pokemon are dual type and have 2
# 4. **HP:** hit points, or health, defines how much damage a pokemon can withstand before fainting
# 5. **Attack:** the base modifier for normal attacks (eg. Scratch, Punch)
# 6. **Defense:** the base damage resistance against normal attacks
# 7. **SP Atk:** special attack, the base modifier for special attacks (e.g. fire blast, bubble beam)
# 8. **SP Def:** the base damage resistance against special attacks
# 9. **Speed:** determines which pokemon attacks first each round
# 10. **Legendary:** indicates whether pokemon is legendary
# # Data Analysis
print("Types of Pokémons:", df["Type 1"].unique())
IFrame(
"https://public.tableau.com/views/Pokmon_16141947229120/Pokemon? :showVizHome=no&:embed=true",
width=900,
height=820,
)
#
# ## Attack - Defense - Legendary
df["Legendary"] = df["Legendary"].astype(str)
F = df[df["Legendary"] == "False"]
T = df[df["Legendary"] == "True"]
plt.figure(figsize=(7, 7))
plt.scatter(F["Attack"], F["Defense"], label="False", color="royalblue", alpha=0.75)
plt.scatter(T["Attack"], T["Defense"], label="True", color="red", alpha=0.75)
plt.xlabel("Attack", size=15)
plt.ylabel("Defense", size=15)
plt.title(" Attack - Defense - Legendary", size=16)
plt.legend(loc="best")
plt.show()
#
# ## Special Attack - Special Defense - Legendary
plt.figure(figsize=(7, 7))
plt.scatter(F["Sp. Atk"], F["Sp. Def"], label="False", color="royalblue", alpha=0.75)
plt.scatter(T["Sp. Atk"], T["Sp. Def"], label="True", color="red", alpha=0.75)
plt.xlabel("Special Attack", size=15)
plt.ylabel("Special Defense", size=15)
plt.title(" Special Attack - Special Defense - Legendary", size=16)
plt.legend(loc="best")
plt.show()
#
# ## Pokémon Stats by Generation
fig, axes = plt.subplots(2, 3, figsize=(18, 10))
fig.suptitle("Pokémon Stats by Generation", size=16)
sns.boxplot(ax=axes[0, 0], data=df, x="Generation", y="Attack")
sns.boxplot(ax=axes[0, 1], data=df, x="Generation", y="Defense")
sns.boxplot(ax=axes[0, 2], data=df, x="Generation", y="Speed")
sns.boxplot(ax=axes[1, 0], data=df, x="Generation", y="Sp. Atk")
sns.boxplot(ax=axes[1, 1], data=df, x="Generation", y="Sp. Def")
sns.boxplot(ax=axes[1, 2], data=df, x="Generation", y="HP")
plt.show()
fig, axes = plt.subplots(2, 3, figsize=(18, 10))
fig.suptitle("Pokémon Stats by Generation", size=16)
sns.violinplot(ax=axes[0, 0], data=df, x="Generation", y="Attack")
sns.violinplot(ax=axes[0, 1], data=df, x="Generation", y="Defense")
sns.violinplot(ax=axes[0, 2], data=df, x="Generation", y="Speed")
sns.violinplot(ax=axes[1, 0], data=df, x="Generation", y="Sp. Atk")
sns.violinplot(ax=axes[1, 1], data=df, x="Generation", y="Sp. Def")
sns.violinplot(ax=axes[1, 2], data=df, x="Generation", y="HP")
plt.show()
#
# ## Pokémon Stats by Legendary
fig, axes = plt.subplots(2, 3, figsize=(18, 10))
fig.suptitle("Pokémon Stats by Legendary", size=16)
sns.boxplot(ax=axes[0, 0], data=df, x="Legendary", y="Attack")
sns.boxplot(ax=axes[0, 1], data=df, x="Legendary", y="Defense")
sns.boxplot(ax=axes[0, 2], data=df, x="Legendary", y="Speed")
sns.boxplot(ax=axes[1, 0], data=df, x="Legendary", y="Sp. Atk")
sns.boxplot(ax=axes[1, 1], data=df, x="Legendary", y="Sp. Def")
sns.boxplot(ax=axes[1, 2], data=df, x="Legendary", y="HP")
plt.show()
fig, axes = plt.subplots(2, 3, figsize=(18, 10))
fig.suptitle("Pokémon Stats by Legendary", size=16)
sns.violinplot(ax=axes[0, 0], data=df, x="Legendary", y="Attack")
sns.violinplot(ax=axes[0, 1], data=df, x="Legendary", y="Defense")
sns.violinplot(ax=axes[0, 2], data=df, x="Legendary", y="Speed")
sns.violinplot(ax=axes[1, 0], data=df, x="Legendary", y="Sp. Atk")
sns.violinplot(ax=axes[1, 1], data=df, x="Legendary", y="Sp. Def")
sns.violinplot(ax=axes[1, 2], data=df, x="Legendary", y="HP")
plt.show()
#
# ## Pokémon Stats by Types
attack_avg = []
defense_avg = []
hp_avg = []
speed_avg = []
sp_def_avg = []
sp_atk_avg = []
for i in df["Type 1"].unique():
attack_avg.append(df[df["Type 1"] == i]["Attack"].mean())
defense_avg.append(df[df["Type 1"] == i]["Defense"].mean())
hp_avg.append(df[df["Type 1"] == i]["HP"].mean())
speed_avg.append(df[df["Type 1"] == i]["Speed"].mean())
sp_def_avg.append(df[df["Type 1"] == i]["Sp. Def"].mean())
sp_atk_avg.append(df[df["Type 1"] == i]["Sp. Atk"].mean())
avg_skills = pd.DataFrame(
[attack_avg, defense_avg, hp_avg, speed_avg, sp_def_avg, sp_atk_avg],
index=["Attack", "Defense", "HP", "Speed", "Sp. Def", "Sp. Atk"],
columns=df["Type 1"].unique(),
)
avg_skills = avg_skills.T
for i in range(len(avg_skills.index)):
fig = px.line_polar(
avg_skills,
r=avg_skills.iloc[i,],
theta=avg_skills.columns,
line_close=True,
title=avg_skills.index[i],
width=600,
height=400,
template="plotly_dark",
)
fig.update_traces(fill="toself")
fig.update_polars(
radialaxis_range=[
i,
round(
avg_skills.max().max(),
)
+ 4,
]
)
fig.show()
#
# ## Number of Pokémons by Types
tp1_sum = df["Type 1"].value_counts().sum()
tp2_sum = df["Type 2"].value_counts().sum()
tp1_count = []
tp2_count = []
pokemon_counts = []
for i in df["Type 1"].unique():
pokemon_counts.append(
df[df["Type 1"] == i].count()["Type 1"]
+ df[df["Type 2"] == i].count()["Type 2"]
)
tp1_count.append(df[df["Type 1"] == i].count()["Type 1"])
tp2_count.append(df[df["Type 2"] == i].count()["Type 2"])
fig, axs = plt.subplots(2, 2, figsize=(12, 10))
fig.suptitle("Number of Pokémons by Types", size=16)
axs[0, 0].barh(df["Type 1"].unique(), tp1_count, color="mediumpurple")
axs[0, 0].set_title("Type 1", size=14)
axs[0, 1].barh(df["Type 1"].unique(), tp2_count, color="mediumpurple")
axs[0, 1].set_title("Type 2", size=14)
axs[1, 0].barh(df["Type 1"].unique(), pokemon_counts, color="mediumpurple")
axs[1, 0].set_title("Sum of Type 1 and Type 2", size=14)
axs[1, 1].barh(["Type 1", "Type 2"], [tp1_sum, tp2_sum], color="mediumpurple")
axs[1, 1].set_title("Type 1 and Type 2", size=14)
#
# ## Percentages by Legendary and Generation
fig = go.Figure(
data=[
go.Pie(
labels=df["Legendary"].value_counts().keys(),
values=df["Legendary"].value_counts().values,
)
]
)
fig.update_traces(
hoverinfo="value",
textinfo="label",
textfont_size=16,
textposition="auto",
showlegend=False,
marker=dict(colors=["#337DFF", "#FF5733"], line=dict(color="black", width=0.6)),
)
fig.update_layout(
title={
"text": "Legendary",
"y": 0.9,
"x": 0.5,
"xanchor": "center",
"yanchor": "top",
},
template="simple_white",
)
iplot(fig)
fig = go.Figure(
data=[
go.Pie(
labels=df["Generation"].value_counts().keys(),
values=df["Generation"].value_counts().values,
)
]
)
fig.update_traces(
hoverinfo="value",
textinfo="label",
textfont_size=22,
textposition="auto",
showlegend=False,
marker=dict(
colors=["#28F20C", "#F7322C", "#F27F0C", "#F12CF7", "#337DFF", "#F7F72C"],
line=dict(color="black", width=0.6),
),
)
fig.update_layout(
title={
"text": "Generation",
"y": 0.9,
"x": 0.5,
"xanchor": "center",
"yanchor": "top",
},
template="simple_white",
)
iplot(fig)
#
# ## Number of Legendary Pokémons by Types
cat = sns.catplot(
y="Type 1",
hue="Legendary",
kind="count",
edgecolor=".6",
palette="Set2",
legend=True,
data=df,
)
cat.fig.set_size_inches(10, 6)
plt.yticks(size=12)
cat.set(xlabel=None)
cat.set(ylabel=None)
cat.set(title="Number of Legendary Pokémons by Types")
plt.show()
#
# ## Attack - Defense - HP - Speed by Generations
generation_df = df.groupby("Generation").agg(
{"Attack": "mean", "Defense": "mean", "HP": "mean", "Speed": "mean"}
)
trace1 = go.Scatter(
x=generation_df.index,
y=generation_df["Attack"],
mode="lines",
name="Attack",
line=dict(color="#FF2F01", width=3),
)
trace2 = go.Scatter(
x=generation_df.index,
y=generation_df["Defense"],
mode="lines",
name="Defense",
line=dict(color="#79F72C", width=3),
)
trace3 = go.Scatter(
x=generation_df.index,
y=generation_df["HP"],
mode="lines",
name="HP",
line=dict(color="#2CA4F7", width=3),
)
trace4 = go.Scatter(
x=generation_df.index,
y=generation_df["Speed"],
mode="lines",
name="Speed",
line=dict(color="#9E2CF7", width=3),
)
layout = go.Layout(
title={
"text": "Attack - Defense - HP - Speed by Generations",
"y": 0.9,
"x": 0.5,
"xanchor": "center",
"yanchor": "top",
},
xaxis=dict(title="Generation"),
template="plotly_white",
)
fig = go.Figure(data=[trace1, trace2, trace3, trace4], layout=layout)
iplot(fig)
#
# ## Attack - Defense - HP - Speed by Types
attack_avg = []
defense_avg = []
hp_avg = []
speed_avg = []
for i in df["Type 1"].unique():
attack_avg.append(df[df["Type 1"] == i]["Attack"].mean())
defense_avg.append(df[df["Type 1"] == i]["Defense"].mean())
hp_avg.append(df[df["Type 1"] == i]["HP"].mean())
speed_avg.append(df[df["Type 1"] == i]["Speed"].mean())
fig, axs = plt.subplots(2, 2, figsize=(12, 10))
fig.suptitle("Attack - Defense - HP - Speed by Types", size=16)
axs[0, 0].barh(df["Type 1"].unique(), attack_avg, color="cornflowerblue")
axs[0, 0].set_title("Attack", size=14)
axs[0, 1].barh(df["Type 1"].unique(), defense_avg, color="cornflowerblue")
axs[0, 1].set_title("Defense", size=14)
axs[1, 0].barh(df["Type 1"].unique(), hp_avg, color="cornflowerblue")
axs[1, 0].set_title("HP", size=14)
axs[1, 1].barh(df["Type 1"].unique(), speed_avg, color="cornflowerblue")
axs[1, 1].set_title("Speed", size=14)
fig.tight_layout()
#
# ## TOP 5 Strongest Type
total = (
df["HP"]
+ df["Attack"]
+ df["Defense"]
+ df["Sp. Atk"]
+ df["Sp. Def"]
+ df["Speed"]
)
top5_type = {"Type": df["Type 1"], "Overall": total}
top5_type = pd.DataFrame(top5_type)
top5_type = (
top5_type[["Overall", "Type"]]
.groupby(["Type"], as_index=False)
.mean()
.sort_values(by="Overall", ascending=False)[0:5]
)
plt.figure(figsize=(10, 6))
plt.title("TOP 5 Strongest Type", size=16)
sns.barplot(
x="Overall", y="Type", ci=None, data=top5_type, dodge=False, palette="Dark2"
)
plt.yticks(size=12)
plt.show()
#
# ## TOP 10 Strongest Pokémon
top10_pokemon = {"Name": df["Name"], "Type": df["Type 1"], "Overall": total}
top10_pokemon = pd.DataFrame(top10_pokemon)
top10_pokemon = top10_pokemon[["Name", "Type", "Overall"]].sort_values(
by="Overall", ascending=False
)[0:10]
plt.figure(figsize=(12, 8))
plt.title("TOP 10 Strongest Pokémon", size=16)
sns.barplot(x="Overall", y="Name", hue="Type", data=top10_pokemon, dodge=False)
plt.yticks(size=12)
plt.show()
#
# ## Pearson Correlation Map
sns.set_style("white")
matrix = np.triu(df.corr(method="pearson"))
f, ax = plt.subplots(figsize=(10, 10))
sns.heatmap(
df.corr(),
annot=True,
fmt=".2f",
ax=ax,
vmin=-1,
vmax=1,
mask=matrix,
cmap="coolwarm",
linewidth=0.4,
linecolor="white",
annot_kws={"size": 14},
)
plt.xticks(rotation=60, size=14)
plt.yticks(rotation=0, size=14)
plt.title("Pearson Correlation Map", size=14)
plt.show()
#
# # Models
# ## Dependent - Independent Variables
# - Legendary is dependent variable.
# - Attack, Defense, HP, Speed, Sp. Atk and Sp. Def are independent variables.
y = df["Legendary"].values
X = df.drop(["Name", "Type 1", "Type 2", "Legendary", "Generation"], axis=1)
X.head()
#
# ## Train - Test Split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=42
)
sm = SMOTE()
X_train, y_train = sm.fit_resample(X_train, y_train)
print("X_train shape: {}".format(X_train.shape))
print("y_train shape: {}".format(y_train.shape))
#
# ## Scaling
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
#
# ## Logistic Regression
logreg = LogisticRegression()
logreg.fit(X_train, y_train)
acc_log_train = round(logreg.score(X_train, y_train) * 100, 2)
acc_log_test = round(logreg.score(X_test, y_test) * 100, 2)
print("Training Accuracy: % {}".format(acc_log_train))
print("Testing Accuracy: % {}".format(acc_log_test))
lr_pred = logreg.predict(X_test)
cm = confusion_matrix(y_test, lr_pred)
df1 = pd.DataFrame(columns=["Not", "Legendary"], index=["Not", "Legendary"], data=cm)
f, ax = plt.subplots(figsize=(4, 4))
sns.heatmap(
df1,
annot=True,
cmap="Reds",
fmt=".0f",
ax=ax,
linewidths=5,
cbar=False,
annot_kws={"size": 16},
)
plt.xlabel("Predicted Label")
plt.xticks(size=12)
plt.yticks(size=12, rotation=0)
plt.ylabel("True Label")
plt.title("Confusion Matrix", size=12)
plt.show()
#
# ## Hyperparameter Tuning - Grid Search - Cross Validation
# We will compare 5 ml classifier and evaluate mean accuracy of each of them by stratified cross validation.
# - Decision Tree
# - SVM
# - Random Forest
# - KNN
# - Logistic Regression
random_state = 42
classifier = [
DecisionTreeClassifier(random_state=random_state),
SVC(random_state=random_state),
RandomForestClassifier(random_state=random_state),
LogisticRegression(random_state=random_state),
KNeighborsClassifier(),
]
dt_param_grid = {"min_samples_split": range(10, 500, 20), "max_depth": range(1, 20, 2)}
svc_param_grid = {
"kernel": ["rbf"],
"gamma": [0.001, 0.01, 0.1, 1],
"C": [1, 10, 50, 100, 200, 300, 1000],
}
rf_param_grid = {
"max_features": [1, 3, 10],
"min_samples_split": [2, 3, 10],
"min_samples_leaf": [1, 3, 10],
"bootstrap": [False],
"n_estimators": [100, 300],
"criterion": ["gini"],
}
logreg_param_grid = {"C": np.logspace(-4, 4, 20), "penalty": ["l1", "l2"]}
knn_param_grid = {
"n_neighbors": np.linspace(1, 19, 10, dtype=int).tolist(),
"weights": ["uniform", "distance"],
"metric": ["euclidean", "manhattan"],
}
classifier_param = [
dt_param_grid,
svc_param_grid,
rf_param_grid,
logreg_param_grid,
knn_param_grid,
]
cv_result = []
best_estimators = []
for i in range(len(classifier)):
clf = GridSearchCV(
classifier[i],
param_grid=classifier_param[i],
cv=StratifiedKFold(n_splits=10),
scoring="accuracy",
n_jobs=-1,
verbose=2,
)
clf.fit(X_train, y_train)
cv_result.append(clf.best_score_)
best_estimators.append(clf.best_estimator_)
print(cv_result[i])
cv_results = pd.DataFrame(
{
"Cross Validation Means": cv_result,
"ML Models": [
"DecisionTreeClassifier",
"SVC",
"RandomForestClassifier",
"LogisticRegression",
"KNeighborsClassifier",
],
}
)
plt.figure(figsize=(8, 6))
g = sns.barplot("Cross Validation Means", "ML Models", data=cv_results, palette="Set1")
g.set_xlabel("Mean Accuracy")
plt.xlim(0.9, 1)
g.set_title("Cross Validation Scores")
#
# ## Ensemble Learning
votingC = VotingClassifier(
estimators=[
("svc", best_estimators[1]),
("rf", best_estimators[2]),
("knn", best_estimators[4]),
]
)
votingC = votingC.fit(X_train, y_train)
voting_pred = votingC.predict(X_test)
print(classification_report(y_test, voting_pred))
cm = confusion_matrix(y_test, voting_pred)
df1 = pd.DataFrame(columns=["Not", "Legendary"], index=["Not", "Legendary"], data=cm)
f, ax = plt.subplots(figsize=(4, 4))
sns.heatmap(
df1,
annot=True,
cmap="Reds",
fmt=".0f",
ax=ax,
linewidths=5,
cbar=False,
annot_kws={"size": 16},
)
plt.xlabel("Predicted Label")
plt.xticks(size=12)
plt.yticks(size=12, rotation=0)
plt.ylabel("True Label")
plt.title("Confusion Matrix", size=12)
plt.show()
|
[{"pokemon-data/pokemon_data.csv": {"column_names": "[\"#\", \"Name\", \"Type 1\", \"Type 2\", \"HP\", \"Attack\", \"Defense\", \"Sp. Atk\", \"Sp. Def\", \"Speed\", \"Generation\", \"Legendary\"]", "column_data_types": "{\"#\": \"int64\", \"Name\": \"object\", \"Type 1\": \"object\", \"Type 2\": \"object\", \"HP\": \"int64\", \"Attack\": \"int64\", \"Defense\": \"int64\", \"Sp. Atk\": \"int64\", \"Sp. Def\": \"int64\", \"Speed\": \"int64\", \"Generation\": \"int64\", \"Legendary\": \"bool\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 800 entries, 0 to 799\nData columns (total 12 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 # 800 non-null int64 \n 1 Name 800 non-null object\n 2 Type 1 800 non-null object\n 3 Type 2 414 non-null object\n 4 HP 800 non-null int64 \n 5 Attack 800 non-null int64 \n 6 Defense 800 non-null int64 \n 7 Sp. Atk 800 non-null int64 \n 8 Sp. Def 800 non-null int64 \n 9 Speed 800 non-null int64 \n 10 Generation 800 non-null int64 \n 11 Legendary 800 non-null bool \ndtypes: bool(1), int64(8), object(3)\nmemory usage: 69.7+ KB\n", "summary": "{\"#\": {\"count\": 800.0, \"mean\": 362.81375, \"std\": 208.34379756406662, \"min\": 1.0, \"25%\": 184.75, \"50%\": 364.5, \"75%\": 539.25, \"max\": 721.0}, \"HP\": {\"count\": 800.0, \"mean\": 69.25875, \"std\": 25.53466903233207, \"min\": 1.0, \"25%\": 50.0, \"50%\": 65.0, \"75%\": 80.0, \"max\": 255.0}, \"Attack\": {\"count\": 800.0, \"mean\": 79.00125, \"std\": 32.45736586949845, \"min\": 5.0, \"25%\": 55.0, \"50%\": 75.0, \"75%\": 100.0, \"max\": 190.0}, \"Defense\": {\"count\": 800.0, \"mean\": 73.8425, \"std\": 31.183500559332934, \"min\": 5.0, \"25%\": 50.0, \"50%\": 70.0, \"75%\": 90.0, \"max\": 230.0}, \"Sp. Atk\": {\"count\": 800.0, \"mean\": 72.82, \"std\": 32.7222941688016, \"min\": 10.0, \"25%\": 49.75, \"50%\": 65.0, \"75%\": 95.0, \"max\": 194.0}, \"Sp. Def\": {\"count\": 800.0, \"mean\": 71.9025, \"std\": 27.82891579711746, \"min\": 20.0, \"25%\": 50.0, \"50%\": 70.0, \"75%\": 90.0, \"max\": 230.0}, \"Speed\": {\"count\": 800.0, \"mean\": 68.2775, \"std\": 29.060473717161464, \"min\": 5.0, \"25%\": 45.0, \"50%\": 65.0, \"75%\": 90.0, \"max\": 180.0}, \"Generation\": {\"count\": 800.0, \"mean\": 3.32375, \"std\": 1.6612904004849451, \"min\": 1.0, \"25%\": 2.0, \"50%\": 3.0, \"75%\": 5.0, \"max\": 6.0}}", "examples": "{\"#\":{\"0\":1,\"1\":2,\"2\":3,\"3\":3},\"Name\":{\"0\":\"Bulbasaur\",\"1\":\"Ivysaur\",\"2\":\"Venusaur\",\"3\":\"VenusaurMega Venusaur\"},\"Type 1\":{\"0\":\"Grass\",\"1\":\"Grass\",\"2\":\"Grass\",\"3\":\"Grass\"},\"Type 2\":{\"0\":\"Poison\",\"1\":\"Poison\",\"2\":\"Poison\",\"3\":\"Poison\"},\"HP\":{\"0\":45,\"1\":60,\"2\":80,\"3\":80},\"Attack\":{\"0\":49,\"1\":62,\"2\":82,\"3\":100},\"Defense\":{\"0\":49,\"1\":63,\"2\":83,\"3\":123},\"Sp. Atk\":{\"0\":65,\"1\":80,\"2\":100,\"3\":122},\"Sp. Def\":{\"0\":65,\"1\":80,\"2\":100,\"3\":120},\"Speed\":{\"0\":45,\"1\":60,\"2\":80,\"3\":80},\"Generation\":{\"0\":1,\"1\":1,\"2\":1,\"3\":1},\"Legendary\":{\"0\":false,\"1\":false,\"2\":false,\"3\":false}}"}}]
| true | 1 |
<start_data_description><data_path>pokemon-data/pokemon_data.csv:
<column_names>
['#', 'Name', 'Type 1', 'Type 2', 'HP', 'Attack', 'Defense', 'Sp. Atk', 'Sp. Def', 'Speed', 'Generation', 'Legendary']
<column_types>
{'#': 'int64', 'Name': 'object', 'Type 1': 'object', 'Type 2': 'object', 'HP': 'int64', 'Attack': 'int64', 'Defense': 'int64', 'Sp. Atk': 'int64', 'Sp. Def': 'int64', 'Speed': 'int64', 'Generation': 'int64', 'Legendary': 'bool'}
<dataframe_Summary>
{'#': {'count': 800.0, 'mean': 362.81375, 'std': 208.34379756406662, 'min': 1.0, '25%': 184.75, '50%': 364.5, '75%': 539.25, 'max': 721.0}, 'HP': {'count': 800.0, 'mean': 69.25875, 'std': 25.53466903233207, 'min': 1.0, '25%': 50.0, '50%': 65.0, '75%': 80.0, 'max': 255.0}, 'Attack': {'count': 800.0, 'mean': 79.00125, 'std': 32.45736586949845, 'min': 5.0, '25%': 55.0, '50%': 75.0, '75%': 100.0, 'max': 190.0}, 'Defense': {'count': 800.0, 'mean': 73.8425, 'std': 31.183500559332934, 'min': 5.0, '25%': 50.0, '50%': 70.0, '75%': 90.0, 'max': 230.0}, 'Sp. Atk': {'count': 800.0, 'mean': 72.82, 'std': 32.7222941688016, 'min': 10.0, '25%': 49.75, '50%': 65.0, '75%': 95.0, 'max': 194.0}, 'Sp. Def': {'count': 800.0, 'mean': 71.9025, 'std': 27.82891579711746, 'min': 20.0, '25%': 50.0, '50%': 70.0, '75%': 90.0, 'max': 230.0}, 'Speed': {'count': 800.0, 'mean': 68.2775, 'std': 29.060473717161464, 'min': 5.0, '25%': 45.0, '50%': 65.0, '75%': 90.0, 'max': 180.0}, 'Generation': {'count': 800.0, 'mean': 3.32375, 'std': 1.6612904004849451, 'min': 1.0, '25%': 2.0, '50%': 3.0, '75%': 5.0, 'max': 6.0}}
<dataframe_info>
RangeIndex: 800 entries, 0 to 799
Data columns (total 12 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 # 800 non-null int64
1 Name 800 non-null object
2 Type 1 800 non-null object
3 Type 2 414 non-null object
4 HP 800 non-null int64
5 Attack 800 non-null int64
6 Defense 800 non-null int64
7 Sp. Atk 800 non-null int64
8 Sp. Def 800 non-null int64
9 Speed 800 non-null int64
10 Generation 800 non-null int64
11 Legendary 800 non-null bool
dtypes: bool(1), int64(8), object(3)
memory usage: 69.7+ KB
<some_examples>
{'#': {'0': 1, '1': 2, '2': 3, '3': 3}, 'Name': {'0': 'Bulbasaur', '1': 'Ivysaur', '2': 'Venusaur', '3': 'VenusaurMega Venusaur'}, 'Type 1': {'0': 'Grass', '1': 'Grass', '2': 'Grass', '3': 'Grass'}, 'Type 2': {'0': 'Poison', '1': 'Poison', '2': 'Poison', '3': 'Poison'}, 'HP': {'0': 45, '1': 60, '2': 80, '3': 80}, 'Attack': {'0': 49, '1': 62, '2': 82, '3': 100}, 'Defense': {'0': 49, '1': 63, '2': 83, '3': 123}, 'Sp. Atk': {'0': 65, '1': 80, '2': 100, '3': 122}, 'Sp. Def': {'0': 65, '1': 80, '2': 100, '3': 120}, 'Speed': {'0': 45, '1': 60, '2': 80, '3': 80}, 'Generation': {'0': 1, '1': 1, '2': 1, '3': 1}, 'Legendary': {'0': False, '1': False, '2': False, '3': False}}
<end_description>
| 7,024 | 0 | 7,788 | 7,024 |
69393268
|
# ## Cluster Analysis
# Cluster analysis is a statistical method for processing data. It works by organizing items (observations) into groups, or clusters, on the basis of how closely associated they are.
# The goal is to maximize similarity between observations within a cluster and maximize dissimilarity between clusters.
# We can group the observations differently by selecting different features on which to base the clustering.
# ## 1. Importing Libraries:
import numpy as np, pandas as pd
import matplotlib.pyplot as plt, seaborn as sns
sns.set()
from sklearn.cluster import KMeans
# ## 2. Loading Data
data = pd.read_csv("../input/iris-data/iris_dataset.csv")
data.head()
# Our data consists of flowers' shape parameters. We are going to group flowers on the basis of their shape features. Sepals and petals define the shape of the flower, so we have lengths and widths of both, sepals and petals.
# ## 3. Plotting Data
# Since 2D scatterplot can only work for two features, we will choose sepal length and width as those two features and for now be grouping our data on those two features as well.
data1 = data[["sepal_length", "sepal_width"]]
plt.scatter(data1["sepal_length"], data1["sepal_width"])
plt.xlabel("Length of the Sepal")
plt.ylabel("Width of the Sepal")
plt.show()
# From the graph above, we can see that sepal lengths fall roughly in the range 4.0 to 8.0 and sepal widths in the range 2.0 to 4.5.
# ## 4. Clustering Data:
# We will now be grouping all given flowers(observations) on the basis of these two features (i.e. sepal length and sepal width).
# Initially, we will be grouping observations in two clusters.
kmeans = KMeans(2) # instantiating KMeans(<number of clusters we want>)
kmeans.fit(data1) # fitting our data on the model
data2 = data.copy() # making a copy of original data for visualization ahead
data2["cluster_pred"] = kmeans.fit_predict(
data1
) # adding a new column in data to contain each observation's "cluster"
plt.scatter(
data2["sepal_length"], data2["sepal_width"], c=data2["cluster_pred"], cmap="rainbow"
)
plt.xlabel("Length of the Sepal")
plt.ylabel("Width of the Sepal")
plt.show()
# By looking at this grouped scatter plot, we can see that the observations are grouped mainly on the basis of their sepal length and not on width. This is because the length of the sepal has a larger range than the sepal width, and therefore length carries more weight than width. To overcome this, we must standardize our features.
# ## 5. Standardizing Data:
# For standardizing, we will import 'preprocessing' method from sklearn.
from sklearn import preprocessing
data1_scaled = preprocessing.scale(data1) # standardization
data1_scaled[:5] # printing first five observations of scaled data
# ## 6. Clustering Scaled Data
kmeans_scaled = KMeans(2)
kmeans_scaled.fit(data1_scaled)
data2["cluster_pred"] = kmeans_scaled.fit_predict(data1_scaled)
plt.scatter(
data2["sepal_length"], data2["sepal_width"], c=data2["cluster_pred"], cmap="rainbow"
)
plt.xlabel("Length of the Sepal")
plt.ylabel("Width of the Sepal")
plt.show()
# The above grouping weighs both features (i.e. the length and width of the sepals) equally. We can control the weight of a feature by rescaling its range; a short sketch of this idea follows below.
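# (Added sketch, not part of the original notebook.) One hedged way to up-weight a feature after
# standardization is simply to multiply its scaled column by a factor before clustering; the
# factor of 2.0 below is an arbitrary illustration, not a recommended value.
weighted = data1_scaled.copy()
weighted[:, 0] = weighted[:, 0] * 2.0  # give sepal length twice the influence of sepal width
demo_km = KMeans(2)
demo_labels = demo_km.fit_predict(weighted)
plt.scatter(data1["sepal_length"], data1["sepal_width"], c=demo_labels, cmap="rainbow")
plt.title("Clusters with sepal length up-weighted (illustration)")
plt.xlabel("Length of the Sepal")
plt.ylabel("Width of the Sepal")
plt.show()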
# Let's repeat the process for 3 and 4 clusters.
kmeans_scaled = KMeans(3)
kmeans_scaled.fit(data1_scaled)
data2["cluster_pred"] = kmeans_scaled.fit_predict(data1_scaled)
plt.scatter(
data2["sepal_length"], data2["sepal_width"], c=data2["cluster_pred"], cmap="rainbow"
)
plt.title("Clusters = 3")
plt.xlabel("Length of the Sepal")
plt.ylabel("Width of the Sepal")
plt.show()
kmeans_scaled = KMeans(4)
kmeans_scaled.fit(data1_scaled)
data2["cluster_pred"] = kmeans_scaled.fit_predict(data1_scaled)
plt.scatter(
data2["sepal_length"], data2["sepal_width"], c=data2["cluster_pred"], cmap="rainbow"
)
plt.title("Clusters = 4")
plt.xlabel("Length of the Sepal")
plt.ylabel("Width of the Sepal")
plt.show()
# We can cluster the observations into any number of groups we want. But what is the optimal number of clusters? We use the elbow method for this purpose.
# ## 7. Choosing the Number of Clusters
# How many clusters are enough to represent all the observations of the data set so that each cluster is meaningfully different from the others, without the number of clusters getting too large?
# A quick answer comes from the elbow method. It is a line graph with the number of clusters on the x-axis and the within-cluster sum of squares (WCSS), i.e. the sum of squared distances of each point to its cluster centroid, on the y-axis. The line forms a sort of elbow at some number of clusters, after which adding more clusters no longer decreases WCSS significantly.
# - WCSS = 0 when each observation is its own cluster (meaning: no. of clusters = no. of observations)
# - WCSS is at its maximum when every observation is in the same cluster (meaning: no. of clusters = 1)
# We want WCSS as close to 0 as possible, but at the same time we don't want too many clusters. The elbow is the point to stop.
# Let's find the value of wcss for numbers of clusters 1 through 9.
wcss = [] # empty list to contain wcss value for number of clusters 1, 2, 3, ..., 9
cl_number = 10
for i in range(1, cl_number):
kmeans = KMeans(i)
kmeans.fit(data1_scaled)
wcss_i = kmeans.inertia_
wcss.append(wcss_i)
wcss
plt.plot(range(1, 10), wcss)
plt.title("The Elbow Method")
plt.xlabel("Number of Clusters")
plt.ylabel("WCSS")
plt.show()
# Here, we can see that 3 or 4 clusters are the best candidates, as WCSS does not decrease much beyond them. I will choose 3 for now, but choosing 4 is also reasonable depending on the problem at hand. A silhouette-score check is sketched below as a complementary sanity check.
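# (Added sketch, not part of the original notebook.) The silhouette score is a complementary way
# to compare candidate values of k: higher is better, with values near 1 meaning tight,
# well-separated clusters. This only assumes `data1_scaled` from above.
from sklearn.metrics import silhouette_score

for k in range(2, 6):
    labels_k = KMeans(k).fit_predict(data1_scaled)
    print(f"k = {k}: silhouette score = {silhouette_score(data1_scaled, labels_k):.3f}")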
# **Clustering into Three**
kmeans_scaled = KMeans(3)
kmeans_scaled.fit(data1_scaled)
data2["cluster_pred"] = kmeans_scaled.fit_predict(data1_scaled)
plt.scatter(
data2["sepal_length"], data2["sepal_width"], c=data2["cluster_pred"], cmap="rainbow"
)
plt.title("Clusters = 3")
plt.xlabel("Length of the Sepal")
plt.ylabel("Width of the Sepal")
plt.show()
# ## 8. Considering All of the Features in Clustering
# We have clustered our observations on the basis of sepal shape only. We can choose how many and which features to consider for clustering. Let's take petal shape into consideration too, repeat the clustering, and see how the clusters differ from the ones we got using sepal shape alone.
# We cannot show all of the features in the same 2D scatter plot, as it can only show two variables, so let's plot the sepal and petal features separately.
plt.scatter(data["sepal_length"], data["sepal_width"])
plt.title("Sepal Shape")
plt.xlabel("Sepal Length")
plt.ylabel("Sepal Width")
plt.show()
plt.scatter(data["petal_length"], data["petal_width"])
plt.title("Petal Shape")
plt.xlabel("Petal Length")
plt.ylabel("Petal Width")
plt.show()
# **Standardizing Data**
from sklearn import preprocessing
data_scaled = preprocessing.scale(data)
data_scaled[:5]
# **Choosing Number of Clusters**
wcss = [] # empty list to contain wcss value for number of clusters 1, 2, 3, ..., 9
cl_number = 10
for i in range(1, cl_number):
kmeans = KMeans(i)
kmeans.fit(data_scaled)
wcss_i = kmeans.inertia_
wcss.append(wcss_i)
wcss
plt.plot(range(1, 10), wcss)
plt.title("The Elbow Method")
plt.xlabel("Number of Clusters")
plt.ylabel("WCSS")
plt.show()
# A number of clusters of 2 or 3 seems fine for this problem. We will choose k = 3.
# Here 'k' denotes the number of clusters; this method of cluster analysis is known as **k-means clustering**.
# **Clustering Data**
kmeans_scaled = KMeans(3)
kmeans_scaled.fit(data_scaled)
data["cluster_pred"] = kmeans_scaled.fit_predict(data_scaled)
plt.scatter(
data["sepal_length"], data["sepal_width"], c=data["cluster_pred"], cmap="rainbow"
)
plt.xlabel("Length of the Sepal")
plt.ylabel("Width of the Sepal")
plt.show()
plt.scatter(
data["petal_length"], data["petal_width"], c=data["cluster_pred"], cmap="rainbow"
)
plt.xlabel("Length of the Petal")
plt.ylabel("Width of the Petal")
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/393/69393268.ipynb
| null | null |
[{"Id": 69393268, "ScriptId": 18946727, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7112435, "CreationDate": "07/30/2021 12:38:07", "VersionNumber": 1.0, "Title": "K Means Clustering", "EvaluationDate": "07/30/2021", "IsChange": true, "TotalLines": 211.0, "LinesInsertedFromPrevious": 211.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
| null | null | null | null |
| false | 0 | 2,422 | 3 | 2,422 | 2,422 |
||
69270894
|
# **This notebook is an exercise in the [Intermediate Machine Learning](https://www.kaggle.com/learn/intermediate-machine-learning) course. You can reference the tutorial at [this link](https://www.kaggle.com/alexisbcook/categorical-variables).**
# ---
# By encoding **categorical variables**, you'll obtain your best results thus far!
# # Setup
# The questions below will give you feedback on your work. Run the following cell to set up the feedback system.
# Set up code checking
import os
if not os.path.exists("../input/train.csv"):
os.symlink("../input/home-data-for-ml-course/train.csv", "../input/train.csv")
os.symlink("../input/home-data-for-ml-course/test.csv", "../input/test.csv")
from learntools.core import binder
binder.bind(globals())
from learntools.ml_intermediate.ex3 import *
print("Setup Complete")
# In this exercise, you will work with data from the [Housing Prices Competition for Kaggle Learn Users](https://www.kaggle.com/c/home-data-for-ml-course).
# 
# Run the next code cell without changes to load the training and validation sets in `X_train`, `X_valid`, `y_train`, and `y_valid`. The test set is loaded in `X_test`.
import pandas as pd
from sklearn.model_selection import train_test_split
# Read the data
X = pd.read_csv("../input/train.csv", index_col="Id")
X_test = pd.read_csv("../input/test.csv", index_col="Id")
# Remove rows with missing target, separate target from predictors
X.dropna(axis=0, subset=["SalePrice"], inplace=True)
y = X.SalePrice
X.drop(["SalePrice"], axis=1, inplace=True)
# To keep things simple, we'll drop columns with missing values
cols_with_missing = [col for col in X.columns if X[col].isnull().any()]
X.drop(cols_with_missing, axis=1, inplace=True)
X_test.drop(cols_with_missing, axis=1, inplace=True)
# Break off validation set from training data
X_train, X_valid, y_train, y_valid = train_test_split(
X, y, train_size=0.8, test_size=0.2, random_state=0
)
# Use the next code cell to print the first five rows of the data.
X_train.head()
# Notice that the dataset contains both numerical and categorical variables. You'll need to encode the categorical data before training a model.
# To compare different models, you'll use the same `score_dataset()` function from the tutorial. This function reports the [mean absolute error](https://en.wikipedia.org/wiki/Mean_absolute_error) (MAE) from a random forest model.
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error
# function for comparing different approaches
def score_dataset(X_train, X_valid, y_train, y_valid):
model = RandomForestRegressor(n_estimators=100, random_state=0)
model.fit(X_train, y_train)
preds = model.predict(X_valid)
return mean_absolute_error(y_valid, preds)
# # Step 1: Drop columns with categorical data
# You'll get started with the most straightforward approach. Use the code cell below to preprocess the data in `X_train` and `X_valid` to remove columns with categorical data. Set the preprocessed DataFrames to `drop_X_train` and `drop_X_valid`, respectively.
# Fill in the lines below: drop columns in training and validation data
drop_X_train = ____
drop_X_valid = ____
# Check your answers
step_1.check()
# Lines below will give you a hint or solution code
# step_1.hint()
# step_1.solution()
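# (Added sketch, not the official course solution.) One hedged way to fill in Step 1 is to keep
# only the non-object columns; `select_dtypes` does this in a single call.
drop_X_train = X_train.select_dtypes(exclude=["object"])
drop_X_valid = X_valid.select_dtypes(exclude=["object"])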
# Run the next code cell to get the MAE for this approach.
print("MAE from Approach 1 (Drop categorical variables):")
print(score_dataset(drop_X_train, drop_X_valid, y_train, y_valid))
# Before jumping into ordinal encoding, we'll investigate the dataset. Specifically, we'll look at the `'Condition2'` column. The code cell below prints the unique entries in both the training and validation sets.
print(
"Unique values in 'Condition2' column in training data:",
X_train["Condition2"].unique(),
)
print(
"\nUnique values in 'Condition2' column in validation data:",
X_valid["Condition2"].unique(),
)
# # Step 2: Ordinal encoding
# ### Part A
# If you now write code to:
# - fit an ordinal encoder to the training data, and then
# - use it to transform both the training and validation data,
# you'll get an error. Can you see why this is the case? (_You'll need to use the above output to answer this question._)
# Check your answer (Run this code cell to receive credit!)
step_2.a.check()
# step_2.a.hint()
# This is a common problem that you'll encounter with real-world data, and there are many approaches to fixing this issue. For instance, you can write a custom ordinal encoder to deal with new categories. The simplest approach, however, is to drop the problematic categorical columns.
# Run the code cell below to save the problematic columns to a Python list `bad_label_cols`. Likewise, columns that can be safely ordinal encoded are stored in `good_label_cols`.
# All categorical columns
object_cols = [col for col in X_train.columns if X_train[col].dtype == "object"]
# Columns that can be safely ordinal encoded
good_label_cols = [
col for col in object_cols if set(X_valid[col]).issubset(set(X_train[col]))
]
# Problematic columns that will be dropped from the dataset
bad_label_cols = list(set(object_cols) - set(good_label_cols))
print("Categorical columns that will be ordinal encoded:", good_label_cols)
print("\nCategorical columns that will be dropped from the dataset:", bad_label_cols)
# ### Part B
# Use the next code cell to ordinal encode the data in `X_train` and `X_valid`. Set the preprocessed DataFrames to `label_X_train` and `label_X_valid`, respectively.
# - We have provided code below to drop the categorical columns in `bad_label_cols` from the dataset.
# - You should ordinal encode the categorical columns in `good_label_cols`.
from sklearn.preprocessing import OrdinalEncoder
# Drop categorical columns that will not be encoded
label_X_train = X_train.drop(bad_label_cols, axis=1)
label_X_valid = X_valid.drop(bad_label_cols, axis=1)
# Apply ordinal encoder
____ # Your code here
# Check your answer
step_2.b.check()
# Lines below will give you a hint or solution code
# step_2.b.hint()
# step_2.b.solution()
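# (Added sketch, not the official course solution.) One hedged way to complete the ordinal
# encoding: fit the encoder on the training columns in `good_label_cols` and apply it to both
# splits, leaving the remaining columns of `label_X_train` / `label_X_valid` untouched.
ordinal_encoder = OrdinalEncoder()
label_X_train[good_label_cols] = ordinal_encoder.fit_transform(X_train[good_label_cols])
label_X_valid[good_label_cols] = ordinal_encoder.transform(X_valid[good_label_cols])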
# Run the next code cell to get the MAE for this approach.
print("MAE from Approach 2 (Ordinal Encoding):")
print(score_dataset(label_X_train, label_X_valid, y_train, y_valid))
# So far, you've tried two different approaches to dealing with categorical variables. And, you've seen that encoding categorical data yields better results than removing columns from the dataset.
# Soon, you'll try one-hot encoding. Before then, there's one additional topic we need to cover. Begin by running the next code cell without changes.
# Get number of unique entries in each column with categorical data
object_nunique = list(map(lambda col: X_train[col].nunique(), object_cols))
d = dict(zip(object_cols, object_nunique))
# Print number of unique entries by column, in ascending order
sorted(d.items(), key=lambda x: x[1])
# # Step 3: Investigating cardinality
# ### Part A
# The output above shows, for each column with categorical data, the number of unique values in the column. For instance, the `'Street'` column in the training data has two unique values: `'Grvl'` and `'Pave'`, corresponding to a gravel road and a paved road, respectively.
# We refer to the number of unique entries of a categorical variable as the **cardinality** of that categorical variable. For instance, the `'Street'` variable has cardinality 2.
# Use the output above to answer the questions below.
# Fill in the line below: How many categorical variables in the training data
# have cardinality greater than 10?
high_cardinality_numcols = ____
# Fill in the line below: How many columns are needed to one-hot encode the
# 'Neighborhood' variable in the training data?
num_cols_neighborhood = ____
# Check your answers
step_3.a.check()
# Lines below will give you a hint or solution code
# step_3.a.hint()
# step_3.a.solution()
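# (Added sketch, not the official answers.) Both quantities can be read off the dictionary `d`
# built above rather than typed in by hand.
high_cardinality_numcols = sum(1 for n in d.values() if n > 10)
num_cols_neighborhood = d["Neighborhood"]
print(high_cardinality_numcols, num_cols_neighborhood)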
# ### Part B
# For large datasets with many rows, one-hot encoding can greatly expand the size of the dataset. For this reason, we typically will only one-hot encode columns with relatively low cardinality. Then, high cardinality columns can either be dropped from the dataset, or we can use ordinal encoding.
# As an example, consider a dataset with 10,000 rows, and containing one categorical column with 100 unique entries.
# - If this column is replaced with the corresponding one-hot encoding, how many entries are added to the dataset?
# - If we instead replace the column with the ordinal encoding, how many entries are added?
# Use your answers to fill in the lines below.
# Fill in the line below: How many entries are added to the dataset by
# replacing the column with a one-hot encoding?
OH_entries_added = ____
# Fill in the line below: How many entries are added to the dataset by
# replacing the column with an ordinal encoding?
label_entries_added = ____
# Check your answers
step_3.b.check()
# Lines below will give you a hint or solution code
# step_3.b.hint()
# step_3.b.solution()
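# (Added sketch, not the official answers.) With 10,000 rows and one categorical column holding
# 100 unique values: a one-hot encoding replaces the 10,000 original entries with 10,000 * 100
# entries, while an ordinal encoding keeps a single column of the same length.
OH_entries_added = 1e4 * 100 - 1e4
label_entries_added = 0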
# Next, you'll experiment with one-hot encoding. But, instead of encoding all of the categorical variables in the dataset, you'll only create a one-hot encoding for columns with cardinality less than 10.
# Run the code cell below without changes to set `low_cardinality_cols` to a Python list containing the columns that will be one-hot encoded. Likewise, `high_cardinality_cols` contains a list of categorical columns that will be dropped from the dataset.
# Columns that will be one-hot encoded
low_cardinality_cols = [col for col in object_cols if X_train[col].nunique() < 10]
# Columns that will be dropped from the dataset
high_cardinality_cols = list(set(object_cols) - set(low_cardinality_cols))
print("Categorical columns that will be one-hot encoded:", low_cardinality_cols)
print(
"\nCategorical columns that will be dropped from the dataset:",
high_cardinality_cols,
)
# # Step 4: One-hot encoding
# Use the next code cell to one-hot encode the data in `X_train` and `X_valid`. Set the preprocessed DataFrames to `OH_X_train` and `OH_X_valid`, respectively.
# - The full list of categorical columns in the dataset can be found in the Python list `object_cols`.
# - You should only one-hot encode the categorical columns in `low_cardinality_cols`. All other categorical columns should be dropped from the dataset.
from sklearn.preprocessing import OneHotEncoder
# Use as many lines of code as you need!
OH_X_train = ____ # Your code here
OH_X_valid = ____ # Your code here
# Check your answer
step_4.check()
# Lines below will give you a hint or solution code
# step_4.hint()
# step_4.solution()
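# (Added sketch, not the official course solution.) One hedged approach: one-hot encode only the
# low-cardinality columns with handle_unknown="ignore", restore the row index the encoder drops,
# and concatenate the result with the numeric columns. `sparse=False` assumes an older
# scikit-learn; newer versions call this argument `sparse_output`.
OH_encoder = OneHotEncoder(handle_unknown="ignore", sparse=False)
OH_cols_train = pd.DataFrame(OH_encoder.fit_transform(X_train[low_cardinality_cols]))
OH_cols_valid = pd.DataFrame(OH_encoder.transform(X_valid[low_cardinality_cols]))
OH_cols_train.index = X_train.index  # one-hot encoding removed the index; put it back
OH_cols_valid.index = X_valid.index
num_X_train = X_train.drop(object_cols, axis=1)  # drop all categorical columns
num_X_valid = X_valid.drop(object_cols, axis=1)
OH_X_train = pd.concat([num_X_train, OH_cols_train], axis=1)
OH_X_valid = pd.concat([num_X_valid, OH_cols_valid], axis=1)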
# Run the next code cell to get the MAE for this approach.
print("MAE from Approach 3 (One-Hot Encoding):")
print(score_dataset(OH_X_train, OH_X_valid, y_train, y_valid))
# # Generate test predictions and submit your results
# After you complete Step 4, if you'd like to use what you've learned to submit your results to the leaderboard, you'll need to preprocess the test data before generating predictions.
# **This step is completely optional, and you do not need to submit results to the leaderboard to successfully complete the exercise.**
# Check out the previous exercise if you need help with remembering how to [join the competition](https://www.kaggle.com/c/home-data-for-ml-course) or save your results to CSV. Once you have generated a file with your results, follow the instructions below:
# 1. Begin by clicking on the **Save Version** button in the top right corner of the window. This will generate a pop-up window.
# 2. Ensure that the **Save and Run All** option is selected, and then click on the **Save** button.
# 3. This generates a window in the bottom left corner of the notebook. After it has finished running, click on the number to the right of the **Save Version** button. This pulls up a list of versions on the right of the screen. Click on the ellipsis **(...)** to the right of the most recent version, and select **Open in Viewer**. This brings you into view mode of the same page. You will need to scroll down to get back to these instructions.
# 4. Click on the **Output** tab on the right of the screen. Then, click on the file you would like to submit, and click on the blue **Submit** button to submit your results to the leaderboard.
# You have now successfully submitted to the competition!
# If you want to keep working to improve your performance, select the **Edit** button in the top right of the screen. Then you can change your code and repeat the process. There's a lot of room to improve, and you will climb up the leaderboard as you work.
#
# (Optional) Your code here
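# (Added sketch, optional and not part of the exercise.) To submit, the test data needs the same
# preprocessing as Step 4. X_test can still contain missing values in columns that were complete
# in the training data, so categorical gaps are filled with a placeholder (encoded as all zeros
# thanks to handle_unknown="ignore") and numeric gaps crudely with 0, purely to keep the sketch short.
X_test_filled = X_test.copy()
X_test_filled[low_cardinality_cols] = X_test_filled[low_cardinality_cols].fillna("Missing")
OH_cols_test = pd.DataFrame(OH_encoder.transform(X_test_filled[low_cardinality_cols]))
OH_cols_test.index = X_test.index
OH_X_test = pd.concat([X_test_filled.drop(object_cols, axis=1), OH_cols_test], axis=1).fillna(0)
final_model = RandomForestRegressor(n_estimators=100, random_state=0)
final_model.fit(OH_X_train, y_train)
preds_test = final_model.predict(OH_X_test)
output = pd.DataFrame({"Id": X_test.index, "SalePrice": preds_test})
output.to_csv("submission.csv", index=False)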
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/270/69270894.ipynb
| null | null |
[{"Id": 69270894, "ScriptId": 18910986, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5708358, "CreationDate": "07/28/2021 23:28:34", "VersionNumber": 1.0, "Title": "Exercise: Categorical Variables", "EvaluationDate": "07/28/2021", "IsChange": false, "TotalLines": 281.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 281.0, "LinesInsertedFromFork": 0.0, "LinesDeletedFromFork": 0.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 281.0, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 3,440 | 0 | 3,440 | 3,440 |
||
69270504
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import os
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.impute import SimpleImputer
# machine learning
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC, LinearSVC
train_df = pd.read_csv("/kaggle/input/titanic/train.csv")
test_df = pd.read_csv("/kaggle/input/titanic/test.csv")
display(train_df.head())
print(train_df.shape)
print("#" * 50)
display(test_df.head())
print(test_df.shape)
display(train_df.info())
display(test_df.info())
train_df.groupby("Survived").hist(figsize=(9, 9))
fig, ax = plt.subplots(figsize=(15, 5))
corr_matrix = train_df.corr().abs()
sns.heatmap(corr_matrix, annot=True, linewidths=0.5)
train_df["Ticket"].unique()
# train_df Cabin
train_df = train_df.drop(["Ticket", "Name"], axis=1)
test_df = test_df.drop(["Ticket", "Name"], axis=1)
display(train_df.head())
print(train_df.info())
display(test_df.head())
print(test_df.info())
train_df["CabinNew"] = train_df["Cabin"].notna()
test_df["CabinNew"] = test_df["Cabin"].notna()
# pd.isna(train_df["Cabin"])
display(train_df.head())
print(train_df.info())
display(test_df.head())
print(test_df.info())
train_df = train_df.drop(["Cabin"], axis=1)
test_df = test_df.drop(["Cabin"], axis=1)
display(train_df.head())
print(train_df.info())
display(test_df.head())
print(test_df.info())
print(train_df.isnull().sum())
print(test_df.isnull().sum())
train_df["Age"] = train_df["Age"].fillna(train_df["Age"].mean())
train_df["Embarked"] = train_df["Embarked"].fillna("S")
test_df["Age"] = test_df["Age"].fillna(train_df["Age"].mean())
test_df["Embarked"] = test_df["Embarked"].fillna("S")
test_df["Fare"].fillna(test_df["Fare"].dropna().median(), inplace=True)
display(train_df.head())
print(train_df.info())
display(test_df.head())
print(test_df.info())
train_df["Sex"] = train_df.Sex.map({"male": 0, "female": 1})
train_df["Embarked"] = train_df.Embarked.map({"S": 0, "C": 1, "Q": 2})
test_df["Sex"] = test_df.Sex.map({"male": 0, "female": 1})
test_df["Embarked"] = test_df.Embarked.map({"S": 0, "C": 1, "Q": 2})
display(train_df.head())
print(train_df.info())
display(test_df.head())
print(test_df.info())
X_train = train_df.drop("Survived", axis=1)
Y_train = train_df["Survived"]
X_test = test_df # .drop("PassengerId", axis=1).copy()
X_train.shape, Y_train.shape, X_test.shape
# from sklearn.model_selection import train_test_split
# X_train,X_val, y_train, y_val = train_test_split(X_train,Y_train, test_size=0.2, random_state=42,shuffle=True) # Try adding `stratify` here
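# (Added sketch, following the "Try adding `stratify`" hint above; not part of the original flow.)
# A stratified hold-out split keeps the survived/not-survived ratio the same in both parts, which
# gives a steadier validation score on a dataset this small. New variable names are used so the
# cells below, which train on the full training set, behave exactly as before.
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier

X_tr, X_val, y_tr, y_val = train_test_split(
    X_train, Y_train, test_size=0.2, random_state=42, stratify=Y_train
)
val_model = RandomForestClassifier(random_state=42).fit(X_tr, y_tr)
print("Hold-out validation accuracy:", round(val_model.score(X_val, y_val), 3))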
X_train["CabinNew"] = X_train["CabinNew"].astype(int)
X_train.info()
# Logistic Regression
# logreg = LogisticRegression()
# logreg.fit(X_train, Y_train)
# # Y_pred = logreg.predict(X_test)
# # acc_log = round(logreg.score(X_train, Y_train) * 100, 2)
# # acc_log
# Support Vector Machines
svc = SVC()
svc.fit(X_train, Y_train)
Y_pred = svc.predict(X_test)
acc_svc = round(svc.score(X_train, Y_train) * 100, 2)
acc_svc
from sklearn.ensemble import RandomForestClassifier
model = RandomForestClassifier()
model.fit(X_train, Y_train)
Y_pred_rf = model.predict(X_test)
acc_rf = round(model.score(X_train, Y_train) * 100, 2)
acc_rf
from sklearn.neighbors import KNeighborsClassifier
model = KNeighborsClassifier(n_neighbors=3)
model.fit(X_train, Y_train)
Y_pred_KNN = model.predict(X_test)
accKNN = round(model.score(X_train, Y_train) * 100, 2)
accKNN
test_df
out_df = pd.DataFrame()
out_df["PassengerId"] = test_df["PassengerId"]
out_df["Survived"] = pd.DataFrame(Y_pred_rf)
out_df.to_csv("submission.csv")
out_df
# print("The accuracy of the classifier on the validation set is ", (logreg.score(X_val, y_val)))
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/270/69270504.ipynb
| null | null |
[{"Id": 69270504, "ScriptId": 18886136, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7822265, "CreationDate": "07/28/2021 23:17:52", "VersionNumber": 1.0, "Title": "notebook403056d04d", "EvaluationDate": "07/28/2021", "IsChange": true, "TotalLines": 159.0, "LinesInsertedFromPrevious": 159.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 1,611 | 0 | 1,611 | 1,611 |
||
69270064
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# plt.style.use('ggplot')
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
# Reading Data from CSV file
x = pd.read_csv("../input/neolen-house-price-prediction/train.csv")
y = pd.read_csv("../input/neolen-house-price-prediction/test.csv")
# z = pd.read_csv("C:\\Users\\hp\\Desktop\\House price prediction\\sample_submission.csv")
x.info()
x.head()
x
display(x.describe())
print(x.shape)
y.describe()
x.SalePrice.hist(bins=50, rwidth=0.6, figsize=(20, 10))
plt.title("Price of the Houses")
plt.show()
plt.hist(x["SalePrice"], color="green")
sns.distplot(x["SalePrice"], color="red")
sns.pairplot(
x[
[
"SalePrice",
"LotArea",
"YearBuilt",
"1stFlrSF",
"2ndFlrSF",
"FullBath",
"BedroomAbvGr",
"TotRmsAbvGrd",
]
],
diag_kind="kde",
)
# sns.heatmap(x.corr(), annot = True)
x.isnull().any()
x.isnull().sum()
nullV = x.isnull().sum().sort_values(ascending=False)
nullV
Y = x[["SalePrice"]]
x = x.drop(
["SalePrice", "Alley", "PoolQC", "Fence", "MiscFeature", "FireplaceQu"], axis=1
)
y.isnull().sum()
y = y.drop(["Alley", "PoolQC", "Fence", "MiscFeature", "FireplaceQu"], axis=1)
# Looking Into the Data
print("Our training dataset has {} rows and {} columns".format(x.shape[0], x.shape[1]))
print("Our Test data has {} rows and {} columns".format(y.shape[0], y.shape[1]))
# x_object = x.select_dtypes('object')
# display(x_object.columns)
# display(x_object.shape)
# x_num = x.select_dtypes(exclude=['object'])
# display(x_num.columns)
# display(x_num.shape)
# x_object = pd.get_dummies(x_object)
# x_object.head()
x_enc = pd.get_dummies(x)
x_enc
x_enc.columns
y_enc = pd.get_dummies(y)
y_enc
y_enc.columns
print(x_enc.isnull().sum())
# using sklearn "simpleimputer" to get rid of the null values using most_frequent strategy
from sklearn.impute import SimpleImputer
# initiate an Imputer
imputer = SimpleImputer(missing_values=np.nan, strategy="median")
# start imputing
x_imputed = imputer.fit_transform(x_enc)
# convert to dataframe again
x_imputed = pd.DataFrame(x_imputed, columns=x_enc.columns)
x_imputed
# using sklearn "simpleimputer" to get rid of the null values using most_frequent strategy
from sklearn.impute import SimpleImputer
# initiate an Imputer
imputer = SimpleImputer(missing_values=np.nan, strategy="median")
# start imputing
y_imputed = imputer.fit_transform(y_enc)
# convert to dataframe again
y_imputed = pd.DataFrame(y_imputed, columns=y_enc.columns)
y_imputed
# check if there is null values
x_imputed.isnull().sum()
# check if there is null values
y_imputed.isnull().sum()
# fig, ax = plt.subplots(figsize=(20,15))
# corr_matrix = x_enc.corr().abs()
# sns.heatmap(corr_matrix, annot=True, linewidths=.5)
# Splitting data
Xx = x_imputed[
[
"LotArea",
"YearBuilt",
"1stFlrSF",
"2ndFlrSF",
"FullBath",
"BedroomAbvGr",
"TotRmsAbvGrd",
]
].astype(float)
from sklearn.preprocessing import StandardScaler
# Create the scaler
s = StandardScaler()
# Scaled data
X_imputed = s.fit_transform(Xx)
# #create a dataframe
x_col = list(Xx.columns.values)
X_imputedd = pd.DataFrame(X_imputed, columns=x_col).astype(float)
X_imputedd
# Splitting data
Yy = y_imputed[
[
"LotArea",
"YearBuilt",
"1stFlrSF",
"2ndFlrSF",
"FullBath",
"BedroomAbvGr",
"TotRmsAbvGrd",
]
].astype(float)
from sklearn.preprocessing import StandardScaler
# Create the scaler
s = StandardScaler()
# Scaled data
Y_imputed = s.fit_transform(Yy)
# #create a dataframe
y_col = list(Yy.columns.values)
Y_imputedd = pd.DataFrame(Y_imputed, columns=y_col).astype(float)
# Y_imputedd = pd.DataFrame(Y_imputed, columns = ["LotArea", "YearBuilt", "1stFlrSF", "2ndFlrSF", "FullBath", "BedroomAbvGr", "TotRmsAbvGrd"])
Y_imputedd
X_imputedd.head()
Y_imputedd.head()
# corr_matrix = X_imputedd.corr().abs()
# upper_tri = corr_matrix.where(np.triu(np.ones(corr_matrix.shape),k=1).astype(np.bool))
# to_drop = [column for column in upper_tri.columns if any(upper_tri[column] > 0.95)]
# X_imputedd = X_imputedd.drop(X_imputedd.columns[to_drop], axis=1)
# using test_train_split from sklearn to split data to "train" and "test"
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
X_imputedd, Y, train_size=0.7, test_size=0.3, random_state=0
)
x_train.head()
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(x_train, y_train)
r_sq = model.score(x_train, y_train)
print(r_sq * 100, "%")
print("intercept:", model.intercept_)
print("slope:", model.coef_)
print("=" * 50)
# How to use your model for pridection
y_pred = model.predict(x_test)
y_pred
from sklearn.metrics import mean_squared_error
from math import sqrt
RMSE = sqrt(mean_squared_error(y_test, y_pred))
RMSE
plt.scatter(y_test, y_pred)
y_pred = model.predict(Y_imputedd)
y_pred
y_pred = np.squeeze(y_pred)
y_pred.shape
# # Save test predictions to file
output = pd.DataFrame({"Id": y.Id, "SalePrice": y_pred})
output.to_csv("submissions3.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/270/69270064.ipynb
| null | null |
[{"Id": 69270064, "ScriptId": 18868941, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7810526, "CreationDate": "07/28/2021 23:03:54", "VersionNumber": 4.0, "Title": "House Price Final", "EvaluationDate": "07/28/2021", "IsChange": true, "TotalLines": 200.0, "LinesInsertedFromPrevious": 8.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 192.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# plt.style.use('ggplot')
import seaborn as sns
from sklearn.preprocessing import LabelEncoder
# Reading Data from CSV file
x = pd.read_csv("../input/neolen-house-price-prediction/train.csv")
y = pd.read_csv("../input/neolen-house-price-prediction/test.csv")
# z = pd.read_csv("C:\\Users\\hp\\Desktop\\House price prediction\\sample_submission.csv")
x.info()
x.head()
x
display(x.describe())
print(x.shape)
y.describe()
x.SalePrice.hist(bins=50, rwidth=0.6, figsize=(20, 10))
plt.title("Price of the Houses")
plt.show()
plt.hist(x["SalePrice"], color="green")
sns.distplot(x["SalePrice"], color="red")
sns.pairplot(
x[
[
"SalePrice",
"LotArea",
"YearBuilt",
"1stFlrSF",
"2ndFlrSF",
"FullBath",
"BedroomAbvGr",
"TotRmsAbvGrd",
]
],
diag_kind="kde",
)
# sns.heatmap(x.corr(), annot = True)
x.isnull().any()
x.isnull().sum()
nullV = x.isnull().sum().sort_values(ascending=False)
nullV
Y = x[["SalePrice"]]
x = x.drop(
["SalePrice", "Alley", "PoolQC", "Fence", "MiscFeature", "FireplaceQu"], axis=1
)
y.isnull().sum()
y = y.drop(["Alley", "PoolQC", "Fence", "MiscFeature", "FireplaceQu"], axis=1)
# Looking Into the Data
print("Our training dataset has {} rows and {} columns".format(x.shape[0], x.shape[1]))
print("Our Test data has {} rows and {} columns".format(y.shape[0], y.shape[1]))
# x_object = x.select_dtypes('object')
# display(x_object.columns)
# display(x_object.shape)
# x_num = x.select_dtypes(exclude=['object'])
# display(x_num.columns)
# display(x_num.shape)
# x_object = pd.get_dummies(x_object)
# x_object.head()
x_enc = pd.get_dummies(x)
x_enc
x_enc.columns
y_enc = pd.get_dummies(y)
y_enc
y_enc.columns
print(x_enc.isnull().sum())
# using sklearn "simpleimputer" to get rid of the null values using most_frequent strategy
from sklearn.impute import SimpleImputer
# initiate an Imputer
imputer = SimpleImputer(missing_values=np.nan, strategy="median")
# start imputing
x_imputed = imputer.fit_transform(x_enc)
# convert to dataframe again
x_imputed = pd.DataFrame(x_imputed, columns=x_enc.columns)
x_imputed
# using sklearn "simpleimputer" to get rid of the null values using most_frequent strategy
from sklearn.impute import SimpleImputer
# initiate an Imputer
imputer = SimpleImputer(missing_values=np.nan, strategy="median")
# start imputing
y_imputed = imputer.fit_transform(y_enc)
# convert to dataframe again
y_imputed = pd.DataFrame(y_imputed, columns=y_enc.columns)
y_imputed
# check if there is null values
x_imputed.isnull().sum()
# check if there is null values
y_imputed.isnull().sum()
# fig, ax = plt.subplots(figsize=(20,15))
# corr_matrix = x_enc.corr().abs()
# sns.heatmap(corr_matrix, annot=True, linewidths=.5)
# Splitting data
Xx = x_imputed[
[
"LotArea",
"YearBuilt",
"1stFlrSF",
"2ndFlrSF",
"FullBath",
"BedroomAbvGr",
"TotRmsAbvGrd",
]
].astype(float)
from sklearn.preprocessing import StandardScaler
# Create the scaler
s = StandardScaler()
# Scaled data
X_imputed = s.fit_transform(Xx)
# #create a dataframe
x_col = list(Xx.columns.values)
X_imputedd = pd.DataFrame(X_imputed, columns=x_col).astype(float)
X_imputedd
# Splitting data
Yy = y_imputed[
[
"LotArea",
"YearBuilt",
"1stFlrSF",
"2ndFlrSF",
"FullBath",
"BedroomAbvGr",
"TotRmsAbvGrd",
]
].astype(float)
from sklearn.preprocessing import StandardScaler
# Create the scaler
s = StandardScaler()
# Scaled data
Y_imputed = s.fit_transform(Yy)
# #create a dataframe
y_col = list(Yy.columns.values)
Y_imputedd = pd.DataFrame(Y_imputed, columns=y_col).astype(float)
# Y_imputedd = pd.DataFrame(Y_imputed, columns = ["LotArea", "YearBuilt", "1stFlrSF", "2ndFlrSF", "FullBath", "BedroomAbvGr", "TotRmsAbvGrd"])
Y_imputedd
X_imputedd.head()
Y_imputedd.head()
# corr_matrix = X_imputedd.corr().abs()
# upper_tri = corr_matrix.where(np.triu(np.ones(corr_matrix.shape),k=1).astype(np.bool))
# to_drop = [column for column in upper_tri.columns if any(upper_tri[column] > 0.95)]
# X_imputedd = X_imputedd.drop(X_imputedd.columns[to_drop], axis=1)
# using test_train_split from sklearn to split data to "train" and "test"
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
X_imputedd, Y, train_size=0.7, test_size=0.3, random_state=0
)
x_train.head()
from sklearn.linear_model import LinearRegression
model = LinearRegression()
model.fit(x_train, y_train)
r_sq = model.score(x_train, y_train)
print(r_sq * 100, "%")
print("intercept:", model.intercept_)
print("slope:", model.coef_)
print("=" * 50)
# How to use your model for pridection
y_pred = model.predict(x_test)
y_pred
from sklearn.metrics import mean_squared_error
from math import sqrt
RMSE = sqrt(mean_squared_error(y_test, y_pred))
RMSE
plt.scatter(y_test, y_pred)
y_pred = model.predict(Y_imputedd)
y_pred
y_pred = np.squeeze(y_pred)
y_pred.shape
# # Save test predictions to file
output = pd.DataFrame({"Id": y.Id, "SalePrice": y_pred})
output.to_csv("submissions3.csv", index=False)
| false | 0 | 2,032 | 0 | 2,032 | 2,032 |
||
69270255
|
<jupyter_start><jupyter_text>Malware
Kaggle dataset identifier: malware
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# In this section we use a labeled malware dataset.
# The malware dataset contains features extracted from the following:
# 41,323 Windows binaries (executables .exe and .dlls), as legitimate files.
# 96,724 malware files downloaded from the VirusShare website. So, the dataset
# contains 138,048 lines, in total.
malware_dataset = pd.read_csv("../input/t1dataset/malware.csv", sep="|")
legit_subset = malware_dataset[0:41323].drop(["legitimate"], axis=1)
malware_subset = malware_dataset[41323::].drop(["legitimate"], axis=1)
# To make sure that the dataset has loaded properly, let's print the number of important features:
print("The Number of important features is %i \n" % legit_subset.shape[1])
malware_dataset.head()
malware_dataset.tail()
# Added by Luiz
missing_data = pd.DataFrame(
{
"total_missing": malware_dataset.isnull().sum(),
"perc_missing": (malware_dataset.isnull().sum() / 138047) * 100,
}
)
missing_data.head(5)
# We don't have any missing data points as can be seen in the below table, so we will not need to
# remove rows or look further to address any related issues
# To improve the estimators' accuracy scores, we are going to use the
# sklearn.feature_selection module. This module is used in feature selection or
# dimensionality reduction in the dataset.
# To compute the features' importance, in our case, we are going to use tree-based feature
# selection. Load the sklearn.feature_selection module:
import sklearn
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import ExtraTreesClassifier
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_validate
import matplotlib.pyplot as plt
data = malware_dataset.drop(["Name", "md5", "legitimate"], axis=1).values
target = malware_dataset["legitimate"].values
featselect = sklearn.ensemble.ExtraTreesClassifier().fit(data, target)
model = SelectFromModel(featselect, prefit=True)
data_new = model.transform(data)
print(data.shape)
print(data_new.shape)
# Feature importance - So, the algorithms has selected fifteen important features for us. To print them out, use the
# following commands:
import sklearn.ensemble as ske
features = data_new.shape[1]
index = np.argsort(ske.ExtraTreesClassifier().fit(data, target).feature_importances_)[
::-1
][:features]
for feat in range(features):
data_new_columns.append(malware_dataset.columns[2 + index[feat]])
# Added by Luiz
# This is an easier way to obtain the features that were selected by ExtraTreesClassifier
data_new_features = malware_dataset.drop(["Name", "md5", "legitimate"], axis=1).iloc[
:, index
]
print(data_new_features)
# From here we can proceed with preprocessing using the features selected by ExtraTreesClassifier
# For many of our features, the standard deviation is too high. The first process will be normalization to better understand the dataset's dynamics
data_new_features.describe()
# Dataset normalization
x = data_new_features.values # returns a numpy array
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
data_new_features = pd.DataFrame(x_scaled, columns=data_new_features.columns)
# After normalization
data_new_features.describe()
# Outliers
num_cols = data_new_features.columns
plt.figure(figsize=(27, 9))
data_new_features[num_cols].boxplot()
plt.title("Numerical variables in dataset", fontsize=20)
plt.show()
from scipy import stats
# df[(np.abs(stats.zscore(df)) < 3).all(axis=1)]
# Removing outliers based on the zscore
data_new_features["legitimate"] = malware_dataset["legitimate"].values
df2 = data_new_features[(np.abs(stats.zscore(data_new_features)) < 3).all(axis=1)]
# Added by Luiz
legitimate_count = df2[df2.legitimate == 1].shape[0]
malware_count = df2[df2.legitimate == 0].shape[0]
legit_perc = legitimate_count / (legitimate_count + malware_count) * 100
malware_perc = malware_count / (legitimate_count + malware_count) * 100
print(
f"In the dataset {legit_perc:.2f}% is legitimate, {malware_perc:.2f}% is malware."
)
from imblearn.under_sampling import RandomUnderSampler
from collections import Counter
# Using undersampling to balance the dataset
data = df2.drop(["legitimate"], axis=1).values
target = df2["legitimate"].values
under_sampler = RandomUnderSampler()
X_res, y_res = under_sampler.fit_resample(data, target)
df3 = pd.DataFrame(data=X_res, columns=df2.drop(["legitimate"], axis=1).columns)
df3["legitimate"] = y_res
print(f"Number of legitimate flows: {df3[df3.legitimate == 1].shape[0]}")
print(f"Number of malware flows: {df3[df3.legitimate == 0].shape[0]}")
Data_new = df3.drop(["legitimate"], axis=1).values
Target = df3["legitimate"].values
# Now, it is time to train our model with a random forest classifier.
# Legit_Train, Legit_Test, Malware_Train, Malware_Test = cross_validate.train_test_split(Data_new,
# Target ,test_size=0.2)
(
Legit_Train,
Legit_Test,
Malware_Train,
Malware_Test,
) = sklearn.model_selection.train_test_split(Data_new, Target, test_size=0.2)
clf = sklearn.ensemble.RandomForestClassifier(n_estimators=50)
clf.fit(Legit_Train, Malware_Train)
score = clf.score(Legit_Test, Malware_Test)
print(score * 100)
print("The score of Random Forest is", score * 100)
from sklearn.metrics import confusion_matrix
Result = clf.predict(Legit_Test)
CM = confusion_matrix(Malware_Test, Result)
print("False positive rate : %f %%" % ((CM[0][1] / float(sum(CM[0]))) * 100))
print("False negative rate : %f %%" % ((CM[1][0] / float(sum(CM[1])) * 100)))
# To train the model with another classifier, redo the previous steps, but instead of choosing the
# random forest classifier, select a machine learning algorithm such as gradient-boosting:
Clf = sklearn.ensemble.GradientBoostingClassifier(n_estimators=50)
Clf.fit(Legit_Train, Malware_Train)
Score = Clf.score(Legit_Test, Malware_Test)
print(Score * 100)
print("The score of Gradient Boosting is", Score * 100)
# The following is the score using the AdaBoost classifier
Classifiers = {
"RandomForest": ske.RandomForestClassifier(n_estimators=50),
"GradientBoosting": ske.GradientBoostingClassifier(n_estimators=50),
"AdaBoost": ske.AdaBoostClassifier(n_estimators=100),
}
for Classif in Classifiers:
clf = Classifiers[Classif]
clf.fit(Legit_Train, Malware_Train)
score = clf.score(Legit_Test, Malware_Test)
print("%s : %f %%" % (Classif, score * 100))
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/270/69270255.ipynb
|
malware
|
dscclass
|
[{"Id": 69270255, "ScriptId": 18729322, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7499525, "CreationDate": "07/28/2021 23:09:28", "VersionNumber": 3.0, "Title": "Task #1 - Malware-Exploratory", "EvaluationDate": "07/28/2021", "IsChange": true, "TotalLines": 181.0, "LinesInsertedFromPrevious": 49.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 132.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 92234224, "KernelVersionId": 69270255, "SourceDatasetVersionId": 2407360}]
|
[{"Id": 2407360, "DatasetId": 1456115, "DatasourceVersionId": 2449430, "CreatorUserId": 7458594, "LicenseName": "CC0: Public Domain", "CreationDate": "07/08/2021 16:55:02", "VersionNumber": 1.0, "Title": "Malware", "Slug": "malware", "Subtitle": "Only for teaching purpose", "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1456115, "CreatorUserId": 7458594, "OwnerUserId": 7458594.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2407360.0, "CurrentDatasourceVersionId": 2449430.0, "ForumId": 1475689, "Type": 2, "CreationDate": "07/08/2021 16:55:02", "LastActivityDate": "07/08/2021", "TotalViews": 2004, "TotalDownloads": 160, "TotalVotes": 2, "TotalKernels": 7}]
|
[{"Id": 7458594, "UserName": "dscclass", "DisplayName": "DSC-class", "RegisterDate": "05/19/2021", "PerformanceTier": 0}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# In this section we use a labeled malware dataset.
# The malware dataset contains features extracted from the following:
# 41,323 Windows binaries (executables .exe and .dlls), as legitimate files.
# 96,724 malware files downloaded from the VirusShare website. So, the dataset
# contains 138,048 lines, in total.
malware_dataset = pd.read_csv("../input/t1dataset/malware.csv", sep="|")
legit_subset = malware_dataset[0:41323].drop(["legitimate"], axis=1)
malware_subset = malware_dataset[41323::].drop(["legitimate"], axis=1)
# To make sure that the dataset has loaded properly, let's print the number of important features:
print("The Number of important features is %i \n" % legit_subset.shape[1])
malware_dataset.head()
malware_dataset.tail()
# Added by Luiz
missing_data = pd.DataFrame(
{
"total_missing": malware_dataset.isnull().sum(),
"perc_missing": (malware_dataset.isnull().sum() / 138047) * 100,
}
)
missing_data.head(5)
# We don't have any missing data points as can be seen in the below table, so we will not need to
# remove rows or look further to address any related issues
# To improve the estimators' accuracy scores, we are going to use the
# sklearn.feature_selection module. This module is used in feature selection or
# dimensionality reduction in the dataset.
# To compute the features' importance, in our case, we are going to use tree-based feature
# selection. Load the sklearn.feature_selection module:
import sklearn
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import ExtraTreesClassifier
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_validate
import matplotlib.pyplot as plt
data = malware_dataset.drop(["Name", "md5", "legitimate"], axis=1).values
target = malware_dataset["legitimate"].values
featselect = sklearn.ensemble.ExtraTreesClassifier().fit(data, target)
model = SelectFromModel(featselect, prefit=True)
data_new = model.transform(data)
print(data.shape)
print(data_new.shape)
# Feature importance - So, the algorithms has selected fifteen important features for us. To print them out, use the
# following commands:
import sklearn.ensemble as ske
features = data_new.shape[1]
index = np.argsort(ske.ExtraTreesClassifier().fit(data, target).feature_importances_)[
::-1
][:features]
for feat in range(features):
data_new_columns.append(malware_dataset.columns[2 + index[feat]])
# Added by Luiz
# This is an easier way to obtain the features that were selected by ExtraTreesClassifier
data_new_features = malware_dataset.drop(["Name", "md5", "legitimate"], axis=1).iloc[
:, index
]
print(data_new_features)
# From here we can proceed with preprocessing using the features selected by ExtraTreesClassifier
# For many of our features, the standard deviation is too high. The first process will be normalization to better understand the dataset's dynamics
data_new_features.describe()
# Dataset normalization
x = data_new_features.values # returns a numpy array
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
data_new_features = pd.DataFrame(x_scaled, columns=data_new_features.columns)
# After normalization
data_new_features.describe()
# Outliers
num_cols = data_new_features.columns
plt.figure(figsize=(27, 9))
data_new_features[num_cols].boxplot()
plt.title("Numerical variables in dataset", fontsize=20)
plt.show()
from scipy import stats
# df[(np.abs(stats.zscore(df)) < 3).all(axis=1)]
# Removing outliers based on the zscore
data_new_features["legitimate"] = malware_dataset["legitimate"].values
df2 = data_new_features[(np.abs(stats.zscore(data_new_features)) < 3).all(axis=1)]
# Added by Luiz
legitimate_count = df2[df2.legitimate == 1].shape[0]
malware_count = df2[df2.legitimate == 0].shape[0]
legit_perc = legitimate_count / (legitimate_count + malware_count) * 100
malware_perc = malware_count / (legitimate_count + malware_count) * 100
print(
f"In the dataset {legit_perc:.2f}% is legitimate, {malware_perc:.2f}% is malware."
)
from imblearn.under_sampling import RandomUnderSampler
from collections import Counter
# Using undersampling to balance the dataset
data = df2.drop(["legitimate"], axis=1).values
target = df2["legitimate"].values
under_sampler = RandomUnderSampler()
X_res, y_res = under_sampler.fit_resample(data, target)
df3 = pd.DataFrame(data=X_res, columns=df2.drop(["legitimate"], axis=1).columns)
df3["legitimate"] = y_res
print(f"Number of legitimate flows: {df3[df3.legitimate == 1].shape[0]}")
print(f"Number of malware flows: {df3[df3.legitimate == 0].shape[0]}")
Data_new = df3.drop(["legitimate"], axis=1).values
Target = df3["legitimate"].values
# Now, it is time to train our model with a random forest classifier.
# Legit_Train, Legit_Test, Malware_Train, Malware_Test = cross_validate.train_test_split(Data_new,
# Target ,test_size=0.2)
(
Legit_Train,
Legit_Test,
Malware_Train,
Malware_Test,
) = sklearn.model_selection.train_test_split(Data_new, Target, test_size=0.2)
clf = sklearn.ensemble.RandomForestClassifier(n_estimators=50)
clf.fit(Legit_Train, Malware_Train)
score = clf.score(Legit_Test, Malware_Test)
print(score * 100)
print("The score of Random Forest is", score * 100)
from sklearn.metrics import confusion_matrix
Result = clf.predict(Legit_Test)
CM = confusion_matrix(Malware_Test, Result)
print("False positive rate : %f %%" % ((CM[0][1] / float(sum(CM[0]))) * 100))
print("False negative rate : %f %%" % ((CM[1][0] / float(sum(CM[1])) * 100)))
# To train the model with another classifier, redo the previous steps, but instead of choosing the
# random forest classifier, select a machine learning algorithm such as gradient-boosting:
Clf = sklearn.ensemble.GradientBoostingClassifier(n_estimators=50)
Clf.fit(Legit_Train, Malware_Train)
Score = Clf.score(Legit_Test, Malware_Test)
print(Score * 100)
print("The score of Gradient Boosting is", Score * 100)
# The following is the score using the AdaBoost classifier
Classifiers = {
"RandomForest": ske.RandomForestClassifier(n_estimators=50),
"GradientBoosting": ske.GradientBoostingClassifier(n_estimators=50),
"AdaBoost": ske.AdaBoostClassifier(n_estimators=100),
}
for Classif in Classifiers:
clf = Classifiers[Classif]
clf.fit(Legit_Train, Malware_Train)
score = clf.score(Legit_Test, Malware_Test)
print("%s : %f %%" % (Classif, score * 100))
| false | 1 | 2,197 | 0 | 2,215 | 2,197 |
||
69270896
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt
from scipy.fftpack import fft, dct
import os
from tqdm import tqdm
import torch.nn.functional as F
frame = pd.read_csv(
"/kaggle/input/g2net-gravitational-wave-detection/training_labels.csv"
)
idx = frame.iloc[3]["id"]
directory = os.path.join(
"/kaggle/input/g2net-gravitational-wave-detection/",
"train",
idx[0],
idx[1],
idx[2],
idx + ".npy",
)
print(frame)
sample = np.load(directory)
plt.plot(dct(sample, 1)[0][0:128])
class GWavesDataSet(Dataset):
def __init__(
self,
csv_file,
root_dir=os.path.join(
"/kaggle/input/g2net-gravitational-wave-detection/", "train"
),
):
self.gwaves = pd.read_csv(csv_file)
self.root_dir = root_dir
def __len__(self):
return len(self.gwaves)
def __getitem__(self, idx):
file_id = frame.iloc[idx]["id"]
wave_forms = np.load(
os.path.join(
self.root_dir, file_id[0], file_id[1], file_id[2], file_id + ".npy"
)
).astype("float32")
wave_forms = dct(wave_forms, 1)[:, :100].reshape(3 * 100)
target = int(frame.iloc[idx]["target"])
sample = {
"waveforms": torch.from_numpy(wave_forms),
"target": torch.FloatTensor([target]),
}
return sample
dataset = GWavesDataSet(
"/kaggle/input/g2net-gravitational-wave-detection/training_labels.csv"
)
dataset[154828]["waveforms"].shape
# class G2N(nn.Module):
# def __init__(self):
# super(G2N,self).__init__()
# self.conv1= nn.Conv1d(3,8, 3, padding = 1)
# self.conv1_bn = nn.BatchNorm1d(num_features=8)
# self.conv2= nn.Conv1d(8,8, 3, padding = 1)
# self.conv2_bn = nn.BatchNorm1d(num_features=8)
# self.conv3= nn.Conv1d(8,16, 3, padding = 1)
# self.conv3_bn = nn.BatchNorm1d(num_features=16)
# self.encoder_layer = nn.TransformerEncoderLayer(d_model=16, nhead=1)
# self.transformer_encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=1)
# # self.transformer = nn.Transformer(d_model=3, nhead=1, num_encoder_layers=1, num_decoder_layers=1, dim_feedforward=1024)
# self.ff1 = nn.Linear(16*4096,4096)
# self.ff1_bn = nn.BatchNorm1d(num_features=4096)
# self.ff2 = nn.Linear(4096,2048)
# self.ff2_bn = nn.BatchNorm1d(num_features=2048)
# self.ff3 = nn.Linear(2048,512)
# self.ff3_bn = nn.BatchNorm1d(num_features=512)
# self.ff4 = nn.Linear(512,128)
# self.ff4_bn = nn.BatchNorm1d(num_features=128)
# self.ff5 = nn.Linear(128,32)
# self.ff5_bn = nn.BatchNorm1d(num_features=32)
# self.ff6 = nn.Linear(32,1)
# def forward(self, sample):
# x= F.relu(self.conv1_bn(self.conv1(sample)))
# x= F.relu(self.conv2_bn(self.conv2(x)))
# x= F.relu(self.conv3_bn(self.conv3(x)))
# # print(x.shape)
# x = x.permute(2,0,1)
# x = self.transformer_encoder(x)
# x = x.permute(1,2,0).reshape(-1,16*4096)
# x = F.relu(self.ff1_bn(self.ff1(x)))
# x = F.relu(self.ff2_bn(self.ff2(x)))
# x = F.relu(self.ff3_bn(self.ff3(x)))
# x = F.relu(self.ff4_bn(self.ff4(x)))
# x = F.relu(self.ff5_bn(self.ff5(x)))
# x = torch.sigmoid(self.ff6(x))
# return x
class G2N(nn.Module):
def __init__(self):
super(G2N, self).__init__()
self.ff1 = nn.Linear(300, 512)
self.ff1_bn = nn.BatchNorm1d(num_features=4096)
self.ff2 = nn.Linear(512, 1024)
self.ff2_bn = nn.BatchNorm1d(num_features=2048)
self.ff3 = nn.Linear(1024, 2048)
self.ff3_bn = nn.BatchNorm1d(num_features=512)
self.ff4 = nn.Linear(2048, 512)
self.ff4_bn = nn.BatchNorm1d(num_features=128)
self.ff5 = nn.Linear(512, 32)
self.ff5_bn = nn.BatchNorm1d(num_features=32)
self.ff6 = nn.Linear(32, 1)
def forward(self, sample):
x = F.relu(self.conv1_bn(self.conv1(sample)))
x = F.relu(self.conv2_bn(self.conv2(x)))
x = F.relu(self.conv3_bn(self.conv3(x)))
# print(x.shape)
x = x.permute(2, 0, 1)
x = self.transformer_encoder(x)
x = x.permute(1, 2, 0).reshape(-1, 16 * 4096)
x = F.relu(self.ff1_bn(self.ff1(x)))
x = F.relu(self.ff2_bn(self.ff2(x)))
x = F.relu(self.ff3_bn(self.ff3(x)))
x = F.relu(self.ff4_bn(self.ff4(x)))
x = F.relu(self.ff5_bn(self.ff5(x)))
x = torch.sigmoid(self.ff6(x))
return x
model = G2N()
dataloader = torch.utils.data.DataLoader(
dataset, batch_size=16, shuffle=True, num_workers=2
)
len(dataloader)
z = list()
for data in tqdm(dataloader):
z.append(data["waveforms"])
d = torch.stack(z)
d = d.reshape(35000 * 16, 300)
mean = torch.mean(d, 1).numpy()
std = torch.std(d, 1).numpy()
print(mean)
print(std)
np.save("means.npy", mean)
np.save("std.npy", std)
import torch.optim as optim
criterion = nn.BCELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
model.load_state_dict(torch.load("check_point_1.pth"))
model.train()
for epoch in range(50): # loop over the dataset multiple times
torch.save(model.state_dict(), "check_point.pth")
progrss_loader = tqdm(dataloader)
running_loss = 0.0
for i, data in enumerate(progrss_loader):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data["waveforms"].cuda(), data["target"].cuda()
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = model(inputs)
labels = labels
# print(labels.shape)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if (i + 1) % 4 == 0: # print every 2000 mini-batches
progrss_loader.set_description(
"[%d, %5d] loss: %.3f" % (epoch + 1, i + 1, running_loss / 4)
)
# print('[%d, %5d] loss: %.3f' %
# (epoch + 1, i + 1, running_loss / 4))
running_loss = 0.0
print("Finished Training")
print(model)
torch.cuda.empty_cache()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/270/69270896.ipynb
| null | null |
[{"Id": 69270896, "ScriptId": 18885246, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5619796, "CreationDate": "07/28/2021 23:28:41", "VersionNumber": 4.0, "Title": "The talky space-time", "EvaluationDate": "07/28/2021", "IsChange": true, "TotalLines": 191.0, "LinesInsertedFromPrevious": 79.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 112.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt
from scipy.fftpack import fft, dct
import os
from tqdm import tqdm
import torch.nn.functional as F
frame = pd.read_csv(
"/kaggle/input/g2net-gravitational-wave-detection/training_labels.csv"
)
idx = frame.iloc[3]["id"]
directory = os.path.join(
"/kaggle/input/g2net-gravitational-wave-detection/",
"train",
idx[0],
idx[1],
idx[2],
idx + ".npy",
)
print(frame)
sample = np.load(directory)
plt.plot(dct(sample, 1)[0][0:128])
class GWavesDataSet(Dataset):
def __init__(
self,
csv_file,
root_dir=os.path.join(
"/kaggle/input/g2net-gravitational-wave-detection/", "train"
),
):
self.gwaves = pd.read_csv(csv_file)
self.root_dir = root_dir
def __len__(self):
return len(self.gwaves)
def __getitem__(self, idx):
file_id = frame.iloc[idx]["id"]
wave_forms = np.load(
os.path.join(
self.root_dir, file_id[0], file_id[1], file_id[2], file_id + ".npy"
)
).astype("float32")
wave_forms = dct(wave_forms, 1)[:, :100].reshape(3 * 100)
target = int(frame.iloc[idx]["target"])
sample = {
"waveforms": torch.from_numpy(wave_forms),
"target": torch.FloatTensor([target]),
}
return sample
dataset = GWavesDataSet(
"/kaggle/input/g2net-gravitational-wave-detection/training_labels.csv"
)
dataset[154828]["waveforms"].shape
# class G2N(nn.Module):
# def __init__(self):
# super(G2N,self).__init__()
# self.conv1= nn.Conv1d(3,8, 3, padding = 1)
# self.conv1_bn = nn.BatchNorm1d(num_features=8)
# self.conv2= nn.Conv1d(8,8, 3, padding = 1)
# self.conv2_bn = nn.BatchNorm1d(num_features=8)
# self.conv3= nn.Conv1d(8,16, 3, padding = 1)
# self.conv3_bn = nn.BatchNorm1d(num_features=16)
# self.encoder_layer = nn.TransformerEncoderLayer(d_model=16, nhead=1)
# self.transformer_encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=1)
# # self.transformer = nn.Transformer(d_model=3, nhead=1, num_encoder_layers=1, num_decoder_layers=1, dim_feedforward=1024)
# self.ff1 = nn.Linear(16*4096,4096)
# self.ff1_bn = nn.BatchNorm1d(num_features=4096)
# self.ff2 = nn.Linear(4096,2048)
# self.ff2_bn = nn.BatchNorm1d(num_features=2048)
# self.ff3 = nn.Linear(2048,512)
# self.ff3_bn = nn.BatchNorm1d(num_features=512)
# self.ff4 = nn.Linear(512,128)
# self.ff4_bn = nn.BatchNorm1d(num_features=128)
# self.ff5 = nn.Linear(128,32)
# self.ff5_bn = nn.BatchNorm1d(num_features=32)
# self.ff6 = nn.Linear(32,1)
# def forward(self, sample):
# x= F.relu(self.conv1_bn(self.conv1(sample)))
# x= F.relu(self.conv2_bn(self.conv2(x)))
# x= F.relu(self.conv3_bn(self.conv3(x)))
# # print(x.shape)
# x = x.permute(2,0,1)
# x = self.transformer_encoder(x)
# x = x.permute(1,2,0).reshape(-1,16*4096)
# x = F.relu(self.ff1_bn(self.ff1(x)))
# x = F.relu(self.ff2_bn(self.ff2(x)))
# x = F.relu(self.ff3_bn(self.ff3(x)))
# x = F.relu(self.ff4_bn(self.ff4(x)))
# x = F.relu(self.ff5_bn(self.ff5(x)))
# x = torch.sigmoid(self.ff6(x))
# return x
class G2N(nn.Module):
def __init__(self):
super(G2N, self).__init__()
self.ff1 = nn.Linear(300, 512)
self.ff1_bn = nn.BatchNorm1d(num_features=4096)
self.ff2 = nn.Linear(512, 1024)
self.ff2_bn = nn.BatchNorm1d(num_features=2048)
self.ff3 = nn.Linear(1024, 2048)
self.ff3_bn = nn.BatchNorm1d(num_features=512)
self.ff4 = nn.Linear(2048, 512)
self.ff4_bn = nn.BatchNorm1d(num_features=128)
self.ff5 = nn.Linear(512, 32)
self.ff5_bn = nn.BatchNorm1d(num_features=32)
self.ff6 = nn.Linear(32, 1)
def forward(self, sample):
x = F.relu(self.conv1_bn(self.conv1(sample)))
x = F.relu(self.conv2_bn(self.conv2(x)))
x = F.relu(self.conv3_bn(self.conv3(x)))
# print(x.shape)
x = x.permute(2, 0, 1)
x = self.transformer_encoder(x)
x = x.permute(1, 2, 0).reshape(-1, 16 * 4096)
x = F.relu(self.ff1_bn(self.ff1(x)))
x = F.relu(self.ff2_bn(self.ff2(x)))
x = F.relu(self.ff3_bn(self.ff3(x)))
x = F.relu(self.ff4_bn(self.ff4(x)))
x = F.relu(self.ff5_bn(self.ff5(x)))
x = torch.sigmoid(self.ff6(x))
return x
model = G2N()
dataloader = torch.utils.data.DataLoader(
dataset, batch_size=16, shuffle=True, num_workers=2
)
len(dataloader)
z = list()
for data in tqdm(dataloader):
z.append(data["waveforms"])
d = torch.stack(z)
d = d.reshape(35000 * 16, 300)
mean = torch.mean(d, 1).numpy()
std = torch.std(d, 1).numpy()
print(mean)
print(std)
np.save("means.npy", mean)
np.save("std.npy", std)
import torch.optim as optim
criterion = nn.BCELoss()
optimizer = optim.Adam(model.parameters(), lr=0.001)
model.load_state_dict(torch.load("check_point_1.pth"))
model.train()
for epoch in range(50): # loop over the dataset multiple times
torch.save(model.state_dict(), "check_point.pth")
progrss_loader = tqdm(dataloader)
running_loss = 0.0
for i, data in enumerate(progrss_loader):
# get the inputs; data is a list of [inputs, labels]
inputs, labels = data["waveforms"].cuda(), data["target"].cuda()
# zero the parameter gradients
optimizer.zero_grad()
# forward + backward + optimize
outputs = model(inputs)
labels = labels
# print(labels.shape)
loss = criterion(outputs, labels)
loss.backward()
optimizer.step()
# print statistics
running_loss += loss.item()
if (i + 1) % 4 == 0: # print every 2000 mini-batches
progrss_loader.set_description(
"[%d, %5d] loss: %.3f" % (epoch + 1, i + 1, running_loss / 4)
)
# print('[%d, %5d] loss: %.3f' %
# (epoch + 1, i + 1, running_loss / 4))
running_loss = 0.0
print("Finished Training")
print(model)
torch.cuda.empty_cache()
| false | 0 | 2,399 | 0 | 2,399 | 2,399 |
||
69270223
|
# # Authors
# ## Joseph Santiago Portilla Martínez
# Electronic Engineering student. Universidad de Nariño, Pasto, Colombia.
# [E-mail](mailto:[email protected]) | [LinkedIn](https://www.linkedin.com/in/portillajs/) | [Github](https://github.com/JoePortilla)
# ## Karen Stefania Mirama Eraso
# Electronic Engineering student. Universidad de Nariño, Pasto, Colombia.
# [E-mail](mailto:[email protected])
# ## M. Sc. Darío Fernando Fajardo Fajardo
# Professor, Department of Electronic Engineering. Universidad de Nariño, Pasto, Colombia.
# [E-mail](mailto:[email protected])
# # Setup
# ## Variable to predict
var = "rad"
# ## Libraries
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import seaborn as sns
from sklearn.preprocessing import StandardScaler
from keras.models import Sequential, Model
from keras.layers import (
Dense,
LSTM,
Dropout,
GRU,
Bidirectional,
RepeatVector,
TimeDistributed,
Input,
)
from keras.optimizers import SGD
from keras.callbacks import EarlyStopping
import math
from sklearn.metrics import mean_squared_error
from keras.utils.vis_utils import plot_model
import tensorflow as tf
import matplotlib.dates as mdates
# ## Settings for reproducible results
import random
random.seed(1)
np.random.seed(1)
tf.random.set_seed(1)
# ## File loading
dftemp = pd.read_csv(
"../input/site-adaptation/site_adaptation_temp.csv",
index_col="Fecha",
parse_dates=["Fecha"],
)
dfhr = pd.read_csv(
"../input/site-adaptation/site_adaptation_hr.csv",
index_col="Fecha",
parse_dates=["Fecha"],
)
dfrad = pd.read_csv(
"../input/site-adaptation/site_adaptation_rad.csv",
index_col="Fecha",
parse_dates=["Fecha"],
)
dfvv = pd.read_csv(
"../input/site-adaptation/site_adaptation_vv_full.csv",
index_col="Fecha",
parse_dates=["Fecha"],
)
# ### Concatenation
# Initial dataframe creation
df = pd.concat(
[
dftemp["temp_opt"].rename("temp"),
dfhr["hr_opt"].rename("hr"),
dfrad["rad_opt"].rename("rad"),
dfvv["vv_opt"].rename("vv"),
],
axis=1,
)
df.head()
df.describe()
# ## Function definitions
# ### Split the set into sequences
def split_sequences(
features, targets, n_steps_in, n_steps_out, n_sliding_steps, window_type
):
"""
Args:
* features: Secuencias de entrada univariadas o multivariadas.
* targets: Secuencias de salida univariantes o multivariantes.
* n_steps_in: Longitud de la secuencia de entrada para la ventana deslizante.
* n_steps_out: Longitud de la secuencia de salida.
* n_sliding_steps: Tamaño del paso de la ventana.
* window_type: "deslizante" o "expansiva".
"""
    X, y = list(), list()  # Lists to fill
    for i in range(0, len(features), n_sliding_steps):
        # Compute the end index of the current window
        end_ix = i + n_steps_in
        out_end_ix = end_ix + n_steps_out
        # Stop once the window runs past the end of the series
        if out_end_ix > len(features):
            break
        # Collect the input and output sequences for this window
        if window_type == "sliding":  # Sliding window
            seq_x, seq_y = features[i:end_ix], targets[end_ix:out_end_ix]
        else:  # expanding window or walk-forward
            seq_x, seq_y = features[0:end_ix], targets[end_ix:out_end_ix]
X.append(seq_x)
y.append(seq_y)
return np.array(X), np.array(y)
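# Quick illustrative check of `split_sequences` on a toy series (made-up values,
# not project data): with 10 time steps, 3 steps in, 2 steps out and a stride of 1,
# the sliding window should produce 6 input/output pairs.
_toy = np.arange(10)
_X_toy, _y_toy = split_sequences(
    _toy, _toy, n_steps_in=3, n_steps_out=2, n_sliding_steps=1, window_type="sliding"
)
print(_X_toy.shape, _y_toy.shape)  # expected: (6, 3) (6, 2)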
# ### Generate training, validation, and test sequences
def get_sequences(
input_cols, output_cols, input_seq_length, output_seq_length, sliding_steps
):
global index_X_train, index_y_train, index_X_val, index_y_val, index_X_test, index_y_test
global X_train, y_train, X_val, y_val, X_test, y_test
global n_features, n_targets
    # Compute the number of input and output variables
    n_features = len(input_cols) - 1  # Number of input variables
    n_targets = len(output_cols) - 1  # Number of output variables
    # Sample sequences for training
X_train, y_train = split_sequences(
train_s[input_cols].values,
train_s[output_cols].values,
n_steps_in=input_seq_length,
n_steps_out=output_seq_length,
n_sliding_steps=sliding_steps,
window_type="sliding",
)
    # Sample sequences for validation
X_val, y_val = split_sequences(
val_s[input_cols].values,
val_s[output_cols].values,
n_steps_in=input_seq_length,
n_steps_out=output_seq_length,
n_sliding_steps=sliding_steps,
window_type="sliding",
)
    # Sample sequences for testing
X_test, y_test = split_sequences(
test_s[input_cols].values,
test_s[output_cols].values,
n_steps_in=input_seq_length,
n_steps_out=output_seq_length,
n_sliding_steps=sliding_steps,
window_type="sliding",
)
# Debugging
print(f"Total de muestras entrenamiento: {X_train.shape[0]}.")
print(f"Total de muestras validación: {X_val.shape[0]}.")
print(f"Total de muestras evaluación: {X_test.shape[0]}.")
    # Each sample contains the variable values and the corresponding time index.
    # The indices and the variables are stored separately below.
    # The time indices will be used to plot the prediction results.
    # Store the time indices
index_X_train = X_train[:, :, 0]
index_y_train = y_train[:, :, 0]
index_X_val = X_val[:, :, 0]
index_y_val = y_val[:, :, 0]
index_X_test = X_test[:, :, 0]
index_y_test = y_test[:, :, 0]
    # Store the variables
X_train = (
X_train[:, :, -n_features:]
.astype(np.float32)
.reshape((X_train.shape[0], input_seq_length, n_features))
)
y_train = y_train[:, :, -n_targets:].astype(np.float32)
X_val = (
X_val[:, :, -n_features:]
.astype(np.float32)
.reshape((X_val.shape[0], input_seq_length, n_features))
)
y_val = y_val[:, :, -n_targets:].astype(np.float32)
X_test = (
X_test[:, :, -n_features:]
.astype(np.float32)
.reshape((X_test.shape[0], input_seq_length, n_features))
)
y_test = y_test[:, :, -n_targets:].astype(np.float32)
# ### RMSE metric for the loss function
from keras import backend
def rmse(y_true, y_pred):
return backend.sqrt(backend.mean(backend.square(y_pred - y_true), axis=-1))
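# Worked example of the metric (illustrative values, assuming TF2 eager execution):
# for y_true = [1, 2, 3] and y_pred = [2, 2, 4] the squared errors are [1, 0, 1],
# their mean is 2/3, and RMSE = sqrt(2/3) ≈ 0.8165.
print(
    rmse(backend.constant([1.0, 2.0, 3.0]), backend.constant([2.0, 2.0, 4.0])).numpy()
)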
# ### Define the model
def get_model(hidden_size):
global seq2seq
    # Reset the states generated by Keras and TensorFlow to start from a clean session
    tf.keras.backend.clear_session()
    # ----------ENCODER----------
    # Input layer (historical values).
    encoder_inputs = Input(shape=(input_seq_length, n_features), name="Entrada")
    # Layer of LSTM units.
    encoder_layer = LSTM(hidden_size, return_state=True, name="LSTMencoder")
    # Connect the input layer to the LSTM layer.
    encoder_outputs = encoder_layer(encoder_inputs)
    # Store the encoder's internal state (c: cell state) and hidden state (h).
    # encoder_outputs is a list of [last output (equal to the hidden state), hidden state, cell state].
    encoder_states = encoder_outputs[1:]
    # ----------DECODER----------
    # Decoder input.
    # A repeat vector distributes the encoder LSTM output (h) to every decoder time step.
    decoder_inputs = RepeatVector(output_seq_length, name="c")(encoder_outputs[0])
    # Layer of LSTM units.
    # Connected to the decoder input layer and initialised with the encoder states.
    decoder_layer = LSTM(hidden_size, return_sequences=True, name="LSTMdecoder")(
        decoder_inputs, initial_state=encoder_states
    )
    # Output layer.
    # The output sequence is predicted with a Dense layer wrapped in a TimeDistributed layer.
    decoder_outputs = TimeDistributed(Dense(n_targets), name="Salida")(decoder_layer)
    # Model creation
    seq2seq = Model(encoder_inputs, decoder_outputs)
    # Compile
    seq2seq.compile(
        optimizer="adam",  # Optimizer
        loss="mse",  # Loss function
        metrics=[rmse],
    )
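# Shape flow through the encoder-decoder above (a sketch; B denotes the batch size):
#   Input                  -> (B, input_seq_length, n_features)
#   LSTM encoder           -> last hidden state h: (B, hidden_size), cell state c: (B, hidden_size)
#   RepeatVector           -> (B, output_seq_length, hidden_size)  # h repeated for each output step
#   LSTM decoder           -> (B, output_seq_length, hidden_size)  # initialised with [h, c]
#   TimeDistributed(Dense) -> (B, output_seq_length, n_targets)
# Calling `get_model(32)` followed by `seq2seq.summary()` (after `get_sequences` has
# set the sequence globals) is one way to confirm these shapes.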
# ### Train the model
def train_model(epochs, batch_size, min_delta, patience, verbose):
global history, monitor
    monitor = EarlyStopping(
        monitor="val_loss",  # Metric to monitor.
        min_delta=min_delta,  # Minimum change considered an improvement
        patience=patience,  # Number of epochs without improvement after which training stops.
        verbose=1,  # Debugging
        restore_best_weights=True,  # Restore the weights from the epoch with the best 'val_loss'.
        mode="auto",
    )
    history = seq2seq.fit(
        X_train,
        y_train,  # Training set
        validation_data=(X_val, y_val),  # Validation set
        epochs=epochs,  # Number of epochs
        callbacks=[monitor],  # Early Stopping
        batch_size=batch_size,  # Number of samples in each batch
        verbose=verbose,  # Debugging
        shuffle=False,  # Keep the data in sequence order
    )
# ### Plot the loss across epochs
def plot_history(history):
loss_list = [s for s in history.history.keys() if "loss" in s and "val" not in s]
val_loss_list = [s for s in history.history.keys() if "loss" in s and "val" in s]
epochs = range(1, len(history.history[loss_list[0]]) + 1)
## Loss
fig, ax = plt.subplots(1, 1, figsize=(5, 5))
fig.patch.set_facecolor("white")
for l in loss_list:
ax.plot(epochs, history.history[l], "cornflowerblue", label="Training loss")
for l in val_loss_list:
ax.plot(epochs, history.history[l], "orange", label="Validation loss")
ax.set_xlabel("Epochs")
ax.set_ylabel("Loss")
plt.legend(loc=1)
plt.tick_params(labelsize=12)
plt.tight_layout()
    plt.show()
# ### Evaluate the model
def test_model():
global y_pred
    # Prediction on the test data
    y_pred = seq2seq.predict(X_test, verbose=0)
    # Rescale the variable to physical units
    rescale_var()
    # Model evaluation
test_rmse = np.sqrt(mean_squared_error(y_test.reshape(-1), y_pred.reshape(-1)))
print(f"Testing RMSE: {test_rmse:.4f}")
return test_rmse
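# Note that `rescale_var` runs before the error is computed, so the reported test
# RMSE is expressed in the physical units of the selected variable (W/m^2 for
# radiation, °C for temperature, % for relative humidity, m/s for wind speed).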
# ### Rescaling the variable
# The parameters of the original scaler for the variable ($\mu$, $\sigma$) are copied into a new scaler with the correct dimensions, matching the prediction matrix of the variable.
def rescale_var():
global X_test, y_test, y_pred
scaler2 = StandardScaler()
if var == "temp":
index = 0
elif var == "hr":
index = 1
elif var == "rad":
index = 2
else:
index = 3
scaler2.scale_, scaler2.mean_, scaler2.var_ = (
scaler.scale_[index],
scaler.mean_[index],
scaler.var_[index],
)
X_test = scaler2.inverse_transform(X_test)
y_test = scaler2.inverse_transform(y_test)
y_pred = scaler2.inverse_transform(y_pred)
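# The inverse transform used above is element-wise x = z * sigma + mu, with mu and
# sigma taken from the scaler fitted earlier. Illustrative check with made-up
# parameters (mu = 450, sigma = 300, not the fitted values):
print(1.2 * 300.0 + 450.0)  # a scaled value z = 1.2 maps back to 810.0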
# ### Plot historical, true, and predicted values
def plot_var(sample, freq, width, height, var):
fig, ax = plt.subplots(1, 1, figsize=(width, height))
fig.patch.set_facecolor("white")
if var == "temp":
ax.set_ylabel("Temperatura [°C]")
index = 0
elif var == "hr":
ax.set_ylabel("Hum. Relativa [%]")
index = 1
elif var == "rad":
ax.set_ylabel("Radiación [$W/m^2$]")
index = 2
else:
ax.set_ylabel("Vel. Viento [m/s]")
index = 3
    if n_features > 1:  # Plot for multivariate input
sns.lineplot(
x=index_X_test[sample],
y=X_test[sample, :, index].reshape(-1),
ax=ax,
color="b",
label="Valores Vistos (Pasado)",
linestyle="-",
marker=",",
)
sns.lineplot(
x=index_y_test[sample],
y=y_test[sample, :, 0],
ax=ax,
color="b",
label="Valores Futuros Verdaderos",
linestyle="--",
)
sns.lineplot(
x=index_y_test[sample],
y=y_pred[sample, :, 0],
ax=ax,
color="g",
label="Predicciones",
linestyle="--",
marker="o",
)
    else:  # Plot for univariate input
sns.lineplot(
x=index_X_test[sample],
y=X_test[sample].reshape(-1),
ax=ax,
color="b",
label="Valores Vistos (Pasado)",
linestyle="-",
marker=",",
)
sns.lineplot(
x=index_y_test[sample],
y=y_test[sample].reshape(-1),
ax=ax,
color="b",
label="Valores Futuros Verdaderos",
linestyle="--",
)
sns.lineplot(
x=index_y_test[sample],
y=y_pred[sample].reshape(-1),
ax=ax,
color="g",
label="Predicciones",
linestyle="--",
marker="o",
)
idx = pd.date_range(
start=index_X_test[sample][0], end=index_y_test[sample][-1], freq=freq
)
ax.set_xticks(idx)
myFmt = mdates.DateFormatter("%d/%m/%y, %I%p")
ax.xaxis.set_major_formatter(myFmt)
plt.xticks(rotation=10)
plt.legend(loc=2)
plt.tick_params(labelsize=12)
plt.tight_layout()
# plt.savefig('division.jpg', dpi=300, bbox_inches='tight')
    plt.show()
# ### Dataframe to store the metrics of all models
dfmetrics = pd.DataFrame(
columns=[
"input_size",
"lstm_units",
"train_loss",
"val_loss",
"epochs",
"test_rmse",
]
)
# # Data Split
train = df.loc[:"2016"].values
val = df.loc["2017"].values
test = df.loc["2018":].values
fig, ax = plt.subplots(1, 1, figsize=(10, 2))
fig.patch.set_facecolor("white")
ax.plot(df.loc[:"2016", var], label="Entrenamiento", c="tab:blue", linewidth="1.5")
ax.plot(df.loc["2017", var], label="Validación", c="tab:green", linewidth="1.5")
ax.plot(df.loc["2018":, var], label="Evaluación", c="tab:brown", linewidth="1.5")
if var == "temp":
ax.set_ylabel("Temperatura [°C]")
elif var == "hr":
ax.set_ylabel("Hum. Relativa [%]")
elif var == "rad":
ax.set_ylabel("Radiación [$W/m^2$]")
else:
ax.set_ylabel("Vel. Viento [m/s]")
ax.set_xlim(pd.to_datetime("2007"), pd.to_datetime("2021"))
plt.legend(loc=3)
plt.tick_params(labelsize=12)
plt.tight_layout()
plt.savefig("division.jpg", dpi=300, bbox_inches="tight")
plt.show()
# # Data Standardization
trainVal = df.loc[:"2017"].values  # Training and validation data
scaler = StandardScaler()
scaler.fit(trainVal)  # Fit only; the transforms are applied per split below
train_scal = scaler.transform(train)
val_scal = scaler.transform(val)
test_scal = scaler.transform(test)
serie = np.concatenate((train_scal, val_scal, test_scal), axis=0)
dfs = pd.DataFrame(serie)
dfs = pd.concat([df.reset_index()["Fecha"], dfs], axis=1)
dfs.set_index("Fecha", inplace=True)
dfs.rename({0: "temp", 1: "hr", 2: "rad", 3: "vv"}, axis=1, inplace=True)
dfs
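# Sanity check of the standardisation z = (x - mu) / sigma: the scaled columns
# should have mean close to 0 and standard deviation close to 1 over the years the
# scaler was fitted on (up to 2017); later years may deviate slightly.
print(dfs.loc[:"2017"].mean().round(3))
print(dfs.loc[:"2017"].std().round(3))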
fig, ax = plt.subplots(1, 1, figsize=(10, 2))
fig.patch.set_facecolor("white")
ax.plot(dfs.loc[:"2016", var], label="Entrenamiento", c="tab:blue", linewidth="1.5")
ax.plot(dfs.loc["2017", var], label="Validación", c="tab:green", linewidth="1.5")
ax.plot(dfs.loc["2018":, var], label="Evaluación", c="tab:brown", linewidth="1.5")
if var == "temp":
ax.set_ylabel("Temp. Escalada")
elif var == "hr":
ax.set_ylabel("H. R. Escalada")
elif var == "rad":
ax.set_ylabel("Rad. Escalada")
else:
ax.set_ylabel("Vel. Viento Escalada")
ax.set_xlim(pd.to_datetime("2007"), pd.to_datetime("2021"))
plt.legend(loc=3)
plt.tick_params(labelsize=12)
plt.tight_layout()
plt.savefig("division_escalada.jpg", dpi=300, bbox_inches="tight")
plt.show()
# Training
train_s = dfs.loc[:"2016"]
train_s.reset_index(inplace=True)
# Validation
val_s = dfs.loc["2017"]
val_s.reset_index(inplace=True)
# Test
test_s = dfs.loc["2018":]
test_s.reset_index(inplace=True)
# # **UNIVARIATE MODELS**
# # *1)* 24 Hours of Input
# # 1.1) *LSTM Units*: 32
# ## Sequence generation
input_cols = ["Fecha", var]  # Input variables
output_cols = ["Fecha", var]  # Variables to predict
input_seq_length = 24  # Historical past. Input [H]
output_seq_length = 24  # Prediction horizon. Output [H]
sliding_steps = 1  # Steps the sliding window advances along the time series [H]
get_sequences(
input_cols, output_cols, input_seq_length, output_seq_length, sliding_steps
)
# ## Model definition
hidden_size = 32  # Number of LSTM units
get_model(hidden_size)
plot_model(seq2seq, show_shapes=True, show_layer_names=True)
# ## Model training
# Training settings
epochs = 300  # Number of epochs
batch_size = 2048  # Number of samples in each batch
verbose = 0  # Debugging
# Early Stopping settings
min_delta = 0.0001  # Minimum change considered an improvement
patience = (
    20  # Number of epochs without improvement after which training stops.
)
train_model(epochs, batch_size, min_delta, patience, verbose)
plot_history(history)
# ## Model evaluation
test_rmse = test_model()
# ## Prediction plots
sample = 55  # Index of the sample to plot
freq = "6h"  # Tick frequency on the time axis
width = 10  # Figure width
height = 3  # Figure height
plot_var(sample, freq, width, height, var)
sample = 12094  # Index of the sample to plot
freq = "6h"  # Tick frequency on the time axis
width = 10  # Figure width
height = 3  # Figure height
plot_var(sample, freq, width, height, var)
# ## Metrics storage
dfmetrics = dfmetrics.append(
{
"input_size": input_seq_length,
"lstm_units": hidden_size,
"train_loss": history.history["loss"][-1],
"val_loss": history.history["val_loss"][-1],
"epochs": monitor.stopped_epoch,
"test_rmse": test_rmse,
},
ignore_index=True,
)
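# Note: `DataFrame.append` is deprecated in recent pandas versions and removed in
# pandas 2.0. An equivalent alternative using `pd.concat` (a sketch; `row_dict`
# stands for the same dictionary of metrics passed to `append` above):
# dfmetrics = pd.concat([dfmetrics, pd.DataFrame([row_dict])], ignore_index=True)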
# # 1.2) *LSTM Units*: 64
# ## Sequence generation
input_cols = ["Fecha", var]  # Input variables
output_cols = ["Fecha", var]  # Variables to predict
input_seq_length = 24  # Historical past. Input [H]
output_seq_length = 24  # Prediction horizon. Output [H]
sliding_steps = 1  # Steps the sliding window advances along the time series [H]
get_sequences(
input_cols, output_cols, input_seq_length, output_seq_length, sliding_steps
)
# ## Model definition
hidden_size = 64  # Number of LSTM units
get_model(hidden_size)
plot_model(seq2seq, show_shapes=True, show_layer_names=True)
# ## Model training
# Training settings
epochs = 300  # Number of epochs
batch_size = 2048  # Number of samples in each batch
verbose = 0  # Debugging
# Early Stopping settings
min_delta = 0.0001  # Minimum change considered an improvement
patience = (
    20  # Number of epochs without improvement after which training stops.
)
train_model(epochs, batch_size, min_delta, patience, verbose)
plot_history(history)
# ## Model evaluation
test_rmse = test_model()
# ## Prediction plots
sample = 55  # Index of the sample to plot
freq = "6h"  # Tick frequency on the time axis
width = 10  # Figure width
height = 3  # Figure height
plot_var(sample, freq, width, height, var)
sample = 12094  # Index of the sample to plot
freq = "6h"  # Tick frequency on the time axis
width = 10  # Figure width
height = 3  # Figure height
plot_var(sample, freq, width, height, var)
# ## Metrics storage
dfmetrics = dfmetrics.append(
{
"input_size": input_seq_length,
"lstm_units": hidden_size,
"train_loss": history.history["loss"][-1],
"val_loss": history.history["val_loss"][-1],
"epochs": monitor.stopped_epoch,
"test_rmse": test_rmse,
},
ignore_index=True,
)
# # 1.3) *LSTM Units*: 128
# ## Sequence generation
input_cols = ["Fecha", var]  # Input variables
output_cols = ["Fecha", var]  # Variables to predict
input_seq_length = 24  # Historical past. Input [H]
output_seq_length = 24  # Prediction horizon. Output [H]
sliding_steps = 1  # Steps the sliding window advances along the time series [H]
get_sequences(
input_cols, output_cols, input_seq_length, output_seq_length, sliding_steps
)
# ## Model definition
hidden_size = 128  # Number of LSTM units
get_model(hidden_size)
plot_model(seq2seq, show_shapes=True, show_layer_names=True)
# ## Model training
# Training settings
epochs = 200  # Number of epochs
batch_size = 2048  # Number of samples in each batch
verbose = 0  # Debugging
# Early Stopping settings
min_delta = 0.0001  # Minimum change considered an improvement
patience = (
    20  # Number of epochs without improvement after which training stops.
)
train_model(epochs, batch_size, min_delta, patience, verbose)
plot_history(history)
# ## Model evaluation
test_rmse = test_model()
# ## Prediction plots
sample = 55  # Index of the sample to plot
freq = "6h"  # Tick frequency on the time axis
width = 10  # Figure width
height = 3  # Figure height
plot_var(sample, freq, width, height, var)
sample = 12094  # Index of the sample to plot
freq = "6h"  # Tick frequency on the time axis
width = 10  # Figure width
height = 3  # Figure height
plot_var(sample, freq, width, height, var)
# ## Metrics storage
dfmetrics = dfmetrics.append(
{
"input_size": input_seq_length,
"lstm_units": hidden_size,
"train_loss": history.history["loss"][-1],
"val_loss": history.history["val_loss"][-1],
"epochs": monitor.stopped_epoch,
"test_rmse": test_rmse,
},
ignore_index=True,
)
# # *2)* 48 Hours of Input
# # 2.1) *LSTM Units*: 32
# ## Sequence Generation
input_cols = ["Fecha", var]  # Input variables
output_cols = ["Fecha", var]  # Variables to predict
input_seq_length = 48  # Historical past. Input [hours]
output_seq_length = 24  # Prediction horizon. Output [hours]
sliding_steps = 1  # Steps the sliding window moves along the time series [hours]
get_sequences(
    input_cols, output_cols, input_seq_length, output_seq_length, sliding_steps
)
# ## Model Definition
hidden_size = 32  # Number of LSTM units
get_model(hidden_size)
plot_model(seq2seq, show_shapes=True, show_layer_names=True)
# ## Model Training
# Training settings
epochs = 300  # Number of epochs
batch_size = 2048  # Number of samples per batch
verbose = 0  # Debugging
# Early Stopping settings
min_delta = 0.0001  # Minimum change counted as an improvement
patience = 20  # Number of epochs without improvement before training stops
train_model(epochs, batch_size, min_delta, patience, verbose)
plot_history(history)
# ## Model Evaluation
test_rmse = test_model()
# ## Prediction Plots
sample = 55  # Index of the sample to plot
freq = "12h"  # Tick spacing on the time axis
width = 10  # Figure width
height = 3  # Figure height
plot_var(sample, freq, width, height, var)
sample = 12094  # Index of the sample to plot
freq = "12h"  # Tick spacing on the time axis
width = 10  # Figure width
height = 3  # Figure height
plot_var(sample, freq, width, height, var)
# ## Metrics Storage
dfmetrics = dfmetrics.append(
    {
        "input_size": input_seq_length,
        "lstm_units": hidden_size,
        "train_loss": history.history["loss"][-1],
        "val_loss": history.history["val_loss"][-1],
        "epochs": monitor.stopped_epoch,
        "test_rmse": test_rmse,
    },
    ignore_index=True,
)
# # 2.2) *LSTM Units*: 64
# ## Sequence Generation
input_cols = ["Fecha", var]  # Input variables
output_cols = ["Fecha", var]  # Variables to predict
input_seq_length = 48  # Historical past. Input [hours]
output_seq_length = 24  # Prediction horizon. Output [hours]
sliding_steps = 1  # Steps the sliding window moves along the time series [hours]
get_sequences(
    input_cols, output_cols, input_seq_length, output_seq_length, sliding_steps
)
# ## Model Definition
hidden_size = 64  # Number of LSTM units
get_model(hidden_size)
plot_model(seq2seq, show_shapes=True, show_layer_names=True)
# ## Model Training
# Training settings
epochs = 300  # Number of epochs
batch_size = 2048  # Number of samples per batch
verbose = 0  # Debugging
# Early Stopping settings
min_delta = 0.0001  # Minimum change counted as an improvement
patience = 20  # Number of epochs without improvement before training stops
train_model(epochs, batch_size, min_delta, patience, verbose)
plot_history(history)
# ## Model Evaluation
test_rmse = test_model()
# ## Prediction Plots
sample = 55  # Index of the sample to plot
freq = "12h"  # Tick spacing on the time axis
width = 10  # Figure width
height = 3  # Figure height
plot_var(sample, freq, width, height, var)
sample = 12094  # Index of the sample to plot
freq = "12h"  # Tick spacing on the time axis
width = 10  # Figure width
height = 3  # Figure height
plot_var(sample, freq, width, height, var)
# ## Metrics Storage
dfmetrics = dfmetrics.append(
    {
        "input_size": input_seq_length,
        "lstm_units": hidden_size,
        "train_loss": history.history["loss"][-1],
        "val_loss": history.history["val_loss"][-1],
        "epochs": monitor.stopped_epoch,
        "test_rmse": test_rmse,
    },
    ignore_index=True,
)
# # 2.3) *LSTM Units*: 128
# ## Sequence Generation
input_cols = ["Fecha", var]  # Input variables
output_cols = ["Fecha", var]  # Variables to predict
input_seq_length = 48  # Historical past. Input [hours]
output_seq_length = 24  # Prediction horizon. Output [hours]
sliding_steps = 1  # Steps the sliding window moves along the time series [hours]
get_sequences(
    input_cols, output_cols, input_seq_length, output_seq_length, sliding_steps
)
# ## Model Definition
hidden_size = 128  # Number of LSTM units
get_model(hidden_size)
plot_model(seq2seq, show_shapes=True, show_layer_names=True)
# ## Model Training
# Training settings
epochs = 200  # Number of epochs
batch_size = 2048  # Number of samples per batch
verbose = 0  # Debugging
# Early Stopping settings
min_delta = 0.0001  # Minimum change counted as an improvement
patience = 20  # Number of epochs without improvement before training stops
train_model(epochs, batch_size, min_delta, patience, verbose)
plot_history(history)
# ## Model Evaluation
test_rmse = test_model()
# ## Prediction Plots
sample = 55  # Index of the sample to plot
freq = "12h"  # Tick spacing on the time axis
width = 10  # Figure width
height = 3  # Figure height
plot_var(sample, freq, width, height, var)
sample = 12094  # Index of the sample to plot
freq = "12h"  # Tick spacing on the time axis
width = 10  # Figure width
height = 3  # Figure height
plot_var(sample, freq, width, height, var)
# ## Metrics Storage
dfmetrics = dfmetrics.append(
    {
        "input_size": input_seq_length,
        "lstm_units": hidden_size,
        "train_loss": history.history["loss"][-1],
        "val_loss": history.history["val_loss"][-1],
        "epochs": monitor.stopped_epoch,
        "test_rmse": test_rmse,
    },
    ignore_index=True,
)
# # *3)* 120 Hours of Input
# # 3.1) *LSTM Units*: 32
# ## Sequence Generation
input_cols = ["Fecha", var]  # Input variables
output_cols = ["Fecha", var]  # Variables to predict
input_seq_length = 120  # Historical past. Input [hours]
output_seq_length = 24  # Prediction horizon. Output [hours]
sliding_steps = 1  # Steps the sliding window moves along the time series [hours]
get_sequences(
    input_cols, output_cols, input_seq_length, output_seq_length, sliding_steps
)
# ## Model Definition
hidden_size = 32  # Number of LSTM units
get_model(hidden_size)
plot_model(seq2seq, show_shapes=True, show_layer_names=True)
# ## Model Training
# Training settings
epochs = 300  # Number of epochs
batch_size = 2048  # Number of samples per batch
verbose = 0  # Debugging
# Early Stopping settings
min_delta = 0.0001  # Minimum change counted as an improvement
patience = 20  # Number of epochs without improvement before training stops
train_model(epochs, batch_size, min_delta, patience, verbose)
plot_history(history)
# ## Model Evaluation
test_rmse = test_model()
# ## Prediction Plots
sample = 55  # Index of the sample to plot
freq = "18h"  # Tick spacing on the time axis
width = 10  # Figure width
height = 3  # Figure height
plot_var(sample, freq, width, height, var)
sample = 12085  # Index of the sample to plot
freq = "18h"  # Tick spacing on the time axis
width = 10  # Figure width
height = 3  # Figure height
plot_var(sample, freq, width, height, var)
# ## Metrics Storage
dfmetrics = dfmetrics.append(
    {
        "input_size": input_seq_length,
        "lstm_units": hidden_size,
        "train_loss": history.history["loss"][-1],
        "val_loss": history.history["val_loss"][-1],
        "epochs": monitor.stopped_epoch,
        "test_rmse": test_rmse,
    },
    ignore_index=True,
)
# # 3.2) *LSTM Units*: 64
# ## Sequence Generation
input_cols = ["Fecha", var]  # Input variables
output_cols = ["Fecha", var]  # Variables to predict
input_seq_length = 120  # Historical past. Input [hours]
output_seq_length = 24  # Prediction horizon. Output [hours]
sliding_steps = 1  # Steps the sliding window moves along the time series [hours]
get_sequences(
    input_cols, output_cols, input_seq_length, output_seq_length, sliding_steps
)
# ## Model Definition
hidden_size = 64  # Number of LSTM units
get_model(hidden_size)
plot_model(seq2seq, show_shapes=True, show_layer_names=True)
# ## Model Training
# Training settings
epochs = 300  # Number of epochs
batch_size = 2048  # Number of samples per batch
verbose = 0  # Debugging
# Early Stopping settings
min_delta = 0.0001  # Minimum change counted as an improvement
patience = 20  # Number of epochs without improvement before training stops
train_model(epochs, batch_size, min_delta, patience, verbose)
plot_history(history)
# ## Model Evaluation
test_rmse = test_model()
# ## Prediction Plots
sample = 55  # Index of the sample to plot
freq = "18h"  # Tick spacing on the time axis
width = 10  # Figure width
height = 3  # Figure height
plot_var(sample, freq, width, height, var)
sample = 12085  # Index of the sample to plot
freq = "18h"  # Tick spacing on the time axis
width = 10  # Figure width
height = 3  # Figure height
plot_var(sample, freq, width, height, var)
# ## Metrics Storage
dfmetrics = dfmetrics.append(
    {
        "input_size": input_seq_length,
        "lstm_units": hidden_size,
        "train_loss": history.history["loss"][-1],
        "val_loss": history.history["val_loss"][-1],
        "epochs": monitor.stopped_epoch,
        "test_rmse": test_rmse,
    },
    ignore_index=True,
)
# # 3.3) *LSTM Units*: 128
# ## Sequence Generation
input_cols = ["Fecha", var]  # Input variables
output_cols = ["Fecha", var]  # Variables to predict
input_seq_length = 120  # Historical past. Input [hours]
output_seq_length = 24  # Prediction horizon. Output [hours]
sliding_steps = 1  # Steps the sliding window moves along the time series [hours]
get_sequences(
    input_cols, output_cols, input_seq_length, output_seq_length, sliding_steps
)
# ## Model Definition
hidden_size = 128  # Number of LSTM units
get_model(hidden_size)
plot_model(seq2seq, show_shapes=True, show_layer_names=True)
# ## Model Training
# Training settings
epochs = 200  # Number of epochs
batch_size = 2048  # Number of samples per batch
verbose = 0  # Debugging
# Early Stopping settings
min_delta = 0.0001  # Minimum change counted as an improvement
patience = 20  # Number of epochs without improvement before training stops
train_model(epochs, batch_size, min_delta, patience, verbose)
plot_history(history)
# ## Model Evaluation
test_rmse = test_model()
# ## Prediction Plots
sample = 55  # Index of the sample to plot
freq = "18h"  # Tick spacing on the time axis
width = 10  # Figure width
height = 3  # Figure height
plot_var(sample, freq, width, height, var)
sample = 12085  # Index of the sample to plot
freq = "18h"  # Tick spacing on the time axis
width = 10  # Figure width
height = 3  # Figure height
plot_var(sample, freq, width, height, var)
# ## Metrics Storage
dfmetrics = dfmetrics.append(
    {
        "input_size": input_seq_length,
        "lstm_units": hidden_size,
        "train_loss": history.history["loss"][-1],
        "val_loss": history.history["val_loss"][-1],
        "epochs": monitor.stopped_epoch,
        "test_rmse": test_rmse,
    },
    ignore_index=True,
)
# # Metrics Dataframe
dfmetrics
dfmetrics.to_csv("prediccion_radiacion_entrada_univariada.csv")
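# A hedged sketch of how the stored runs could be compared side by side: pivot the metrics
# so rows are input sizes and columns are LSTM unit counts, then pick the run with the
# lowest test RMSE. This only assumes `dfmetrics` was filled by the experiments above;
# the helper names `rmse_table` and `best_run` are introduced here purely for illustration.
rmse_table = dfmetrics.pivot_table(
    index="input_size", columns="lstm_units", values="test_rmse"
)
print(rmse_table.round(2))
best_run = dfmetrics.loc[dfmetrics["test_rmse"].idxmin()]
print("Best configuration:\n", best_run)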
import os
from xml.etree import ElementTree
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder, OneHotEncoder
dataset_path = "/kaggle/input/car-crashes-severity-prediction/"
df = pd.read_csv(os.path.join(dataset_path, "train.csv"))
print("The shape of the dataset is {}.\n\n".format(df.shape))
df.head()
weather_df = pd.read_csv(os.path.join(dataset_path, "weather-sfcsv.csv"))
weather_df.head()
tree = ElementTree.parse(os.path.join(dataset_path, "holidays.xml"))
root = tree.getroot()
holidays = []
for row in root:
try:
holidays.append(row[0].text)
except Exception as ex:
pass
# # Exploratory Data Analysis
for col in df.columns:
if df[col].dtype in ["object", "bool"]:
print(df[col].value_counts())
print()
df["Severity"].value_counts()
weather_df.describe().T
for col in weather_df.columns:
if weather_df[col].dtype in ["object", "bool"]:
print(weather_df[col].value_counts(), "\n\n")
counts = weather_df["Weather_Condition"].value_counts()
less_occur = weather_df["Weather_Condition"].isin(counts[counts < 20].index)
weather_df["Weather_Condition"][less_occur] = "Other"
print(weather_df.value_counts("Weather_Condition"))
weather_df = weather_df.drop(
columns=["Selected", "Wind_Chill(F)", "Precipitation(in)"], axis=1
)
# # Merge & Data Manipulation
# Fill numeric gaps with the column mean, then remaining (categorical) gaps with the mode
weather_df = weather_df.fillna(weather_df.mean(numeric_only=True))
weather_df = weather_df.fillna(weather_df.mode().iloc[0])
weather_df = weather_df.drop_duplicates(["Year", "Month", "Day", "Hour"])
def merge_weather(df):
merge_list = ["Year", "Month", "Day", "Hour"]
new_df = df.merge(weather_df, how="left", on=merge_list)
return new_df
def add_is_holiday(df):
    # Parse the holiday strings so the comparison is date-to-date; comparing
    # datetime.date objects against the raw XML strings would always be False.
    df["holiday"] = df["timestamp"].dt.date.isin(pd.to_datetime(holidays).date)
    return df
def add_time_cols(df):
df["timestamp"] = pd.to_datetime(df["timestamp"])
df["Year"] = df["timestamp"].apply(lambda x: x.year)
df["Month"] = df["timestamp"].apply(lambda x: x.month)
df["DayOfWeek"] = df["timestamp"].apply(lambda x: x.dayofweek)
df["Day"] = df["timestamp"].apply(lambda x: x.day)
df["Hour"] = df["timestamp"].apply(lambda x: x.hour)
return df
# # Pipeline
encoder = OneHotEncoder(handle_unknown="ignore")
def pipline(df, my_list, is_train=False):
df = add_time_cols(df)
df = merge_weather(df)
df = add_is_holiday(df)
df = df[my_list]
    object_cols = [col for col in df.columns if df[col].dtype == "object"]
    # Only one-hot encode when the selected features include categorical columns
    if object_cols:
        if is_train:
            encoder.fit(df[object_cols])
        X = pd.DataFrame(
            encoder.transform(df[object_cols]).toarray(),
            columns=encoder.get_feature_names(object_cols),
        )
        df = df.drop(columns=object_cols)
        df = pd.concat([df, X], axis=1)
    return df
# # Model Selection
from sklearn.model_selection import train_test_split
train_df, val_df = train_test_split(
df, test_size=0.2, shuffle=True, random_state=42, stratify=df["Severity"]
)
X_train = train_df.drop(columns=["ID", "Severity"])
y_train = train_df["Severity"]
X_val = val_df.drop(columns=["ID", "Severity"])
y_val = val_df["Severity"]
# # Model Training
from sklearn.ensemble import RandomForestClassifier
# Create an instance of the classifier
classifier = RandomForestClassifier(max_depth=2, random_state=42)
from itertools import combinations
cols = ["Side", "Year", "Humidity(%)"]
max_score = 0
best_features = []
best_classifier = None
for i in range(len(cols) + 1):
for j in combinations(cols, i):
my_list = ["Lat", "Lng", "Distance(mi)"]
my_list += list(j)
X_train_curr = pipline(X_train, my_list, True)
X_val_curr = pipline(X_val, my_list)
# Train the classifier
classifier = classifier.fit(X_train_curr, y_train)
score = classifier.score(X_val_curr, y_val)
if max_score < score:
max_score = score
best_features = my_list
best_classifier = classifier
print("The best accuracy of the classifier on the validation set is ", max_score)
print("Features List ", best_features)
classifier = best_classifier
plt.figure(figsize=(20, 10))
importances_rf = pd.Series(
classifier.feature_importances_, index=pipline(X_train, best_features).columns
)
sorted_importances_rf = importances_rf.sort_values()
sorted_importances_rf.plot(kind="barh", color="lightgreen")
plt.show()
# # Submission File Generation
test_df = pd.read_csv(os.path.join(dataset_path, "test.csv"))
test_df.head()
X_test = test_df.drop(columns=["ID"])
X_test = pipline(X_test, best_features)
y_test_predicted = classifier.predict(X_test)
test_df["Severity"] = y_test_predicted
test_df.head()
test_df[["ID", "Severity"]].to_csv("/kaggle/working/submission.csv", index=False)
# # **Exploratory Analysis: Credit Default**
# # **1\. Introduction to the problem**
# We are analyzing a dataset from a financial institution and want to study the characteristics of a delinquent client, that is, the circumstances under which a client stops paying their debts when compared with clients in good standing. Our dependent variable is therefore the one described by the "default" column, in which non-defaulters are labeled '0' and defaulters '1'.
# # **2\. Data Loading and Exploration**
# In this step we load the data and run an initial exploratory analysis to gather more information about the dataset.
# package imports
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# load the data
df = pd.read_csv(
"https://raw.githubusercontent.com/andre-marcos-perez/ebac-course-utils/develop/dataset/credito.csv",
na_values="na",
)
df.head()
qtd_total, _ = df.shape
qtd_adimplentes, _ = df[df["default"] == 0].shape
qtd_inadimplentes, _ = df[df["default"] == 1].shape
print(
    f"The proportion of non-defaulting clients is {round(100 * qtd_adimplentes / qtd_total, 2)}%"
)
print(
    f"The proportion of defaulting clients is {round(100 * qtd_inadimplentes / qtd_total, 2)}%"
)
# Looking at some general information about the dataset
df.info()
# Note that the **limite_credito** and **valor_transacoes_12m** columns are being interpreted as categorical (dtype = object) when they should be treated as numeric. We can also already see that some columns have missing values.
# Checking which columns have missing values
df.isna().any()
# Looking at the default/non-default proportions in the columns that have missing values
def stats_dados_faltantes(df: pd.DataFrame) -> None:
stats_dados_faltantes = []
for col in df.columns:
if df[col].isna().any():
qtd, _ = df[df[col].isna()].shape
total, _ = df.shape
dict_dados_faltantes = {
col: {"quantidade": qtd, "porcentagem": round(100 * qtd / total, 2)}
}
stats_dados_faltantes.append(dict_dados_faltantes)
for stat in stats_dados_faltantes:
print(stat)
stats_dados_faltantes(df=df)
stats_dados_faltantes(df=df[df["default"] == 0])
stats_dados_faltantes(df=df[df["default"] == 1])
# Note that the proportions of missing entries stay roughly the same for defaulters ('default'=1) and non-defaulters ('default'=0). In other words, both classes are affected by the missing data in approximately the same way.
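# A minimal sketch of the same comparison in a single table: the share of missing values
# per column within each class (this assumes 'default' still holds the original 0/1
# labels at this point in the notebook; `na_by_class` is just an illustrative name).
na_by_class = (
    df.drop(columns=["id", "default"]).isna().groupby(df["default"]).mean() * 100
)
print(na_by_class.loc[:, na_by_class.sum() > 0].round(2).T)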
# # **3\. Data Transformation and Cleaning**
# With the exploration started above, the first step is to convert the columns that were wrongly read as categorical into numeric ones. To do so, below we reformat the values in those columns so that decimals come after a dot rather than a comma.
# Helper to fix the decimal formatting (drops the thousands separator and converts ',' to '.')
fn = lambda valor: float(valor.replace(".", "").replace(",", "."))
df["valor_transacoes_12m"] = df["valor_transacoes_12m"].apply(fn)
df["limite_credito"] = df["limite_credito"].apply(fn)
# Let's now check that the **limite_credito** and **valor_transacoes_12m** columns were converted correctly, and also look at some relevant statistics of the numeric columns.
df.drop(["id", "default"], axis=1).select_dtypes("number").describe().transpose()
# The columns were correctly converted to the numeric type. Now let's handle the missing data. As we saw in the previous section, the missing entries stay proportional across the two classes of interest (defaulters and non-defaulters), so we can simply drop them without introducing extra imbalance into the data.
df.dropna(inplace=True)
nova_qtd_total, _ = df.shape
print(f"Quantidade de linhas retiradas: {qtd_total-nova_qtd_total}.")
# # **4\. Data Analysis and Visualization**
# Let's now analyze the characteristics that may indicate default. We start with the categorical variables and then move on to the numeric ones. Note that we will not use the **sexo** column of the dataset, since we do not want to introduce a gender bias into the analysis.
# Categorical data visualization
sns.set_style("whitegrid")
df.replace({"default": {1: "inadimplente", 0: "adimplente"}}, inplace=True)
f = sns.catplot(x="escolaridade", data=df, hue="default", kind="count")
f.set(
title="Escolaridade: adimplentes x inadimplentes",
xlabel="Escolaridade",
ylabel="Frequência Absoluta",
)
f.set_xticklabels(rotation=45)
f = sns.catplot(x="salario_anual", data=df, hue="default", kind="count")
f.set(
title="Salário Anual: adimplentes x inadimplentes",
xlabel="Salário Anual",
ylabel="Frequência Absoluta",
)
f.set_xticklabels(rotation=45)
# Neither annual income nor education level seems to be a relevant characteristic for identifying defaulting clients, since the bar heights vary in roughly the same way for both classes. Interestingly, one might intuitively expect annual income to be decisive in separating defaulters from non-defaulters, yet the data does not support that assumption.
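# To quantify the visual impression above, a minimal sketch of the default rate per category
# (note that 'default' has already been mapped to the strings 'adimplente'/'inadimplente').
for col in ["escolaridade", "salario_anual"]:
    taxa = (
        df.groupby(col)["default"]
        .apply(lambda s: (s == "inadimplente").mean())
        .sort_values(ascending=False)
    )
    print(f"Default rate by {col}:\n{taxa}\n")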
# Visualizing numeric data
sns.set_style("whitegrid")
df_adimplente = df[df["default"] == "adimplente"]
df_inadimplente = df[df["default"] == "inadimplente"]
df.drop(["id", "default"], axis=1).select_dtypes("number").head()
coluna = "qtd_transacoes_12m"
titulos = [
"Qtd. de Transações no Último Ano",
"Qtd. de Transações no Último Ano de Adimplentes",
"Qtd. de Transações no Último Ano de Inadimplentes",
]
eixo = 0
max_y = 0
figura, eixos = plt.subplots(1, 3, figsize=(20, 5), sharex=True)
for dataframe in [df, df_adimplente, df_inadimplente]:
f = sns.histplot(x=coluna, data=dataframe, stat="count", ax=eixos[eixo])
f.set(title=titulos[eixo], xlabel=coluna.capitalize(), ylabel="Frequência Absoluta")
_, max_y_f = f.get_ylim()
max_y = max_y_f if max_y_f > max_y else max_y
f.set(ylim=(0, max_y))
eixo += 1
figura.show()
# Here we can see a clear distinction between the two classes of clients: non-defaulters peak at roughly 60 to 90 transactions per year, while defaulters peak between 30 and 60. In other words, defaulting clients tend to make fewer transactions. In this case, the financial institution could reach out to these clients and offer the possibility of renegotiating interest rates.
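# A minimal numeric summary of the same point: per-class quartiles of the yearly transaction count.
print(df.groupby("default")["qtd_transacoes_12m"].describe()[["25%", "50%", "75%"]])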
coluna = "valor_transacoes_12m"
titulos = [
"Valor das Transações no Último Ano",
"Valor das Transações no Último Ano de Adimplentes",
"Valor das Transações no Último Ano de Inadimplentes",
]
eixo = 0
max_y = 0
figura, eixos = plt.subplots(1, 3, figsize=(20, 5), sharex=True)
for dataframe in [df, df_adimplente, df_inadimplente]:
f = sns.histplot(x=coluna, data=dataframe, stat="count", ax=eixos[eixo])
f.set(title=titulos[eixo], xlabel=coluna.capitalize(), ylabel="Frequência Absoluta")
_, max_y_f = f.get_ylim()
max_y = max_y_f if max_y_f > max_y else max_y
f.set(ylim=(0, max_y))
eixo += 1
figura.show()
# These plots show that the value of last year's transactions also seems to be a relevant characteristic for distinguishing the two classes of clients. Defaulters have a pronounced peak at transaction values roughly between 500.00 and 3,000.00.
# Since both characteristics analyzed above proved relevant as indicators of default, let's visualize their relationship in a single plot in order to locate where the defaulting clients sit in that plane.
f = sns.relplot(
x="valor_transacoes_12m", y="qtd_transacoes_12m", data=df, hue="default"
)
_ = f.set(
title="Relação entre Valor e Quantidade de Transações no Último Ano",
xlabel="Valor das Transações no Último Ano",
ylabel="Quantidade das Transações no Último Ano",
)
# The plot above shows a concentrated group of non-defaulting clients at high transaction values and high transaction counts. To mitigate default, the institution should focus on monitoring two groups of clients more closely: those with fewer than 60 transactions in the last year and transaction values around 2,500.00, and those with between 60 and 80 transactions in the last year and transaction values between 7,000.00 and 10,000.00.
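# A rough sketch that quantifies the two regions highlighted above (the thresholds are
# approximate values read off the scatter plot, not exact cluster boundaries).
grupo_1 = df[(df["qtd_transacoes_12m"] < 60) & df["valor_transacoes_12m"].between(1500, 3500)]
grupo_2 = df[df["qtd_transacoes_12m"].between(60, 80) & df["valor_transacoes_12m"].between(7000, 10000)]
for nome, grupo in [("group 1", grupo_1), ("group 2", grupo_2)]:
    taxa = (grupo["default"] == "inadimplente").mean()
    print(f"{nome}: {len(grupo)} clients, default rate = {100 * taxa:.1f}%")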
# Below we analyze another numeric characteristic that proved relevant for distinguishing defaulters from non-defaulters: the number of products held by the client.
# Number of products held by the client
coluna = "qtd_produtos"
titulos = [
"Quantidade de Produtos",
"Quantidade de Produtos: Adimplentes",
"Quantidade de Produtos: Inadimplentes",
]
eixo = 0
max_y = 0
figura, eixos = plt.subplots(1, 3, figsize=(20, 5), sharex=True)
for dataframe in [df, df_adimplente, df_inadimplente]:
f = sns.histplot(dataframe, x=coluna, stat="count", ax=eixos[eixo])
f.set(title=titulos[eixo], xlabel=coluna.capitalize(), ylabel="Frequência Absoluta")
_, max_y_f = f.get_ylim()
max_y = max_y_f if max_y_f > max_y else max_y
f.set(ylim=(0, max_y))
eixo += 1
figura.show()
69270626
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
train_df = pd.read_csv("../input/titanic/train.csv")
test_df = pd.read_csv("../input/titanic/test.csv")
train_df.head()
train_df.info()
train_df.describe()
# Finding numerical and categorical data column names
num_variables = [i for i in train_df.columns if train_df.dtypes[i] != "object"]
cat_variables = [i for i in train_df.columns if train_df.dtypes[i] == "object"]
num_variables
cat_variables
train_df[["Pclass", "Survived"]].groupby(["Pclass"], as_index=False).mean().sort_values(
by="Survived", ascending=False
)
train_df[["Sex", "Survived"]].groupby(["Sex"], as_index=False).mean().sort_values(
by="Survived", ascending=False
)
train_df[["SibSp", "Survived"]].groupby(["SibSp"], as_index=False).mean().sort_values(
by="Survived", ascending=False
)
train_df[["Parch", "Survived"]].groupby(["Parch"], as_index=False).mean().sort_values(
by="Survived", ascending=False
)
g = sns.FacetGrid(train_df, col="Survived")
g.map(plt.hist, "Age", bins=20)
# * Infants (Age <=4) had high survival rate.
# * Oldest passengers (Age = 80) survived.
# * Large number of 15-25 year olds did not survive.
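# A quick numeric check of the observations above: survival rate and group size by age bracket
# (a minimal sketch; the bin edges are chosen here for illustration only).
age_bins = pd.cut(train_df["Age"], bins=[0, 4, 15, 25, 40, 60, 80])
print(train_df.groupby(age_bins)["Survived"].agg(["mean", "size"]))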
grid = sns.FacetGrid(train_df, col="Survived", row="Pclass", height=2.2, aspect=1.6)
grid.map(plt.hist, "Age", alpha=0.5, bins=20)
grid.add_legend()
# * Most passengers in Pclass=1 survived
# * Infant passengers in Pclass=2 and Pclass=3 mostly survived
# * Pclass=3 had most passengers, however most did not survive
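# The same observations expressed as numbers: the share of survivors and non-survivors per class
# (an illustrative sketch only).
print(pd.crosstab(train_df["Pclass"], train_df["Survived"], normalize="index"))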
def show_missing(df):
# Shows percentage of null values in each column
pd.options.display.max_rows = None
display(((df.isnull().sum() / len(df)) * 100))
show_missing(train_df)
show_missing(test_df)
train_df = train_df.drop(["Ticket", "Cabin"], axis=1)
test_df = test_df.drop(["Ticket", "Cabin"], axis=1)
combine = [train_df, test_df]
train_df["Title"] = train_df.Name.str.extract(" ([A-Za-z]+)\.", expand=False)
pd.crosstab(train_df["Title"], train_df["Sex"])
train_df["Title"] = train_df["Title"].replace(
[
"Lady",
"Countess",
"Capt",
"Col",
"Don",
"Dr",
"Major",
"Rev",
"Sir",
"Jonkheer",
"Dona",
],
"Rare",
)
train_df["Title"] = train_df["Title"].replace("Mlle", "Miss")
train_df["Title"] = train_df["Title"].replace("Ms", "Miss")
train_df["Title"] = train_df["Title"].replace("Mme", "Mrs")
train_df[["Title", "Survived"]].groupby(["Title"], as_index=False).mean()
test_df["Title"] = test_df.Name.str.extract(" ([A-Za-z]+)\.", expand=False)
pd.crosstab(test_df["Title"], test_df["Sex"])
test_df["Title"] = test_df["Title"].replace(
[
"Lady",
"Countess",
"Capt",
"Col",
"Don",
"Dr",
"Major",
"Rev",
"Sir",
"Jonkheer",
"Dona",
],
"Rare",
)
test_df["Title"] = test_df["Title"].replace("Mlle", "Miss")
test_df["Title"] = test_df["Title"].replace("Ms", "Miss")
test_df["Title"] = test_df["Title"].replace("Mme", "Mrs")
le = LabelEncoder()
le.fit(train_df["Title"])
train_df["Title"] = le.transform(train_df["Title"])
test_df["Title"] = le.transform(test_df["Title"])
train_df = train_df.drop(["Name", "PassengerId"], axis=1)
test_df = test_df.drop(["Name"], axis=1)
combine = [train_df, test_df]
le_sex = LabelEncoder()
le_sex.fit(train_df["Sex"])
train_df["Sex"] = le_sex.transform(train_df["Sex"])
test_df["Sex"] = le_sex.transform(test_df["Sex"])
train_df["FamilySize"] = train_df["SibSp"] + train_df["Parch"] + 1
test_df["FamilySize"] = test_df["SibSp"] + test_df["Parch"] + 1
show_missing(train_df)
train_df["Age"].fillna(train_df["Age"].mean(), inplace=True)
test_df["Age"].fillna(train_df["Age"].mean(), inplace=True)
show_missing(train_df)
train_df = train_df.dropna()
# Keep every test row (the submission needs a prediction for all passengers); fill the
# single missing Fare in the test set instead of dropping the row.
test_df["Fare"].fillna(train_df["Fare"].median(), inplace=True)
le_emb = LabelEncoder()
le_emb.fit(train_df["Embarked"])
train_df["Embarked"] = le_emb.transform(train_df["Embarked"])
test_df["Embarked"] = le_emb.transform(test_df["Embarked"])
train_df.info()
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
train_df.drop(["Survived"], axis=1),
train_df["Survived"],
test_size=0.2,
random_state=0,
)
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
logreg = LogisticRegression(solver="liblinear")
logreg.fit(X_train, y_train)
# Y_pred = logreg.predict(X_test)
acc_log = round(logreg.score(X_test, y_test) * 100, 2)
acc_log
knn = KNeighborsClassifier(n_neighbors=3)
knn.fit(X_train, y_train)
acc_knn = round(knn.score(X_test, y_test) * 100, 2)
acc_knn
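# A single hold-out split can be noisy; a hedged sketch of a more robust comparison using
# 5-fold cross-validation on the full training data (illustrative only).
from sklearn.model_selection import cross_val_score
X_all = train_df.drop(["Survived"], axis=1)
y_all = train_df["Survived"]
for name, model in [
    ("LogisticRegression", LogisticRegression(solver="liblinear")),
    ("KNN", KNeighborsClassifier(n_neighbors=3)),
]:
    scores = cross_val_score(model, X_all, y_all, cv=5, scoring="accuracy")
    print(f"{name}: mean accuracy = {scores.mean():.3f} (+/- {scores.std():.3f})")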
test_df.info()
predictions = knn.predict(test_df.drop(["PassengerId"], axis=1))
sample_submission = pd.DataFrame(
{"PassengerId": np.asarray(test_df.PassengerId), "Survived": predictions}
)
sample_submission.to_csv("submission.csv", index=False)
69270885
<jupyter_start><jupyter_text>clrp_external
Kaggle dataset identifier: clrp-external
<jupyter_script># # [ReadNet is SOTA for WeeBit](https://paperswithcode.com/sota/text-classification-on-weebit-readability) (Readability Assessment dataset).
# # Below is the ReadNet code, originally trained on WeeBit, so that the model can be fine-tuned using the CommonLit data.
import torch
import numpy as np
from torch import Tensor, nn, tensor
import math
import pandas as pd
import csv
import nltk
nltk.download("punkt")
from nltk.tokenize import sent_tokenize, word_tokenize
from pathlib import Path
from fastai.vision.all import *
from fastai.text.all import *
# The GloVe embeddings are read from ../input/glove-embeddings; root_dir is only used by the
# commented-out diagnostic pkl_save calls further down.
root_dir = "."
## MODEL CODE ##
class MultiHeadAttention(nn.Module):
def __init__(self, d_model, num_heads, masked):
super().__init__()
assert d_model % num_heads == 0, "num_heads must evenly chunk d_model"
self.num_heads = num_heads
self.wq = nn.Linear(d_model, d_model, bias=False) # QQ what if bias=True?
self.wk = nn.Linear(d_model, d_model, bias=False)
self.wv = nn.Linear(d_model, d_model, bias=False)
self.masked = masked
self.softmax = nn.Softmax(dim=2)
def forward(self, q, k, v):
qs = self.wq(q).chunk(self.num_heads, dim=2)
ks = self.wk(k).chunk(self.num_heads, dim=2)
vs = self.wv(v).chunk(self.num_heads, dim=2)
outs = []
# TODO Use einsum instead of for loop
for qi, ki, vi in zip(qs, ks, vs):
attns = qi.bmm(ki.transpose(1, 2)) / (ki.shape[2] ** 0.5)
if self.masked:
attns = attns.tril() # Zero out upper triangle so it can't look ahead
attns = self.softmax(attns)
outs.append(attns.bmm(vi))
return torch.cat(outs, dim=2)
class AddNorm(nn.Module):
def __init__(self, d_model):
super().__init__()
self.ln = nn.LayerNorm(d_model)
def forward(self, x1, x2):
return self.ln(x1 + x2)
class FeedForward(nn.Module):
def __init__(self, d_model):
super().__init__()
self.l1 = nn.Linear(d_model, d_model)
self.relu = nn.ReLU()
self.l2 = nn.Linear(d_model, d_model)
def forward(self, x):
return self.l2(self.relu(self.l1(x)))
def pos_encode(x):
pos, dim = torch.meshgrid(torch.arange(x.shape[1]), torch.arange(x.shape[2]))
dim = 2 * (dim // 2)
enc_base = pos / (10_000 ** (dim / x.shape[2]))
addition = torch.zeros_like(x)
for d in range(x.shape[2]):
enc_func = torch.sin if d % 2 == 0 else torch.cos
addition[:, :, d] = enc_func(enc_base[:, d])
if x.is_cuda:
addition = addition.cuda()
return x + addition
class EncoderBlock(nn.Module):
def __init__(self, d_model, num_heads):
super().__init__()
self.mha = MultiHeadAttention(
d_model=d_model, num_heads=num_heads, masked=False
)
self.an1 = AddNorm(d_model)
self.ff = FeedForward(d_model)
self.an2 = AddNorm(d_model)
def forward(self, x):
x = self.an1(x, self.mha(q=x, k=x, v=x))
return self.an2(x, self.ff(x))
class AttentionAggregation(nn.Module):
def __init__(self, d_model):
super().__init__()
self.query = nn.Linear(d_model, 1, bias=False)
def forward(self, x): # (b, s, m)
attns = self.query(x).softmax(dim=1) # (b, s, 1)
enc = torch.bmm(attns.transpose(1, 2), x) # (b, 1, m)
return enc.squeeze(1)
class LinTanh(nn.Module):
def __init__(self, d_model):
super().__init__()
self.lin = nn.Linear(d_model, d_model)
self.tanh = nn.Tanh()
def forward(self, x):
return self.tanh(self.lin(x))
class LinFeatConcat(nn.Module):
def __init__(self, d_model, n_feats, n_out):
super().__init__()
self.lin = nn.Linear(d_model + n_feats, n_out, bias=False) # TODO what if True?
def forward(self, x, feats):
return self.lin(torch.cat([x, feats], dim=1))
class ReadNetBlock(nn.Module):
def __init__(self, d_model, n_heads, n_blocks, n_feats, n_out):
super().__init__()
self.blocks = nn.Sequential(
*[EncoderBlock(d_model=d_model, num_heads=n_heads) for _ in range(n_blocks)]
)
self.lin_tanh = LinTanh(d_model=d_model)
self.attn_agg = AttentionAggregation(d_model=d_model)
self.lin_feat_concat = LinFeatConcat(
d_model=d_model, n_feats=n_feats, n_out=n_out
)
def forward(self, x, feats): # (b, s, m), (b, f)
x = pos_encode(x)
x = self.blocks(x)
x = self.lin_tanh(x)
x = self.attn_agg(x)
return self.lin_feat_concat(x, feats)
class GloveEmbedding(nn.Module):
def __init__(self, num):
super().__init__()
# Make embedding
self.embed = nn.Embedding(400_000 + 1, num)
# found GloveEmbedding on kaggle and set here.
emb_w = (
pd.read_csv(
"../input/glove-embeddings/glove.6B.200d.txt",
header=None,
sep=" ",
quoting=csv.QUOTE_NONE,
)
.values[:, 1:]
.astype("float64")
)
emb_w = Tensor(emb_w)
emb_w = torch.cat([emb_w, torch.zeros(1, num)], dim=0)
self.embed.weight = nn.Parameter(emb_w)
def forward(self, x):
return self.embed(x.to(torch.long))
class ReadNet(nn.Module):
def __init__(self, embed, d_model, n_heads, n_blocks, n_feats_sent, n_feats_doc):
super().__init__()
self.embed = embed
self.sent_block = ReadNetBlock(
d_model=d_model,
n_heads=n_heads,
n_blocks=n_blocks,
n_feats=n_feats_sent,
n_out=d_model,
)
self.doc_block = ReadNetBlock(
d_model=d_model,
n_heads=n_heads,
n_blocks=n_blocks,
n_feats=n_feats_doc,
n_out=d_model + n_feats_doc,
)
self.head = nn.Sequential(
nn.Linear(d_model + n_feats_doc, 1),
)
def forward(
self, x, feats_sent=None, feats_doc=None
): # (b, d, s) tokens, (b, d, n_f_s), (b, n_f_d)
if feats_sent is None:
feats_sent = Tensor([])
if feats_doc is None:
feats_doc = Tensor([])
if x.is_cuda:
feats_sent = feats_sent.cuda()
feats_doc = feats_doc.cuda()
x = self.embed(x)
b, d, s, m = x.shape
x = x.reshape(b * d, s, m)
sents_enc = self.sent_block(x, feats_sent.reshape(b * d, -1)) # (b*d, m)
docs = sents_enc.reshape(b, d, m)
docs_enc = self.doc_block(docs, feats_doc)
out = self.head(docs_enc)
return out.squeeze(1)
## DATA PREPARATION ##
class GloveTokenizer:
def __init__(self, num):
# found GloveEmbedding on kaggle and set here.
words = pd.read_csv(
"../input/glove-embeddings/glove.6B.200d.txt",
header=None,
sep=" ",
quoting=csv.QUOTE_NONE,
usecols=[0],
).values
words = [word[0] for word in words]
self.word2idx = {w: i for i, w in enumerate(words)}
def __call__(self, sent):
toks = [self.word2idx.get(w.lower()) for w in word_tokenize(sent)]
return [self.unk_token if t is None else t for t in toks]
@property
def unk_token(self):
return (
400_000 # We appended this to the end of the embedding to return all zeros
)
@property
def pad_token(self):
return self.unk_token # Seems that this is the best option for GLOVE
def prepare_txts(txts, tokenizer):
# Input: (bs,) str, Output: (bs, max_doc_len, max_sent_len)
# We choose to elongate all docs and sentences to the max rather than truncate some of them
# TODO: Do this better later:
# (1) Truncate smartly (if there is one very long outlier sentence or doc)
# (2) Group together docs of similar lengths (in terms of num_sents)
docs = [[tokenizer(sent) for sent in sent_tokenize(txt)] for txt in txts]
# pkl_save(root_dir/"doc_lens", pd.Series([len(doc) for doc in docs]))
max_doc_len = max([len(doc) for doc in docs])
docs = [doc + [[]] * (max_doc_len - len(doc)) for doc in docs]
# pkl_save(root_dir/"sent_lens", pd.Series([len(sent) for doc in docs for sent in doc]))
max_sent_len = max([len(sent) for doc in docs for sent in doc])
docs = [
[s + [tokenizer.pad_token] * (max_sent_len - len(s)) for s in doc]
for doc in docs
]
return Tensor(docs)
def prepare_txts_cut(txts, tokenizer, max_doc_len=18, max_sent_len=49):
docs = [
[tokenizer(sent)[:max_sent_len] for sent in sent_tokenize(txt)[:max_doc_len]]
for txt in txts
]
docs = [doc + [[]] * (max_doc_len - len(doc)) for doc in docs]
docs = [
[s + [tokenizer.pad_token] * (max_sent_len - len(s)) for s in doc]
for doc in docs
]
return Tensor(docs)
# # Preprocess
# below code is from [How To: Preprocessing for GloVe Part2: Usage](https://www.kaggle.com/christofhenkel/how-to-preprocessing-for-glove-part2-usage)
symbols_to_isolate = '.,?!-;*"…:—()%#$&_/@\・ω+=”“[]^–>\\°<~•≠™ˈʊɒ∞§{}·τα❤☺ɡ|¢→̶`❥━┣┫┗O►★©―ɪ✔®\x96\x92●£♥➤´¹☕≈÷♡◐║▬′ɔː€۩۞†μ✒➥═☆ˌ◄½ʻπδηλσερνʃ✬SUPERIT☻±♍µº¾✓◾؟.⬅℅»Вав❣⋅¿¬♫CMβ█▓▒░⇒⭐›¡₂₃❧▰▔◞▀▂▃▄▅▆▇↙γ̄″☹➡«φ⅓„✋:¥̲̅́∙‛◇✏▷❓❗¶˚˙)сиʿ✨。ɑ\x80◕!%¯−flfi₁²ʌ¼⁴⁄₄⌠♭✘╪▶☭✭♪☔☠♂☃☎✈✌✰❆☙○‣⚓年∎ℒ▪▙☏⅛casǀ℮¸w‚∼‖ℳ❄←☼⋆ʒ⊂、⅔¨͡๏⚾⚽Φ×θ₩?(℃⏩☮⚠月✊❌⭕▸■⇌☐☑⚡☄ǫ╭∩╮,例>ʕɐ̣Δ₀✞┈╱╲▏▕┃╰▊▋╯┳┊≥☒↑☝ɹ✅☛♩☞AJB◔◡↓♀⬆̱ℏ\x91⠀ˤ╚↺⇤∏✾◦♬³の|/∵∴√Ω¤☜▲↳▫‿⬇✧ovm-208'‰≤∕ˆ⚜☁'
from nltk.tokenize.treebank import TreebankWordTokenizer
# NOTE: the referenced kernel also defines a long `symbols_to_delete` string of characters to
# strip entirely; that list did not survive here, so an empty placeholder keeps the code
# runnable (nothing gets deleted, punctuation is still isolated).
symbols_to_delete = ""
tb_tokenizer = TreebankWordTokenizer()
isolate_dict = {ord(c): f" {c} " for c in symbols_to_isolate}
remove_dict = {ord(c): "" for c in symbols_to_delete}
def handle_punctuation(x):
x = x.translate(remove_dict)
x = x.translate(isolate_dict)
return x
def handle_contractions(x):
x = tb_tokenizer.tokenize(x)
return x
def fix_quote(x):
x = [x_[1:] if x_.startswith("'") else x_ for x_ in x]
x = " ".join(x)
return x
def preprocess(x):
x = handle_punctuation(x)
x = handle_contractions(x)
x = fix_quote(x)
return x
data = pd.read_csv("../input/commonlitreadabilityprize/train.csv")
cleaned_text = []
for text in data["excerpt"]:
cleaned_text.append(preprocess(text))
data["cleaned_text"] = cleaned_text
## TRAIN ## (using fastai)
tokenizer = GloveTokenizer(200)
embed = GloveEmbedding(200)
def get_splits(data):
num = len(data)
idx = list(range(num))
random.seed(42)
random.shuffle(idx)
split = int(num * 0.9)
return idx[:split], idx[split:]
def get_dls(bs):
txts = data.cleaned_text.tolist()
x = prepare_txts_cut(txts, tokenizer)
    # changed from data.readability.tolist() to the CommonLit target column
y = data.target.tolist()
ds = TfmdLists(
zip(x, y),
tfms=[],
splits=get_splits(data),
)
dls = ds.dataloaders(batch_size=bs)
return dls
def get_model():
    # d_model=200 matches the glove.6B.200d.txt vectors used by GloveEmbedding above
readnet = ReadNet(
embed=embed,
d_model=200,
n_heads=4,
n_blocks=6,
n_feats_sent=0,
n_feats_doc=0,
)
readnet = readnet.cuda()
# Automatically freeze the embedding. We should not be learning this
for p in readnet.embed.parameters():
p.requires_grad = False
return readnet
metrics = [rmse]
learn = Learner(
dls=get_dls(32), model=get_model(), metrics=metrics, loss_func=MSELossFlat()
)
learn.lr_find()
# Result MSE is about 0.40
# Momentum=0.9 improved training. Weight decay didn't work well
cbs = [
SaveModelCallback(
monitor="_rmse", fname="model_0", comp=np.less, reset_on_fit=False
),
GradientAccumulation(32),
]
learn.fit_one_cycle(50, 3e-5, moms=(0.9, 0.9, 0.9), cbs=cbs)
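# SaveModelCallback stores the best checkpoint as "model_0" and recent fastai versions restore
# it automatically after fit; reloading explicitly is a cheap safeguard before inference.
# If a WeeBit-pretrained checkpoint were available (as the introduction suggests), it could be
# loaded the same way before fit_one_cycle, e.g. learn.load("readnet_weebit"), a hypothetical
# file name shown only as a sketch.
learn.load("model_0")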
# # Inference
test_df = pd.read_csv("../input/commonlitreadabilityprize/test.csv")
cleaned_text = []
for text in test_df["excerpt"]:
cleaned_text.append(preprocess(text))
test_df["cleaned_text"] = cleaned_text
test_txts = test_df.cleaned_text.tolist()
test_cut_txts = prepare_txts_cut(test_txts, tokenizer)
test_cut_txts_zip = zip(test_cut_txts, [0 for i in range(len(test_cut_txts))])
test_dl = learn.dls.test_dl(test_cut_txts_zip, 128)
preds, _ = learn.get_preds(dl=test_dl)
pred_df = pd.read_csv("../input/commonlitreadabilityprize/sample_submission.csv")
pred_df.target = preds
pred_df.to_csv("submission.csv", index=False)
69270878
<jupyter_start><jupyter_text>NBA 2k20 player dataset
### Context
NBA 2k20 analysis.
### Content
Detailed attributes for players registered in the NBA2k20.
Kaggle dataset identifier: nba2k20-player-dataset
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ## Importing Libraries
from datetime import datetime
from dateutil.parser import parse
from dateutil.relativedelta import relativedelta
from sklearn.base import BaseEstimator, TransformerMixin
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import mean_squared_error
# ## Data Collection
data_init = pd.read_csv("/kaggle/input/nba2k20-player-dataset/nba2k20-full.csv")
data_init.head()
# ## Data Exploration
# Checking the size of the dataset
data_init.shape
# Data Types
data_init.dtypes
# From the look of it, we can make the below observations about the data:
# * There are fields in the dataset that are not relevant for predicting the salary of a player, like Full Name and Jersey.
# * College - This field contains missing values that do not necessarily mean that the player is an undergrad; they may also be genuinely missing information. However, we can use this column for hypothesis testing to see if the salary depends on the college
# * From the date of birth, we can figure out the age of a player and use that for hypothesis testing. But the dates are in MM/DD/YY format
# * The height and weight are given in both unit systems. We will have to pick one system and convert these into numerical columns instead of strings
# * Salary needs to be converted to numeric
# * Draft round and Draft Pick are also strings. We must change them to integers
# We can skip the EDA for now and we will first do the data cleanup and transformation
# Let's get our hands dirty
#
data = data_init.copy()
data.head()
data["draft_round"].value_counts() # We have to replace the "Undrafted" to 0
plt.figure(figsize=(20, 8))
sns.countplot(data=data.sort_values(by="draft_peak", ascending=True), x="draft_peak")
plt.xticks(rotation=90)
# We have to replace the "Undrafted" to 0
plt.show()
data["college"].value_counts()
data["country"].value_counts()
# ## Data Cleaning and Transformation
data["draft_round"].value_counts()
def weight_to_kg(col):
start = col.find("/") + 1
end = col.find("kg")
return float(col[start:end])
def height_to_mts(col):
start = col.find("/") + 1
end = len(col)
return float(col[start:end])
cols_to_drop = [
"full_name",
"jersey",
"height",
"weight",
"draft_peak",
"salary",
"b_day",
]
class Attribs_transformer(BaseEstimator, TransformerMixin):
def __init__(self, add_bmi=True):
self.add_bmi = add_bmi
    def fit(self, X, y=None):
        return self
def transform(self, X):
X["weight_in_kg"] = X["weight"].transform(lambda x: weight_to_kg(x))
X["height_in_mts"] = X["height"].transform(lambda x: height_to_mts(x))
X["salary_amount"] = X["salary"].transform(lambda x: float(x[1:]))
X["draft_round"] = (
X["draft_round"]
.transform([lambda x: 0 if x == "Undrafted" else x])
.astype(int)
)
X["draft_pick"] = (
X["draft_peak"]
.transform([lambda x: 0 if x == "Undrafted" else x])
.astype(int)
)
X["age"] = X["b_day"].transform(
lambda x: relativedelta(datetime.today(), parse(x)).years
)
X["years_from_draft_year"] = X["draft_year"].transform(
lambda x: datetime.today().year - x
)
if self.add_bmi:
X["bmi"] = X["weight_in_kg"] / X["height_in_mts"] ** 2
X.drop(cols_to_drop, axis=1, inplace=True)
return X
columns_transformer = Attribs_transformer()
data_transformed = columns_transformer.transform(data)
data_transformed.head()
# Checking for Missing Values
data_transformed.isna().sum().plot(kind="barh")
plt.show()
# The fields "college" and "team" has missing values. We willl later impute this using KNNImputer
# ## Hypothesis:
# * Do younger players earn more?
# * Which college has a good earning probability?
# * Players of which country earn the most?
# * Does rating decide the Salary?
# * Do tall players earn more salary?
# * Is the height-to-weight ratio of a player a good indicator of salary?
# In all the above hypotheses, we will look at the median salary to avoid the effect of outliers
df = data_transformed.copy()
# ### Do younger players earn more?
df_sal_by_age = (
df["salary_amount"].groupby(pd.cut(df["age"], bins=7)).median().reset_index()
)
plt.figure(figsize=(10, 6))
sns.barplot(data=df_sal_by_age, x="age", y="salary_amount", palette="plasma_r")
plt.xticks(rotation=90)
plt.xlabel("Age bracket", fontsize=13)
plt.ylabel("Salary", fontsize=13)
plt.title("Age vs Salary", fontsize=14)
plt.plot()
# Players between 35 and 38 years old have a higher probability of earning more than the others, probably because of their experience of the game.
# ### Which college has a good earning probability?
df_sal_by_college = (
df[["salary_amount", "college"]]
.groupby("college")
.agg({"salary_amount": ["median", "size"]})
.reset_index()
)
df_sal_by_college.columns = ["college", "mean_salary", "number_players"]
plt.figure(figsize=(30, 20))
ax1 = plt.subplot(2, 1, 1)
sns.barplot(
data=df_sal_by_college.sort_values(by="mean_salary", ascending=False),
x="college",
y="mean_salary",
palette="plasma_r",
)
plt.xticks(rotation=90)
plt.xlabel("College", fontsize=14)
plt.ylabel("Salary", fontsize=14)
plt.title("Salary and Number of players of different colleges", fontsize=16)
ax1.spines[["top", "right"]].set_visible(False)
plt.plot()
ax2 = ax1.twinx()
sns.lineplot(
data=df_sal_by_college.sort_values(by="mean_salary", ascending=False),
x="college",
y="number_players",
palette="plasma_r",
color="black",
marker="o",
)
plt.xticks(rotation=90)
plt.xlabel("College", fontsize=14)
plt.ylabel("Number of Players", fontsize=14)
ax2.spines["top"].set_visible(False)
plt.plot()
# Even though Kentucky and Duke have almost 25 players each, the top earner is from Davidson College, with the second-highest salary coming from Arizona State.
# There is only one player from each college in the top 8 salaries.
# The college does not appear to drive salary, even though some colleges have many players.
# ## Which team offers better salaries?
df[["salary_amount", "team", "country"]].groupby(["team", "country"]).agg(
{"salary_amount": ["median", "size"]}
).reset_index()
df_sal_by_team = (
df[["salary_amount", "team"]]
.groupby("team")
.agg({"salary_amount": ["median", "size"]})
.reset_index()
)
df_sal_by_team.columns = ["team", "median_salary", "number_players"]
plt.figure(figsize=(30, 20))
ax1 = plt.subplot(2, 1, 1)
sns.barplot(
data=df_sal_by_team.sort_values(by="median_salary", ascending=False),
x="team",
y="median_salary",
palette="plasma_r",
)
plt.xticks(rotation=90)
plt.xlabel("Team", fontsize=14)
plt.ylabel("Salary", fontsize=14)
plt.title("Salary and Number of players of different teams", fontsize=16)
ax1.spines[["top", "right"]].set_visible(False)
plt.plot()
ax2 = ax1.twinx()
sns.lineplot(
data=df_sal_by_team.sort_values(by="median_salary", ascending=False),
x="team",
y="number_players",
palette="plasma_r",
color="black",
marker="o",
)
plt.xticks(rotation=90)
plt.xlabel("Team", fontsize=14)
plt.ylabel("Number of Players", fontsize=14)
ax2.spines["top"].set_visible(False)
plt.plot()
# The Miami Heat offers the highest median salary to its players.
# ### Players of which country earn the most?
df_sal_by_country = (
df[["salary_amount", "country"]].groupby("country").median().reset_index()
)
plt.figure(figsize=(20, 8))
# ax1 = plt.subplot(2,1,1)
sns.barplot(
data=df_sal_by_country.sort_values(by="salary_amount", ascending=False),
x="country",
y="salary_amount",
palette="plasma_r",
)
plt.xticks(rotation=90)
plt.xlabel("Country", fontsize=14)
plt.ylabel("Salary", fontsize=14)
plt.title("Salary by different Countries", fontsize=16)
plt.show()
# Players from the Dominican Republic, Montenegro and New Zealand earn higher median salaries than the others.
# ### Does rating decide the Salary?
plt.figure(figsize=(8, 6))
sns.regplot(data=df, x="rating", y="salary_amount")
plt.title("Regression Plot between Rating and Salary", fontsize=14)
plt.show()
# It is very evident from the regression plot that the salary does depend on the rating: the higher the rating, the higher the salary.
# ### Do tall players earn more salary?
plt.figure(figsize=(8, 6))
sns.regplot(data=df, x="height_in_mts", y="salary_amount")
plt.title("Regression Plot between height and Salary", fontsize=14)
plt.show()
# Since the regression line is almost flat (parallel to the X-axis), the salary does not depend on the height of the player. (It must be skill, then; I wish the dataset had a metric for skill.)
# ### Does the BMI of a player explain the salary?
plt.figure(figsize=(8, 6))
sns.regplot(data=df, x="bmi", y="salary_amount")
plt.title("Regression Plot between bmi and Salary", fontsize=14)
plt.show()
# It does not look like it. The regression line is almost flat along the BMI axis.
# ### Checking the variation of the Salary with the numerical columns using a pair plot
sns.pairplot(df)
plt.show()
# Checking the Correlation coefficient
plt.figure(figsize=(10, 8))
sns.heatmap(df.corr(), annot=True, cmap="plasma_r")
plt.show()
# The salary is highly correlated with the rating and age, and strongly negatively correlated with the draft year. Also, the years since the draft year are correlated with age.
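# To put numbers behind the heatmap reading above, a hedged one-liner that sorts the
# correlations of every numeric column with the salary:
df.corr()["salary_amount"].sort_values(ascending=False)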
# ## Model Creation
# ### Before entering the model creation phase, let us create a test dataset from the original dataset (the first DataFrame we collected). We create this test dataset and set it aside until we have developed a model to evaluate on it. The data preparation pipeline will be applied to the test dataset before prediction.
data_init.head()
from sklearn.model_selection import train_test_split
df_train, df_test = train_test_split(data_init, test_size=0.2, random_state=1)
df_train.shape, df_test.shape
df_train.isna().sum()[df_train.isna().sum() > 0]
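# A minimal, hedged sketch of the imputation mentioned earlier for the sparse categorical
# columns (the integer encoding, the helper feature and n_neighbors=5 are assumptions):
from sklearn.impute import KNNImputer

impute_cols = ["college", "team"]
encoded = pd.DataFrame(index=df_train.index)
for col in impute_cols:
    codes, _ = df_train[col].factorize()  # missing values become -1
    encoded[col] = pd.Series(codes, index=df_train.index).replace(-1, np.nan)
encoded["rating"] = df_train["rating"]  # a numeric feature to guide the neighbour search

imputed = pd.DataFrame(
    KNNImputer(n_neighbors=5).fit_transform(encoded),
    columns=encoded.columns,
    index=df_train.index,
)
imputed.isna().sum()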
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/270/69270878.ipynb
|
nba2k20-player-dataset
|
isaienkov
|
[{"Id": 69270878, "ScriptId": 18885510, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2763517, "CreationDate": "07/28/2021 23:28:19", "VersionNumber": 3.0, "Title": "NBA 2K20 Salary Prediction", "EvaluationDate": NaN, "IsChange": true, "TotalLines": 260.0, "LinesInsertedFromPrevious": 33.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 227.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 92236056, "KernelVersionId": 69270878, "SourceDatasetVersionId": 1465873}]
|
[{"Id": 1465873, "DatasetId": 843921, "DatasourceVersionId": 1499603, "CreatorUserId": 732424, "LicenseName": "CC0: Public Domain", "CreationDate": "09/05/2020 19:46:29", "VersionNumber": 5.0, "Title": "NBA 2k20 player dataset", "Slug": "nba2k20-player-dataset", "Subtitle": "NBA 2k players with 15 attributes", "Description": "### Context\n\nNBA 2k20 analysis.\n\n\n### Content\n\nDetailed attributes for players registered in the NBA2k20.\n\n\n### Acknowledgements\n\nData scraped from https://hoopshype.com/nba2k/. Additional data about countries and drafts scraped from Wikipedia.\n\n\n### Inspiration\n\nInspired from this dataset: https://www.kaggle.com/karangadiya/fifa19", "VersionNotes": "Update positions for free agents", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 843921, "CreatorUserId": 732424, "OwnerUserId": 732424.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4769431.0, "CurrentDatasourceVersionId": 4832749.0, "ForumId": 859138, "Type": 2, "CreationDate": "08/24/2020 21:07:57", "LastActivityDate": "08/24/2020", "TotalViews": 34730, "TotalDownloads": 4026, "TotalVotes": 90, "TotalKernels": 49}]
|
[{"Id": 732424, "UserName": "isaienkov", "DisplayName": "Kostiantyn Isaienkov", "RegisterDate": "09/28/2016", "PerformanceTier": 4}]
|
| false | 1 | 3,298 | 0 | 3,365 | 3,298 |
||
69637760
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn import svm
import seaborn as sns
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
test = pd.read_csv("/kaggle/input/titanic/test.csv")
train = pd.read_csv("/kaggle/input/titanic/train.csv")
train.info()
train.head()
train.describe()
train.isnull().sum()
test.describe()
test.isnull().sum()
def fillNull(df):
df["Fare"].fillna(df["Fare"].median(), inplace=True)
df["Age"].fillna(df["Age"].median(), inplace=True)
df["Embarked"].fillna(("S"), inplace=True)
df["Embarked"] = df["Embarked"].map({"S": 0, "C": 2, "Q": 1})
return df
def encode(df):
df["Sex"] = df["Sex"].replace(["female", "male"], [1, 0])
return df
train = encode(train)
test = encode(test)
train = fillNull(train)
test = fillNull(test)
# dropping unnecessary columns
train.drop(["Name", "Cabin", "Ticket"], axis=1, inplace=True)
test.drop(["Name", "Cabin", "Ticket"], axis=1, inplace=True)
train.corr()
train.head()
test.head()
sns.boxplot(x=train["Fare"])
sns.boxplot(x=train["Age"])
sns.boxplot(x=train["SibSp"])
sns.boxplot(x=train["Pclass"])
sns.barplot(x=train["Embarked"], y=train["Survived"])
Q1 = train.quantile(0.25)
Q3 = train.quantile(0.75)
IQR = Q3 - Q1
print(Q1, Q3)
# getting outliers
l = (train < (Q1 - 1.5 * IQR)) | (train > (Q3 + 1.5 * IQR))
outliers = pd.DataFrame(columns=["Pclass", "Age", "SibSp", "Parch", "Fare", "Embarked"])
ind = 0
for i in range(len(l)):
if (train.loc[i] < (Q1 - 1.5 * IQR)).sum() > 0 or (
train.loc[i] > (Q3 + 1.5 * IQR)
).sum() > 0:
outliers.loc[ind] = train.loc[i]
ind = ind + 1
display(outliers)
# Imputing outliers in 'Fare' by the median
left = Q1 - 1.5 * IQR
right = Q3 + 1.5 * IQR
for i in range(len(train)):
if (train["Fare"].loc[i] < left["Fare"]) or (train["Fare"].loc[i] > right["Fare"]):
        train.loc[i, "Fare"] = train["Fare"].median()  # avoid chained-indexing assignment
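# The same Fare capping can be written without a Python loop; a hedged, vectorized sketch,
# run on a copy so the frame above is left untouched:
fare_demo = train.copy()
outlier_mask = (fare_demo["Fare"] < left["Fare"]) | (fare_demo["Fare"] > right["Fare"])
fare_demo.loc[outlier_mask, "Fare"] = fare_demo["Fare"].median()
outlier_mask.sum()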
Y = train["Survived"]
train.drop("Survived", axis=1, inplace=True)
train["Family"] = train["SibSp"] + train["Parch"]
delete_columns = ["SibSp", "Parch"]
train.drop(delete_columns, axis=1, inplace=True)
test["Family"] = test["SibSp"] + test["Parch"]
delete_columns = ["SibSp", "Parch"]
test.drop(delete_columns, axis=1, inplace=True)
# Normalization
train = (train - train.min()) / (train.max() - train.min())
test = (test - test.min()) / (test.max() - test.min())
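# A hedged note on the scaling above: `test` is scaled with its own min/max, which may differ
# from the training statistics. The leakage-free pattern (an assumption about what we would
# prefer; left commented out because the frames are already scaled in place) would be:
# ref_min, ref_max = train.min(), train.max()
# train = (train - ref_min) / (ref_max - ref_min)
# test = (test - ref_min) / (ref_max - ref_min)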
clf = svm.SVC()
clf.fit(train, Y)
sub = clf.predict(test)
dp = pd.read_csv("/kaggle/input/titanic/gender_submission.csv")
dp["Survived"] = sub
# dp.to_csv("/kaggle/input/titanic/gender_submission.csv", index=False)
sub
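# A hedged follow-up (assumption: we want an actual submission file): the commented line above
# points at the read-only input directory, so write the frame to the working directory instead.
dp.to_csv("submission.csv", index=False)
dp.head()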
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/637/69637760.ipynb
| null | null |
[{"Id": 69637760, "ScriptId": 19019005, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6485971, "CreationDate": "08/02/2021 12:45:53", "VersionNumber": 1.0, "Title": "TITANC using SVC", "EvaluationDate": "08/02/2021", "IsChange": true, "TotalLines": 121.0, "LinesInsertedFromPrevious": 121.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 1,149 | 0 | 1,149 | 1,149 |
||
69637948
|
import numpy as np
import pandas as pd
import warnings
warnings.filterwarnings("ignore")
train = pd.read_csv(r"../input/tabular-playground-series-aug-2021/train.csv")
test = pd.read_csv(r"../input/tabular-playground-series-aug-2021/test.csv")
submission = pd.read_csv(
r"../input/tabular-playground-series-aug-2021/sample_submission.csv"
)
train.shape, test.shape, submission.shape
train.head()
train.drop("id", axis=1, inplace=True)
test.drop("id", axis=1, inplace=True)
train.nunique()
# #### **VIF (Variance Inflation Factor)**
# for multicollinearity detection
from statsmodels.stats.outliers_influence import variance_inflation_factor
def calc_vif(X):
# Calculating VIF
vif = pd.DataFrame()
vif["variables"] = X.columns
vif["VIF"] = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])]
return vif
# * VIF = 1: no correlation between this independent variable and the others
# * VIF above 5 or 10: high multicollinearity between this independent variable and the others
#   (a small pruning sketch based on these cutoffs follows the VIF computation below)
X = train.drop("loss", axis=1)
df_vif = calc_vif(X)
df_vif[round(df_vif.VIF) == 1].sort_values(by="VIF")
df_vif[df_vif.VIF > 1.01].sort_values(by="VIF", ascending=False)
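# A hedged sketch of acting on the cutoffs above (the threshold of 5 is an assumption; for this
# data almost every VIF is close to 1, so usually nothing gets dropped):
high_vif_features = df_vif.loc[df_vif.VIF > 5, "variables"].tolist()
X_reduced = X.drop(columns=high_vif_features)
X_reduced.shape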
# ### **PyCaret**
from pycaret.regression import (
setup,
compare_models,
blend_models,
finalize_model,
predict_model,
)
def pycaret_model(train, target, test, n_select, fold, opt):
print("Setup Your Data....")
setup(
data=train,
target=target,
silent=True,
use_gpu=True,
feature_selection=True,
profile=True,
)
print("Comparing Models....")
best = compare_models(
sort=opt, n_select=n_select, fold=fold, exclude=["et", "lr", "dt", "ada"]
)
print("Blending Models....")
blended = blend_models(estimator_list=best, fold=fold, optimize=opt)
pred = predict_model(blended)
# print('Finallizing Models....')
# final_model = finalize_model(blended)
# print('Done...!!!')
pred_test = predict_model(blended, test)
re = pred_test["Label"]
return re
submission["loss"] = pycaret_model(train, "loss", test, 3, 3, "RMSE")
submission.to_csv("submission.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/637/69637948.ipynb
| null | null |
[{"Id": 69637948, "ScriptId": 18987663, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4811132, "CreationDate": "08/02/2021 12:47:47", "VersionNumber": 3.0, "Title": "[TPS AUG 21] PyCaret", "EvaluationDate": "08/02/2021", "IsChange": true, "TotalLines": 84.0, "LinesInsertedFromPrevious": 12.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 72.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 721 | 0 | 721 | 721 |
||
69637070
|
<jupyter_start><jupyter_text>[DEPRECATED] keras-applications
DEPRECATED, USE: https://www.kaggle.com/datasets/xhlulu/keras-applications
Kaggle dataset identifier: kerasapplications
<jupyter_script># [Inference] Swin Transformer Object Detection
# Highlights
# 👋 Big thanks to:
# > [Sreevishnu Damodaran for his great notebook](https://www.kaggle.com/sreevishnudamodaran/siim-effnetv2-l-cascadercnn-mmdetection-infer)
# References:
# - https://www.kaggle.com/h053473666/siim-cov19-efnb7-yolov5-infer
# - https://github.com/tensorflow/hub
# - https://github.com/open-mmlab/mmdetection
# - https://github.com/SwinTransformer/Swin-Transformer-Object-Detection
## Compatible Cuda Toolkit installation
## MMDetection Offline Installation
# !cp -r /kaggle/input/mmdetectionv2140/mmdetection-2.14.0 /kaggle/working/
# !mv /kaggle/working/mmdetection-2.14.0 /kaggle/working/mmdetection
# %cd /kaggle/working/mmdetection
# !pip install -e . --no-deps
import shutil
shutil.copytree(
"/kaggle/input/swin-detection-repo/Swin-Transformer-Object-Detection-master",
"/kaggle/working/Swin-Transformer-Object-Detection-master",
)
# !pip install '/kaggle/input/mmdetectionv2140/mmpycocotools-12.0.3/mmpycocotools-12.0.3' --no-deps
# !pip install '/kaggle/input/mmdetectionv2140/terminaltables-3.1.0-py3-none-any.whl' --no-deps
# !pip install '/kaggle/input/mmdetectionv2140/addict-2.4.0-py3-none-any.whl' --no-deps
# !pip install '/kaggle/input/mmdetectionv2140/yapf-0.31.0-py2.py3-none-any.whl' --no-deps
shutil.copytree("/kaggle/input/apex-repo/apex-master", "/kaggle/working/apex-master")
# from mmdet.apis import init_detector, inference_detector
# import sys
# sys.path.append('/kaggle/working/mmdetection')
import sys
sys.path.append("/kaggle/working/Swin-Transformer-Object-Detection-master")
import os
from PIL import Image
import pandas as pd
from tqdm.auto import tqdm
import gc
import glob
import numpy as np
# Create Study and Image Level Dataframes
sub_df = pd.read_csv("/kaggle/input/siim-covid19-detection/sample_submission.csv")
# Form study and image dataframes
sub_df["level"] = sub_df.id.map(lambda idx: idx[-5:])
study_df = sub_df[sub_df.level == "study"].rename({"id": "study_id"}, axis=1)
image_df = sub_df[sub_df.level == "image"].rename({"id": "image_id"}, axis=1)
dcm_path = glob.glob(
"/kaggle/input/siim-covid19-detection/test/**/*dcm", recursive=True
)
test_meta = pd.DataFrame({"dcm_path": dcm_path})
test_meta["image_id"] = test_meta.dcm_path.map(
lambda x: x.split("/")[-1].replace(".dcm", "") + "_image"
)
test_meta["study_id"] = test_meta.dcm_path.map(
lambda x: x.split("/")[-3].replace(".dcm", "") + "_study"
)
study_df = study_df.merge(test_meta, on="study_id", how="left")
image_df = image_df.merge(test_meta, on="image_id", how="left")
# Remove duplicate study_ids from study_df
study_df.drop_duplicates(subset="study_id", keep="first", inplace=True)
# Fast or Full Predictions
# For non-competition submission commits, we run the notebook with just two images each for image-level and study-level inference on the public test data.
fast_sub = False
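# A hedged sketch of the fast mode described above (the two-image subset size comes from the
# comment; this block is a no-op while fast_sub is False):
if fast_sub:
    study_df = study_df.head(2).reset_index(drop=True)
    image_df = image_df.head(2).reset_index(drop=True)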
import pydicom
from pydicom.pixel_data_handlers.util import apply_voi_lut
STUDY_DIMS = (768, 768)
IMAGE_DIMS = (512, 512)
study_dir = f"/kaggle/tmp/test/study/"
os.makedirs(study_dir, exist_ok=True)
image_dir = f"/kaggle/tmp/test/image/"
os.makedirs(image_dir, exist_ok=True)
def read_xray(path, voi_lut=True, fix_monochrome=True):
# Original from: https://www.kaggle.com/raddar/convert-dicom-to-np-array-the-correct-way
dicom = pydicom.read_file(path)
# VOI LUT (if available by DICOM device) is used to transform raw DICOM data to
# "human-friendly" view
if voi_lut:
data = apply_voi_lut(dicom.pixel_array, dicom)
else:
data = dicom.pixel_array
# depending on this value, X-ray may look inverted - fix that:
if fix_monochrome and dicom.PhotometricInterpretation == "MONOCHROME1":
data = np.amax(data) - data
data = data - np.min(data)
data = data / np.max(data)
data = (data * 255).astype(np.uint8)
return data
def resize(array, size, keep_ratio=False, resample=Image.LANCZOS):
# Original from: https://www.kaggle.com/xhlulu/vinbigdata-process-and-resize-to-image
im = Image.fromarray(array)
if keep_ratio:
im.thumbnail((size, size), resample)
else:
im = im.resize((size, size), resample)
return im
for index, row in tqdm(
study_df[["study_id", "dcm_path"]].iterrows(), total=study_df.shape[0]
):
# set keep_ratio=True to have original aspect ratio
xray = read_xray(row["dcm_path"])
im = resize(xray, size=STUDY_DIMS[0])
im.save(os.path.join(study_dir, row["study_id"] + ".png"))
image_df["dim0"] = -1
image_df["dim1"] = -1
for index, row in tqdm(
image_df[["image_id", "dcm_path", "dim0", "dim1"]].iterrows(),
total=image_df.shape[0],
):
# set keep_ratio=True to have original aspect ratio
xray = read_xray(row["dcm_path"])
im = resize(xray, size=IMAGE_DIMS[0])
im.save(os.path.join(image_dir, row["image_id"] + ".png"))
image_df.loc[image_df.image_id == row.image_id, "dim0"] = xray.shape[0]
image_df.loc[image_df.image_id == row.image_id, "dim1"] = xray.shape[1]
# nonresized images
image_dir_orig = f"/kaggle/tmp/test/image_orig/"
os.makedirs(image_dir_orig, exist_ok=True)
for index, row in tqdm(
image_df[["image_id", "dcm_path", "dim0", "dim1"]].iterrows(),
total=image_df.shape[0],
):
# set keep_ratio=True to have original aspect ratio
xray = read_xray(row["dcm_path"])
im = Image.fromarray(xray)
im.save(os.path.join(image_dir_orig, row["image_id"] + ".png"))
study_df["image_path"] = study_dir + study_df["study_id"] + ".png"
image_df["image_path"] = image_dir + image_df["image_id"] + ".png"
# Custom Wrapper for Loading a TFHub Model Trained on a TPU
# The EffNetV2 classifier models were trained on a TPU with the `tfhub.KerasLayer` built from a handle pointing to a GCS path, so when the saved model is loaded for inference, Keras tries to download the pre-trained weights from that same GCS path recorded at training time.
# Since inference notebooks have no GCS or internet access, the model cannot be loaded unless the pretrained weights are loaded explicitly from a local directory.
# If the models had been trained on a GPU, we could use the cache-location method: store the pre-trained weights in a cache folder whose name is the hashed key of the model location. I tried that method here, but it does not seem to work because the model was trained with a GCS path defined in the `tfhub.KerasLayer`, so loading kept hitting the GCS path rather than the cache location.
# The only solution was to create a wrapper class that corrects the handle argument and loads the right pretrained weights explicitly from the local directory.
import tensorflow as tf
import tensorflow_hub as tfhub
MODEL_ARCH = "efficientnetv2-l-21k-ft1k"
# Local path to the TensorFlow Hub model weight files
hub_type = "feature_vector" # ['classification', 'feature_vector']
MODEL_ARCH_PATH = f"/kaggle/input/efficientnetv2-tfhub-weight-files/tfhub_models/{MODEL_ARCH}/{hub_type}"
# Custom wrapper class to load the right pretrained weights explicitly from the local directory
class KerasLayerWrapper(tfhub.KerasLayer):
def __init__(self, handle, **kwargs):
handle = tfhub.KerasLayer(tfhub.load(MODEL_ARCH_PATH))
super().__init__(handle, **kwargs)
# Predict Study Level
MODEL_PATH = "/kaggle/input/siim-effnetv2-keras-study-train-tpu-cv0-805"
test_paths = study_df.image_path.tolist()
BATCH_SIZE = 16
def build_decoder(with_labels=True, target_size=(300, 300), ext="jpg"):
def decode(path):
file_bytes = tf.io.read_file(path)
if ext == "png":
img = tf.image.decode_png(file_bytes, channels=3)
elif ext in ["jpg", "jpeg"]:
img = tf.image.decode_jpeg(file_bytes, channels=3)
else:
raise ValueError("Image extension not supported")
        img = tf.cast(img, tf.float32) / 355.0  # note: 355 rather than the usual 255; this divisor must match whatever was used at training time
img = tf.image.resize(img, target_size)
return img
def decode_with_labels(path, label):
return decode(path), label
return decode_with_labels if with_labels else decode
def build_augmenter(with_labels=True):
def augment(img):
img = tf.image.random_flip_left_right(img)
img = tf.image.random_flip_up_down(img)
return img
def augment_with_labels(img, label):
return augment(img), label
return augment_with_labels if with_labels else augment
def build_dataset(
paths,
labels=None,
bsize=32,
cache=True,
decode_fn=None,
augment_fn=None,
augment=True,
repeat=True,
shuffle=1024,
cache_dir="",
):
if cache_dir != "" and cache is True:
os.makedirs(cache_dir, exist_ok=True)
if decode_fn is None:
decode_fn = build_decoder(labels is not None)
if augment_fn is None:
augment_fn = build_augmenter(labels is not None)
AUTO = tf.data.experimental.AUTOTUNE
slices = paths if labels is None else (paths, labels)
dset = tf.data.Dataset.from_tensor_slices(slices)
dset = dset.map(decode_fn, num_parallel_calls=AUTO)
dset = dset.cache(cache_dir) if cache else dset
dset = dset.map(augment_fn, num_parallel_calls=AUTO) if augment else dset
dset = dset.repeat() if repeat else dset
dset = dset.shuffle(shuffle) if shuffle else dset
dset = dset.batch(bsize).prefetch(AUTO)
return dset
# strategy = auto_select_accelerator()
# BATCH_SIZE = strategy.num_replicas_in_sync * 16
label_cols = ["negative", "typical", "indeterminate", "atypical"]
study_df[label_cols] = 0
test_decoder = build_decoder(
with_labels=False, target_size=(STUDY_DIMS[0], STUDY_DIMS[0]), ext="png"
)
test_dataset = build_dataset(
test_paths,
bsize=BATCH_SIZE,
repeat=False,
shuffle=False,
augment=False,
cache=False,
decode_fn=test_decoder,
)
with tf.device("/device:GPU:0"):
models = []
models0 = tf.keras.models.load_model(
f"{MODEL_PATH}/model0.h5", custom_objects={"KerasLayer": KerasLayerWrapper}
)
models1 = tf.keras.models.load_model(
f"{MODEL_PATH}/model1.h5", custom_objects={"KerasLayer": KerasLayerWrapper}
)
models2 = tf.keras.models.load_model(
f"{MODEL_PATH}/model2.h5", custom_objects={"KerasLayer": KerasLayerWrapper}
)
models3 = tf.keras.models.load_model(
f"{MODEL_PATH}/model3.h5", custom_objects={"KerasLayer": KerasLayerWrapper}
)
models4 = tf.keras.models.load_model(
f"{MODEL_PATH}/model4.h5", custom_objects={"KerasLayer": KerasLayerWrapper}
)
models.append(models0)
models.append(models1)
models.append(models2)
models.append(models3)
models.append(models4)
study_df[label_cols] = sum(
[model.predict(test_dataset, verbose=1) for model in models]
) / len(models)
study_df["PredictionString"] = study_df[label_cols].apply(
lambda row: f"negative {row.negative} 0 0 1 1 typical {row.typical} 0 0 1 1 indeterminate {row.indeterminate} 0 0 1 1 atypical {row.atypical} 0 0 1 1",
axis=1,
)
del models
del models0, models1, models2, models3, models4
del test_dataset, test_decoder
gc.collect()
# Predict 2-Class Image Level
# Using [@Alien](https://www.kaggle.com/h053473666)'s 2-class model.
import efficientnet.tfkeras as efn
MODEL_PATH = "/kaggle/input/siim-covid19-efnb7-train-fold0-5-2class"
test_paths = image_df.image_path.tolist()
image_df["none"] = 0
label_cols = ["none"]
test_decoder = build_decoder(
with_labels=False, target_size=(IMAGE_DIMS[0], IMAGE_DIMS[0]), ext="png"
)
test_dataset = build_dataset(
test_paths,
bsize=BATCH_SIZE,
repeat=False,
shuffle=False,
augment=False,
cache=False,
decode_fn=test_decoder,
)
with tf.device("/device:GPU:0"):
models = []
models0 = tf.keras.models.load_model(f"{MODEL_PATH}/model0.h5")
models1 = tf.keras.models.load_model(f"{MODEL_PATH}/model1.h5")
models2 = tf.keras.models.load_model(f"{MODEL_PATH}/model2.h5")
models3 = tf.keras.models.load_model(f"{MODEL_PATH}/model3.h5")
models4 = tf.keras.models.load_model(f"{MODEL_PATH}/model4.h5")
models.append(models0)
models.append(models1)
models.append(models2)
models.append(models3)
models.append(models4)
image_df[label_cols] = sum(
[model.predict(test_dataset, verbose=1) for model in models]
) / len(models)
del models
del models0, models1, models2, models3, models4
del test_dataset, test_decoder
gc.collect()
# Predict Image Level
# ## SWIN Transformer
# from tqdm.notebook import tqdm
# import torch
# device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
# print(device.type)
# import torchvision
# print(torch.__version__, torch.cuda.is_available())
# # Check mmcv installation
# from mmcv.ops import get_compiling_cuda_version, get_compiler_version
# print(get_compiling_cuda_version())
# print(get_compiler_version())
# # Check MMDetection installation
# from mmdet.apis import set_random_seed
# # Imports
# import mmdet
# from mmdet.apis import set_random_seed
# from mmdet.datasets import build_dataset
# from mmdet.models import build_detector
# import mmcv
# from mmcv import Config
# from mmcv.runner import load_checkpoint
# from mmcv.parallel import MMDataParallel
# from mmdet.apis import inference_detector, init_detector, show_result_pyplot
# from mmdet.apis import single_gpu_test
# from mmdet.datasets import build_dataloader, build_dataset
# import apex
from mmdet.apis import init_detector, inference_detector
def yolo2voc(image_height, image_width, bboxes):
"""
yolo => [xmid, ymid, w, h] (normalized)
voc => [x1, y1, x2, y2]
"""
bboxes = bboxes.copy().astype(
float
    )  # otherwise all values would be truncated to 0, since the voc_pascal dtype would be np.int
bboxes[..., [0, 2]] = bboxes[..., [0, 2]] * image_width
bboxes[..., [1, 3]] = bboxes[..., [1, 3]] * image_height
bboxes[..., [0, 1]] = bboxes[..., [0, 1]] - bboxes[..., [2, 3]] / 2
bboxes[..., [2, 3]] = bboxes[..., [0, 1]] + bboxes[..., [2, 3]]
return bboxes
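# A small, hedged sanity check of the conversion (the 1000x800 image size and the centred box
# are made-up values): a half-width, half-height box should map to these pixel corners.
_demo_yolo = np.array([[0.5, 0.5, 0.5, 0.5]])
yolo2voc(800, 1000, _demo_yolo)  # expected: [[250., 200., 750., 600.]]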
def get_all_files_in_folder(folder, types):
files_grabbed = []
for t in types:
files_grabbed.extend(folder.rglob(t))
files_grabbed = sorted(files_grabbed, key=lambda x: x)
return files_grabbed
from pathlib import Path
from tqdm import tqdm
import cv2
import numpy as np
config_file = "/kaggle/input/swin-transformer-data/mask_rcnn_swin_small_patch4_window7_mstrain_480-800_adamw_3x_coco_without_mask_head.py"
checkpoint_file = "/kaggle/input/swin-transformer-data/epoch_14.pth"
images_dir = Path("/kaggle/tmp/test/image_orig/")
images_ext = ["*.png"]
conf_threshold = 0.001
device = "cuda:0"
image_ids = []
PredictionStrings = []
model = init_detector(config_file, checkpoint_file, device=device)
files = get_all_files_in_folder(images_dir, images_ext)
for file in tqdm(files):
image = cv2.imread(str(file), cv2.IMREAD_COLOR)
h, w = image.shape[:2]
result = inference_detector(model, file)
bboxes = result[0]
dect_results = []
boxes_valid = []
for box in bboxes:
if box[4] > conf_threshold:
boxes_valid.append(box)
if len(boxes_valid) > 0:
for box in boxes_valid:
wn = (box[2] - box[0]) / w
hn = (box[3] - box[1]) / h
x_center_norm = ((box[2] - box[0]) / 2 + box[0]) / w
y_center_norm = ((box[3] - box[1]) / 2 + box[1]) / h
label = 0
d = [label, box[4], x_center_norm, y_center_norm, wn, hn]
dect_results.append(d)
if len(dect_results):
data = np.array(dect_results)
bboxes = list(
np.round(
np.concatenate(
(data[:, :2], np.round(yolo2voc(h, w, data[:, 2:]))), axis=1
).reshape(-1),
12,
).astype(str)
)
for idx in range(len(bboxes)):
bboxes[idx] = str(int(float(bboxes[idx]))) if idx % 6 != 1 else bboxes[idx]
PredictionStrings.append(" ".join(bboxes))
image_ids.append(file.stem)
PredictionStrings[0]
import pandas as pd
detection_df = pd.DataFrame({"id": image_ids, "PredictionString": PredictionStrings})
detection_df
detection_df = detection_df.merge(
image_df[["image_id", "none"]].rename({"image_id": "id"}, axis=1),
on="id",
how="left",
)
for i in range(detection_df.shape[0]):
if detection_df.loc[i, "PredictionString"] != "none 1 0 0 1 1":
detection_df.loc[i, "PredictionString"] = (
detection_df.loc[i, "PredictionString"]
+ " none "
+ str(detection_df.loc[i, "none"])
+ " 0 0 1 1"
)
detection_df = detection_df[["id", "PredictionString"]]
results_df = study_df[["study_id", "PredictionString"]].rename(
{"study_id": "id"}, axis=1
)
results_df = results_df.append(detection_df[["id", "PredictionString"]])
sub_df = pd.read_csv("/kaggle/input/siim-covid19-detection/sample_submission.csv")
sub_df["PredictionString"] = np.nan
sub_df = sub_df.set_index("id")
results_df = results_df.set_index("id")
sub_df.update(results_df)
sub_df = sub_df.reset_index()
sub_df = sub_df.fillna("none 1 0 0 1 1")
sub_df.to_csv("/kaggle/working/submission.csv", index=False)
if fast_sub:
display(sub_df.head(2))
sub_df = pd.read_csv("/kaggle/input/siim-covid19-detection/sample_submission.csv")
sub_df["PredictionString"] = np.nan
# sub_df = sub_df.set_index('id')
sub_df.tail(5)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/637/69637070.ipynb
|
kerasapplications
|
xhlulu
|
[{"Id": 69637070, "ScriptId": 19008941, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5878502, "CreationDate": "08/02/2021 12:37:38", "VersionNumber": 2.0, "Title": "[Inference] Swin Transformer Object Detection", "EvaluationDate": "08/02/2021", "IsChange": true, "TotalLines": 543.0, "LinesInsertedFromPrevious": 7.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 536.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 93072565, "KernelVersionId": 69637070, "SourceDatasetVersionId": 1666454}, {"Id": 93072576, "KernelVersionId": 69637070, "SourceDatasetVersionId": 2490344}, {"Id": 93072571, "KernelVersionId": 69637070, "SourceDatasetVersionId": 2475994}, {"Id": 93072573, "KernelVersionId": 69637070, "SourceDatasetVersionId": 2477207}, {"Id": 93072578, "KernelVersionId": 69637070, "SourceDatasetVersionId": 2490456}, {"Id": 93072569, "KernelVersionId": 69637070, "SourceDatasetVersionId": 2442246}, {"Id": 93072568, "KernelVersionId": 69637070, "SourceDatasetVersionId": 2437252}, {"Id": 93072574, "KernelVersionId": 69637070, "SourceDatasetVersionId": 2479140}, {"Id": 93072577, "KernelVersionId": 69637070, "SourceDatasetVersionId": 2490388}, {"Id": 93072575, "KernelVersionId": 69637070, "SourceDatasetVersionId": 2490317}, {"Id": 93072566, "KernelVersionId": 69637070, "SourceDatasetVersionId": 1874023}, {"Id": 93072570, "KernelVersionId": 69637070, "SourceDatasetVersionId": 2474118}, {"Id": 93072572, "KernelVersionId": 69637070, "SourceDatasetVersionId": 2477188}, {"Id": 93072567, "KernelVersionId": 69637070, "SourceDatasetVersionId": 2406834}]
|
[{"Id": 1666454, "DatasetId": 986800, "DatasourceVersionId": 1702809, "CreatorUserId": 2352583, "LicenseName": "Unknown", "CreationDate": "11/21/2020 05:57:19", "VersionNumber": 1.0, "Title": "[DEPRECATED] keras-applications", "Slug": "kerasapplications", "Subtitle": "DEPRECATED, USE: https://www.kaggle.com/datasets/xhlulu/keras-applications", "Description": "DEPRECATED, USE: https://www.kaggle.com/datasets/xhlulu/keras-applications", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 986800, "CreatorUserId": 2352583, "OwnerUserId": 2352583.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1666454.0, "CurrentDatasourceVersionId": 1702809.0, "ForumId": 1003340, "Type": 2, "CreationDate": "11/21/2020 05:57:19", "LastActivityDate": "11/21/2020", "TotalViews": 3409, "TotalDownloads": 118, "TotalVotes": 17, "TotalKernels": 124}]
|
[{"Id": 2352583, "UserName": "xhlulu", "DisplayName": "xhlulu", "RegisterDate": "10/12/2018", "PerformanceTier": 4}]
|
# [Inference] Swin Transformer Object Detection
# Highlights
# 👋 Big thanks to:
# > [Sreevishnu Damodaran for his great notebook](https://www.kaggle.com/sreevishnudamodaran/siim-effnetv2-l-cascadercnn-mmdetection-infer)
# References:
# - https://www.kaggle.com/h053473666/siim-cov19-efnb7-yolov5-infer
# - https://github.com/tensorflow/hub
# - https://github.com/open-mmlab/mmdetection
# - https://github.com/SwinTransformer/Swin-Transformer-Object-Detection
## Compatible Cuda Toolkit installation
## MMDetection Offline Installation
# !cp -r /kaggle/input/mmdetectionv2140/mmdetection-2.14.0 /kaggle/working/
# !mv /kaggle/working/mmdetection-2.14.0 /kaggle/working/mmdetection
# %cd /kaggle/working/mmdetection
# !pip install -e . --no-deps
import shutil
shutil.copytree(
"/kaggle/input/swin-detection-repo/Swin-Transformer-Object-Detection-master",
"/kaggle/working/Swin-Transformer-Object-Detection-master",
)
# !pip install '/kaggle/input/mmdetectionv2140/mmpycocotools-12.0.3/mmpycocotools-12.0.3' --no-deps
# !pip install '/kaggle/input/mmdetectionv2140/terminaltables-3.1.0-py3-none-any.whl' --no-deps
# !pip install '/kaggle/input/mmdetectionv2140/addict-2.4.0-py3-none-any.whl' --no-deps
# !pip install '/kaggle/input/mmdetectionv2140/yapf-0.31.0-py2.py3-none-any.whl' --no-deps
shutil.copytree("/kaggle/input/apex-repo/apex-master", "/kaggle/working/apex-master")
# from mmdet.apis import init_detector, inference_detector
# import sys
# sys.path.append('/kaggle/working/mmdetection')
import sys
sys.path.append("/kaggle/working/Swin-Transformer-Object-Detection-master")
import os
from PIL import Image
import pandas as pd
from tqdm.auto import tqdm
import gc
import glob
import numpy as np
# Create Study and Image Level Dataframes
sub_df = pd.read_csv("/kaggle/input/siim-covid19-detection/sample_submission.csv")
# Form study and image dataframes
sub_df["level"] = sub_df.id.map(lambda idx: idx[-5:])
study_df = sub_df[sub_df.level == "study"].rename({"id": "study_id"}, axis=1)
image_df = sub_df[sub_df.level == "image"].rename({"id": "image_id"}, axis=1)
dcm_path = glob.glob(
"/kaggle/input/siim-covid19-detection/test/**/*dcm", recursive=True
)
test_meta = pd.DataFrame({"dcm_path": dcm_path})
test_meta["image_id"] = test_meta.dcm_path.map(
lambda x: x.split("/")[-1].replace(".dcm", "") + "_image"
)
test_meta["study_id"] = test_meta.dcm_path.map(
lambda x: x.split("/")[-3].replace(".dcm", "") + "_study"
)
study_df = study_df.merge(test_meta, on="study_id", how="left")
image_df = image_df.merge(test_meta, on="image_id", how="left")
# Remove duplicates study_ids from study_df
study_df.drop_duplicates(subset="study_id", keep="first", inplace=True)
# Fast or Full Predictions
# In case of non-competetion submission commits, we run the notebook with just two images each for image level and study level inference from the public test data.
fast_sub = False
import pydicom
from pydicom.pixel_data_handlers.util import apply_voi_lut
STUDY_DIMS = (768, 768)
IMAGE_DIMS = (512, 512)
study_dir = f"/kaggle/tmp/test/study/"
os.makedirs(study_dir, exist_ok=True)
image_dir = f"/kaggle/tmp/test/image/"
os.makedirs(image_dir, exist_ok=True)
def read_xray(path, voi_lut=True, fix_monochrome=True):
# Original from: https://www.kaggle.com/raddar/convert-dicom-to-np-array-the-correct-way
dicom = pydicom.read_file(path)
# VOI LUT (if available by DICOM device) is used to transform raw DICOM data to
# "human-friendly" view
if voi_lut:
data = apply_voi_lut(dicom.pixel_array, dicom)
else:
data = dicom.pixel_array
# depending on this value, X-ray may look inverted - fix that:
if fix_monochrome and dicom.PhotometricInterpretation == "MONOCHROME1":
data = np.amax(data) - data
data = data - np.min(data)
data = data / np.max(data)
data = (data * 255).astype(np.uint8)
return data
def resize(array, size, keep_ratio=False, resample=Image.LANCZOS):
# Original from: https://www.kaggle.com/xhlulu/vinbigdata-process-and-resize-to-image
im = Image.fromarray(array)
if keep_ratio:
im.thumbnail((size, size), resample)
else:
im = im.resize((size, size), resample)
return im
for index, row in tqdm(
study_df[["study_id", "dcm_path"]].iterrows(), total=study_df.shape[0]
):
# set keep_ratio=True to have original aspect ratio
xray = read_xray(row["dcm_path"])
im = resize(xray, size=STUDY_DIMS[0])
im.save(os.path.join(study_dir, row["study_id"] + ".png"))
image_df["dim0"] = -1
image_df["dim1"] = -1
for index, row in tqdm(
image_df[["image_id", "dcm_path", "dim0", "dim1"]].iterrows(),
total=image_df.shape[0],
):
# set keep_ratio=True to have original aspect ratio
xray = read_xray(row["dcm_path"])
im = resize(xray, size=IMAGE_DIMS[0])
im.save(os.path.join(image_dir, row["image_id"] + ".png"))
image_df.loc[image_df.image_id == row.image_id, "dim0"] = xray.shape[0]
image_df.loc[image_df.image_id == row.image_id, "dim1"] = xray.shape[1]
# nonresized images
image_dir_orig = f"/kaggle/tmp/test/image_orig/"
os.makedirs(image_dir_orig, exist_ok=True)
for index, row in tqdm(
image_df[["image_id", "dcm_path", "dim0", "dim1"]].iterrows(),
total=image_df.shape[0],
):
# set keep_ratio=True to have original aspect ratio
xray = read_xray(row["dcm_path"])
im = Image.fromarray(xray)
im.save(os.path.join(image_dir_orig, row["image_id"] + ".png"))
study_df["image_path"] = study_dir + study_df["study_id"] + ".png"
image_df["image_path"] = image_dir + image_df["image_id"] + ".png"
# Custom Wrapper for Loading TFHub Model trained in TPU
# Since the EffNetV2 Classifier models were trained on a TPU with the `tfhub.KerasLayer` formed with the handle argument as a GCS path, while loading the saved model for inference, the method tries to download the pre-trained weights from the definition of the layer from training i.e a GCS path.
# Since, inference notebooks don't have GCS and internet access, it is not possible to load the model without the pretrained weights explicitly loaded from the local directory.
# If the models were trained on a GPU, we can use the cache location method to load the pre-trained weights by storing them in a cache folder with the hashed key of the model location, as the folder name. I tried this method here but, it doesn't seem to work as the model was trained with a GCS path defined in the `tfhub.KerasLayer` and the method kept on hitting the GCS path rather than loading the weights from the cache location.
# The only solution was to create a wrapper class to correct the handle argument to load the right pretrained weights explicitly from the local directory.
import tensorflow as tf
import tensorflow_hub as tfhub
MODEL_ARCH = "efficientnetv2-l-21k-ft1k"
# Get the TensorFlow Hub model URL
hub_type = "feature_vector" # ['classification', 'feature_vector']
MODEL_ARCH_PATH = f"/kaggle/input/efficientnetv2-tfhub-weight-files/tfhub_models/{MODEL_ARCH}/{hub_type}"
# Custom wrapper class to load the right pretrained weights explicitly from the local directory
class KerasLayerWrapper(tfhub.KerasLayer):
def __init__(self, handle, **kwargs):
handle = tfhub.KerasLayer(tfhub.load(MODEL_ARCH_PATH))
super().__init__(handle, **kwargs)
# Predict Study Level
MODEL_PATH = "/kaggle/input/siim-effnetv2-keras-study-train-tpu-cv0-805"
test_paths = study_df.image_path.tolist()
BATCH_SIZE = 16
def build_decoder(with_labels=True, target_size=(300, 300), ext="jpg"):
def decode(path):
file_bytes = tf.io.read_file(path)
if ext == "png":
img = tf.image.decode_png(file_bytes, channels=3)
elif ext in ["jpg", "jpeg"]:
img = tf.image.decode_jpeg(file_bytes, channels=3)
else:
raise ValueError("Image extension not supported")
img = tf.cast(img, tf.float32) / 355.0
img = tf.image.resize(img, target_size)
return img
def decode_with_labels(path, label):
return decode(path), label
return decode_with_labels if with_labels else decode
def build_augmenter(with_labels=True):
def augment(img):
img = tf.image.random_flip_left_right(img)
img = tf.image.random_flip_up_down(img)
return img
def augment_with_labels(img, label):
return augment(img), label
return augment_with_labels if with_labels else augment
def build_dataset(
paths,
labels=None,
bsize=32,
cache=True,
decode_fn=None,
augment_fn=None,
augment=True,
repeat=True,
shuffle=1024,
cache_dir="",
):
if cache_dir != "" and cache is True:
os.makedirs(cache_dir, exist_ok=True)
if decode_fn is None:
decode_fn = build_decoder(labels is not None)
if augment_fn is None:
augment_fn = build_augmenter(labels is not None)
AUTO = tf.data.experimental.AUTOTUNE
slices = paths if labels is None else (paths, labels)
dset = tf.data.Dataset.from_tensor_slices(slices)
dset = dset.map(decode_fn, num_parallel_calls=AUTO)
dset = dset.cache(cache_dir) if cache else dset
dset = dset.map(augment_fn, num_parallel_calls=AUTO) if augment else dset
dset = dset.repeat() if repeat else dset
dset = dset.shuffle(shuffle) if shuffle else dset
dset = dset.batch(bsize).prefetch(AUTO)
return dset
# strategy = auto_select_accelerator()
# BATCH_SIZE = strategy.num_replicas_in_sync * 16
label_cols = ["negative", "typical", "indeterminate", "atypical"]
study_df[label_cols] = 0
test_decoder = build_decoder(
with_labels=False, target_size=(STUDY_DIMS[0], STUDY_DIMS[0]), ext="png"
)
test_dataset = build_dataset(
test_paths,
bsize=BATCH_SIZE,
repeat=False,
shuffle=False,
augment=False,
cache=False,
decode_fn=test_decoder,
)
with tf.device("/device:GPU:0"):
models = []
models0 = tf.keras.models.load_model(
f"{MODEL_PATH}/model0.h5", custom_objects={"KerasLayer": KerasLayerWrapper}
)
models1 = tf.keras.models.load_model(
f"{MODEL_PATH}/model1.h5", custom_objects={"KerasLayer": KerasLayerWrapper}
)
models2 = tf.keras.models.load_model(
f"{MODEL_PATH}/model2.h5", custom_objects={"KerasLayer": KerasLayerWrapper}
)
models3 = tf.keras.models.load_model(
f"{MODEL_PATH}/model3.h5", custom_objects={"KerasLayer": KerasLayerWrapper}
)
models4 = tf.keras.models.load_model(
f"{MODEL_PATH}/model4.h5", custom_objects={"KerasLayer": KerasLayerWrapper}
)
models.append(models0)
models.append(models1)
models.append(models2)
models.append(models3)
models.append(models4)
study_df[label_cols] = sum(
[model.predict(test_dataset, verbose=1) for model in models]
) / len(models)
study_df["PredictionString"] = study_df[label_cols].apply(
lambda row: f"negative {row.negative} 0 0 1 1 typical {row.typical} 0 0 1 1 indeterminate {row.indeterminate} 0 0 1 1 atypical {row.atypical} 0 0 1 1",
axis=1,
)
del models
del models0, models1, models2, models3, models4
del test_dataset, test_decoder
gc.collect()
# Predict 2Class Image Level
# Using [@Alien](https://www.kaggle.com/h053473666) 2class model.
import efficientnet.tfkeras as efn
MODEL_PATH = "/kaggle/input/siim-covid19-efnb7-train-fold0-5-2class"
test_paths = image_df.image_path.tolist()
image_df["none"] = 0
label_cols = ["none"]
test_decoder = build_decoder(
with_labels=False, target_size=(IMAGE_DIMS[0], IMAGE_DIMS[0]), ext="png"
)
test_dataset = build_dataset(
test_paths,
bsize=BATCH_SIZE,
repeat=False,
shuffle=False,
augment=False,
cache=False,
decode_fn=test_decoder,
)
with tf.device("/device:GPU:0"):
models = []
models0 = tf.keras.models.load_model(f"{MODEL_PATH}/model0.h5")
models1 = tf.keras.models.load_model(f"{MODEL_PATH}/model1.h5")
models2 = tf.keras.models.load_model(f"{MODEL_PATH}/model2.h5")
models3 = tf.keras.models.load_model(f"{MODEL_PATH}/model3.h5")
models4 = tf.keras.models.load_model(f"{MODEL_PATH}/model4.h5")
models.append(models0)
models.append(models1)
models.append(models2)
models.append(models3)
models.append(models4)
image_df[label_cols] = sum(
[model.predict(test_dataset, verbose=1) for model in models]
) / len(models)
del models
del models0, models1, models2, models3, models4
del test_dataset, test_decoder
gc.collect()
# Predict Image Level
# ## SWIN Transformer
# from tqdm.notebook import tqdm
# import torch
# device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
# print(device.type)
# import torchvision
# print(torch.__version__, torch.cuda.is_available())
# # Check mmcv installation
# from mmcv.ops import get_compiling_cuda_version, get_compiler_version
# print(get_compiling_cuda_version())
# print(get_compiler_version())
# # Check MMDetection installation
# from mmdet.apis import set_random_seed
# # Imports
# import mmdet
# from mmdet.apis import set_random_seed
# from mmdet.datasets import build_dataset
# from mmdet.models import build_detector
# import mmcv
# from mmcv import Config
# from mmcv.runner import load_checkpoint
# from mmcv.parallel import MMDataParallel
# from mmdet.apis import inference_detector, init_detector, show_result_pyplot
# from mmdet.apis import single_gpu_test
# from mmdet.datasets import build_dataloader, build_dataset
# import apex
from mmdet.apis import init_detector, inference_detector
def yolo2voc(image_height, image_width, bboxes):
"""
yolo => [xmid, ymid, w, h] (normalized)
voc => [x1, y1, x2, y2]
"""
    bboxes = bboxes.copy().astype(
        float
    )  # cast to float first; the normalized values (< 1) would all become 0 if the array kept an integer (Pascal VOC) dtype
bboxes[..., [0, 2]] = bboxes[..., [0, 2]] * image_width
bboxes[..., [1, 3]] = bboxes[..., [1, 3]] * image_height
bboxes[..., [0, 1]] = bboxes[..., [0, 1]] - bboxes[..., [2, 3]] / 2
bboxes[..., [2, 3]] = bboxes[..., [0, 1]] + bboxes[..., [2, 3]]
return bboxes
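# Quick illustrative sanity check of yolo2voc with a made-up normalized box (values are
# arbitrary, not taken from the data): on a 100x200 (height x width) image, the YOLO box
# [0.5, 0.5, 0.2, 0.4] should map to the VOC box [80, 30, 120, 70].
import numpy as np  # numpy is imported again further below; repeated here so the demo is self-contained
_demo_box = np.array([[0.5, 0.5, 0.2, 0.4]])
print("yolo2voc demo:", yolo2voc(100, 200, _demo_box))  # roughly [[ 80.  30. 120.  70.]]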
def get_all_files_in_folder(folder, types):
files_grabbed = []
for t in types:
files_grabbed.extend(folder.rglob(t))
    files_grabbed = sorted(files_grabbed)
return files_grabbed
from pathlib import Path
from tqdm import tqdm
import cv2
import numpy as np
config_file = "/kaggle/input/swin-transformer-data/mask_rcnn_swin_small_patch4_window7_mstrain_480-800_adamw_3x_coco_without_mask_head.py"
checkpoint_file = "/kaggle/input/swin-transformer-data/epoch_14.pth"
images_dir = Path("/kaggle/tmp/test/image_orig/")
images_ext = ["*.png"]
conf_threshold = 0.001
device = "cuda:0"
image_ids = []
PredictionStrings = []
model = init_detector(config_file, checkpoint_file, device=device)
files = get_all_files_in_folder(images_dir, images_ext)
for file in tqdm(files):
image = cv2.imread(str(file), cv2.IMREAD_COLOR)
h, w = image.shape[:2]
result = inference_detector(model, file)
bboxes = result[0]
dect_results = []
boxes_valid = []
for box in bboxes:
if box[4] > conf_threshold:
boxes_valid.append(box)
if len(boxes_valid) > 0:
for box in boxes_valid:
wn = (box[2] - box[0]) / w
hn = (box[3] - box[1]) / h
x_center_norm = ((box[2] - box[0]) / 2 + box[0]) / w
y_center_norm = ((box[3] - box[1]) / 2 + box[1]) / h
label = 0
d = [label, box[4], x_center_norm, y_center_norm, wn, hn]
dect_results.append(d)
if len(dect_results):
data = np.array(dect_results)
bboxes = list(
np.round(
np.concatenate(
(data[:, :2], np.round(yolo2voc(h, w, data[:, 2:]))), axis=1
).reshape(-1),
12,
).astype(str)
)
for idx in range(len(bboxes)):
bboxes[idx] = str(int(float(bboxes[idx]))) if idx % 6 != 1 else bboxes[idx]
PredictionStrings.append(" ".join(bboxes))
image_ids.append(file.stem)
PredictionStrings[0]
import pandas as pd
detection_df = pd.DataFrame({"id": image_ids, "PredictionString": PredictionStrings})
detection_df
detection_df = detection_df.merge(
image_df[["image_id", "none"]].rename({"image_id": "id"}, axis=1),
on="id",
how="left",
)
for i in range(detection_df.shape[0]):
if detection_df.loc[i, "PredictionString"] != "none 1 0 0 1 1":
detection_df.loc[i, "PredictionString"] = (
detection_df.loc[i, "PredictionString"]
+ " none "
+ str(detection_df.loc[i, "none"])
+ " 0 0 1 1"
)
detection_df = detection_df[["id", "PredictionString"]]
results_df = study_df[["study_id", "PredictionString"]].rename(
{"study_id": "id"}, axis=1
)
results_df = results_df.append(detection_df[["id", "PredictionString"]])
sub_df = pd.read_csv("/kaggle/input/siim-covid19-detection/sample_submission.csv")
sub_df["PredictionString"] = np.nan
sub_df = sub_df.set_index("id")
results_df = results_df.set_index("id")
sub_df.update(results_df)
sub_df = sub_df.reset_index()
sub_df = sub_df.fillna("none 1 0 0 1 1")
sub_df.to_csv("/kaggle/working/submission.csv", index=False)
if fast_sub:
display(sub_df.head(2))
sub_df = pd.read_csv("/kaggle/input/siim-covid19-detection/sample_submission.csv")
sub_df["PredictionString"] = np.nan
# sub_df = sub_df.set_index('id')
sub_df.tail(5)
| false | 1 | 5,832 | 0 | 5,876 | 5,832 |
||
69637573
|
# # About
# This time I decided to follow a completely different approach than in the previous Tabular Playground competitions: use AutoML tools.
# I will NOT do EDA.
# I will NOT learn anything about the data.
# I will just throw it into an AutoML tool and observe the result. In this notebook, that tool is PyCaret.
# (Somehow I already feel bad... I love EDA... but hey, I think it's time to try out something new this time)
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import pycaret
from pycaret.regression import *
print("PyCaret: %s" % pycaret.__version__)
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# read in competition data
df_train = pd.read_csv("../input/tabular-playground-series-aug-2021/train.csv")
df_test = pd.read_csv("../input/tabular-playground-series-aug-2021/test.csv")
sample_submission = pd.read_csv(
"../input/tabular-playground-series-aug-2021/sample_submission.csv"
)
# I quickly checked the data types and confirmed there are no missing values - nothing more (a minimal version of that check is sketched below). Now let's throw the data into PyCaret.
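# A minimal sketch of that quick check, using plain pandas only (nothing PyCaret-specific):
print(df_train.dtypes.value_counts())  # dtype counts; all feature columns come back numeric
print("missing values in train:", df_train.isna().sum().sum())  # expected to print 0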
# setup an experiment to setup pycaret environment and define pre-processing pipeline
pc_experiment = setup(
data=df_train,
target=df_train.columns[-1],
log_experiment=True,
experiment_name="TPS8_fistlook",
)
# Note: the loss column has been identified as the label rather than as a numeric feature; I'm not sure yet what this means... below it says all features are numeric...
# We can see here that PyCaret did not create any additional features (as it would, e.g., when one-hot encoding categorical features): the Transformed Train Set and the Transformed Test Set still have 100 features. The Transformed Test Set is the hold-out set used for validation (the cross-validation folds themselves come from the train portion). The split was done with the default of 70/30; a sketch of setting it explicitly follows below.
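# For reference, the train/hold-out ratio can be set explicitly when creating the experiment.
# Hedged sketch, left commented out because re-running setup() would re-initialize the experiment;
# train_size is a regular setup() argument in PyCaret 2.x:
# pc_experiment = setup(data=df_train, target=df_train.columns[-1], train_size=0.8)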
# compare different baseline models with 5 fold cross-validation
# best_model = compare_models(fold=5)
# Observations: the first three quarters of the progress bar passed quite quickly. Then the visible progress stalled, but "Estimator" kept changing from time to time... so it was apparently still running.
# PyCaret runs algorithms I haven't even heard of...
# Now it's done, and the winner is: CatBoost!
# create model - this function uses the default hyperparameters
catboost = create_model("catboost")
# now let's tune the model - tune_model uses random grid search over a pre-defined search space; a custom_grid may be passed instead (see the sketch right after this call)
tuned_catboost = tune_model(catboost, optimize="RMSE")
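# Hedged sketch of passing a custom search space to tune_model (left commented out so the
# tuning above is not re-run); custom_grid and n_iter are tune_model arguments in PyCaret 2.x,
# and the CatBoost parameter values below are illustrative guesses, not tuned settings:
# custom_grid = {"depth": [4, 6, 8], "learning_rate": [0.01, 0.05, 0.1], "l2_leaf_reg": [1, 3, 5]}
# tuned_catboost = tune_model(catboost, optimize="RMSE", custom_grid=custom_grid, n_iter=20)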
print(tuned_catboost.get_all_params())
plot_model(tuned_catboost, plot="error")
plot_model(tuned_catboost)
evaluate_model(tuned_catboost)
# prediction on the hold-out (validation) set
predict_model(tuned_catboost)
# finalize model: re-train on whole training data (=train+val)
final_catboost = finalize_model(tuned_catboost)
# predict for test set
test_predictions = predict_model(final_catboost, data=df_test)
test_predictions.head()
sample_submission.head()
# prepare submission file
submission = test_predictions[["id", "Label"]]
submission = submission.rename(columns={"Label": "loss"})
submission.to_csv("submission_pycaret.csv", index=False)
submission.head()
# save model
save_model(final_catboost, "Final Catboost Model 20210802")
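# Hedged sketch of re-using the saved pipeline later (not executed here); load_model and
# predict_model are the PyCaret counterparts to save_model used above:
# reloaded_catboost = load_model("Final Catboost Model 20210802")
# predict_model(reloaded_catboost, data=df_test).head()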
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/637/69637573.ipynb
| null | null |
[{"Id": 69637573, "ScriptId": 19011671, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3982116, "CreationDate": "08/02/2021 12:43:48", "VersionNumber": 2.0, "Title": "TPS8 - pycaret commented", "EvaluationDate": "08/02/2021", "IsChange": true, "TotalLines": 90.0, "LinesInsertedFromPrevious": 32.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 58.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 986 | 0 | 986 | 986 |
||
69637999
|
<jupyter_start><jupyter_text>MMDetection v2.14.0
### MMDetection v2.14.0 + Offline Dependancies
<a href="https://www.kaggle.com/sreevishnudamodaran"><img alt="Ask Me Something" src="https://img.shields.io/badge/Ask%20me-something-1abc9c.svg?style=flat-square&logo=kaggle"></a> <a href="https://www.kaggle.com/sreevishnudamodaran"><img alt="Please Upvote If You Like This" src="https://img.shields.io/badge/Please-Upvote%20If%20you%20like%20this-07b3c8?style=flat-square&for-the-badge&logo=kaggle"></a>
***License: Apache License 2.0***
### Contents
✅ MMCV 1.3.8
✅ MMDetection v2.14.0 + Git Repo
✅ MMPycocotools & Other dependancies
<br>
### Suggested Usage
```
## MMDetection Offline Installation
!pip install /kaggle/input/mmdetectionv2140/addict-2.4.0-py3-none-any.whl
!pip install /kaggle/input/mmdetectionv2140/yapf-0.31.0-py2.py3-none-any.whl
!pip install /kaggle/input/mmdetectionv2140/terminal-0.4.0-py3-none-any.whl
!pip install /kaggle/input/mmdetectionv2140/terminaltables-3.1.0-py3-none-any.whl
!pip install /kaggle/input/mmdetectionv2140/mmcv_full-1_3_8-cu110-torch1_7_0/mmcv_full-1.3.8-cp37-cp37m-manylinux1_x86_64.whl
!pip install /kaggle/input/mmdetectionv2140/pycocotools-2.0.2/pycocotools-2.0.2
!pip install /kaggle/input/mmdetectionv2140/mmpycocotools-12.0.3/mmpycocotools-12.0.3
!cp -r /kaggle/input/mmdetectionv2140/mmdetection-2.14.0 /kaggle/working/
!mv /kaggle/working/mmdetection-2.14.0 /kaggle/working/mmdetection
%cd /kaggle/working/mmdetection
!pip install -e . --no-deps
%cd /kaggle/working/
```
<br>
Kaggle dataset identifier: mmdetectionv2140
<jupyter_script># 
# SIIM COVID-19 EffNetV2 CascadeRCNN MMDetection Inference
# Overview
# ✅ EfficientNetV2 TF Model Study Level Inference on GPU with Keras
# ✅ CascadeRCNN Image Level Inference on GPU with MMDetection
# 🏷️ Dataset with EffNetV2 TfHub Weights used in this notebook:
# > [EfficientNetV2 TFHub Weight Files](https://www.kaggle.com/sreevishnudamodaran/efficientnetv2-tfhub-weight-files?select=tfhub_models)
# Official EfficientNetV2 Saved Model Files from tfhub.dev
# 🏷️ EffNetV2 Keras Study Level Train notebook:
# > [SIIM EffNetV2 Keras Study Train [TPU CV0.805+]🎏](https://www.kaggle.com/sreevishnudamodaran/siim-effnetv2-keras-study-train-tpu-cv0-805)
# Official EfficientNetV2 Saved Model Files from tfhub.dev
# 🏷️ MMDetection CascadeRCNN Image Level Train notebook:
# > [SIIM MMDetection+CascadeRCNN+Weight&Bias☄️🔮](https://www.kaggle.com/sreevishnudamodaran/siim-mmdetection-cascadercnn-weight-bias)
# Official EfficientNetV2 Saved Model Files from tfhub.dev
# References:
# - https://www.kaggle.com/h053473666/siim-cov19-efnb7-yolov5-infer
# - https://github.com/tensorflow/hub
# - https://github.com/open-mmlab/mmdetection
#
## MMDetection compatible torch installation
## Compatible Cuda Toolkit installation
## MMDetection Offline Installation
import sys
sys.path.append("/kaggle/working/mmdetection")
import os
from PIL import Image
import pandas as pd
from tqdm.auto import tqdm
import gc
import glob
import numpy as np
# Create Study and Image Level Dataframes
sub_df = pd.read_csv("/kaggle/input/siim-covid19-detection/sample_submission.csv")
# Form study and image dataframes
sub_df["level"] = sub_df.id.map(lambda idx: idx[-5:])
study_df = sub_df[sub_df.level == "study"].rename({"id": "study_id"}, axis=1)
image_df = sub_df[sub_df.level == "image"].rename({"id": "image_id"}, axis=1)
dcm_path = glob.glob(
"/kaggle/input/siim-covid19-detection/test/**/*dcm", recursive=True
)
test_meta = pd.DataFrame({"dcm_path": dcm_path})
test_meta["image_id"] = test_meta.dcm_path.map(
lambda x: x.split("/")[-1].replace(".dcm", "") + "_image"
)
test_meta["study_id"] = test_meta.dcm_path.map(
lambda x: x.split("/")[-3].replace(".dcm", "") + "_study"
)
study_df = study_df.merge(test_meta, on="study_id", how="left")
image_df = image_df.merge(test_meta, on="image_id", how="left")
# Remove duplicate study_ids from study_df
study_df.drop_duplicates(subset="study_id", keep="first", inplace=True)
# Fast or Full Predictions
# For non-competition submission commits, we run the notebook with just two samples each for image-level and study-level inference from the public test data.
fast_sub = False
if sub_df.shape[0] == 2477:
fast_sub = True
study_df = study_df.sample(2)
image_df = image_df.sample(2)
print("\nstudy_df")
display(study_df.head(2))
print("\nimage_df")
display(image_df.head(2))
print("\ntest_meta")
display(test_meta.head(2))
import pydicom
from pydicom.pixel_data_handlers.util import apply_voi_lut
STUDY_DIMS = (768, 768)
IMAGE_DIMS = (512, 512)
study_dir = f"/kaggle/tmp/test/study/"
os.makedirs(study_dir, exist_ok=True)
image_dir = f"/kaggle/tmp/test/image/"
os.makedirs(image_dir, exist_ok=True)
def read_xray(path, voi_lut=True, fix_monochrome=True):
# Original from: https://www.kaggle.com/raddar/convert-dicom-to-np-array-the-correct-way
dicom = pydicom.read_file(path)
# VOI LUT (if available by DICOM device) is used to transform raw DICOM data to
# "human-friendly" view
if voi_lut:
data = apply_voi_lut(dicom.pixel_array, dicom)
else:
data = dicom.pixel_array
# depending on this value, X-ray may look inverted - fix that:
if fix_monochrome and dicom.PhotometricInterpretation == "MONOCHROME1":
data = np.amax(data) - data
data = data - np.min(data)
data = data / np.max(data)
data = (data * 255).astype(np.uint8)
return data
def resize(array, size, keep_ratio=False, resample=Image.LANCZOS):
# Original from: https://www.kaggle.com/xhlulu/vinbigdata-process-and-resize-to-image
im = Image.fromarray(array)
if keep_ratio:
im.thumbnail((size, size), resample)
else:
im = im.resize((size, size), resample)
return im
for index, row in tqdm(
study_df[["study_id", "dcm_path"]].iterrows(), total=study_df.shape[0]
):
# set keep_ratio=True to have original aspect ratio
xray = read_xray(row["dcm_path"])
im = resize(xray, size=STUDY_DIMS[0])
im.save(os.path.join(study_dir, row["study_id"] + ".png"))
image_df["dim0"] = -1
image_df["dim1"] = -1
for index, row in tqdm(
image_df[["image_id", "dcm_path", "dim0", "dim1"]].iterrows(),
total=image_df.shape[0],
):
# set keep_ratio=True to have original aspect ratio
xray = read_xray(row["dcm_path"])
im = resize(xray, size=IMAGE_DIMS[0])
im.save(os.path.join(image_dir, row["image_id"] + ".png"))
image_df.loc[image_df.image_id == row.image_id, "dim0"] = xray.shape[0]
image_df.loc[image_df.image_id == row.image_id, "dim1"] = xray.shape[1]
study_df["image_path"] = study_dir + study_df["study_id"] + ".png"
image_df["image_path"] = image_dir + image_df["image_id"] + ".png"
# Custom Wrapper for Loading a TFHub Model Trained on a TPU
# The EffNetV2 classifier models were trained on a TPU with the `tfhub.KerasLayer` handle argument set to a GCS path, so when the saved model is loaded for inference, loading tries to download the pre-trained weights from the GCS path stored in the layer definition at training time.
# Since inference notebooks have neither GCS nor internet access, the model cannot be loaded unless the pretrained weights are loaded explicitly from a local directory.
# If the models had been trained on a GPU, we could use the cache-location method: store the pre-trained weights in a cache folder whose name is the hashed key of the model location. I tried that method here, but it doesn't work because the model was trained with a GCS path defined in the `tfhub.KerasLayer`, so loading kept hitting the GCS path rather than the cache (a rough sketch of the cache approach is included below for reference).
# The only workable solution was a wrapper class that overrides the handle argument and loads the right pretrained weights explicitly from the local directory.
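# Rough, hedged sketch of the cache-directory approach mentioned above (kept commented out:
# it did not work in this setup, and the cache path below is hypothetical). TFHUB_CACHE_DIR is
# the environment variable TF-Hub checks for cached modules; the handle would be whatever
# tfhub.dev / GCS path was used at training time.
# import os
# import tensorflow_hub as tfhub
# os.environ["TFHUB_CACHE_DIR"] = "/kaggle/working/tfhub_cache"  # hypothetical local cache dir
# layer = tfhub.KerasLayer("<tfhub.dev-or-GCS-handle-used-during-training>")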
import tensorflow as tf
import tensorflow_hub as tfhub
MODEL_ARCH = "efficientnetv2-l-21k-ft1k"
# Get the TensorFlow Hub model URL
hub_type = "feature_vector" # ['classification', 'feature_vector']
MODEL_ARCH_PATH = f"/kaggle/input/efficientnetv2-tfhub-weight-files/tfhub_models/{MODEL_ARCH}/{hub_type}"
# Custom wrapper class to load the right pretrained weights explicitly from the local directory
class KerasLayerWrapper(tfhub.KerasLayer):
def __init__(self, handle, **kwargs):
handle = tfhub.KerasLayer(tfhub.load(MODEL_ARCH_PATH))
super().__init__(handle, **kwargs)
# Predict Study Level
MODEL_PATH = "/kaggle/input/siim-effnetv2-keras-study-train-tpu-cv0-805"
test_paths = study_df.image_path.tolist()
BATCH_SIZE = 16
def build_decoder(with_labels=True, target_size=(300, 300), ext="jpg"):
def decode(path):
file_bytes = tf.io.read_file(path)
if ext == "png":
img = tf.image.decode_png(file_bytes, channels=3)
elif ext in ["jpg", "jpeg"]:
img = tf.image.decode_jpeg(file_bytes, channels=3)
else:
raise ValueError("Image extension not supported")
img = tf.cast(img, tf.float32) / 255.0
img = tf.image.resize(img, target_size)
return img
def decode_with_labels(path, label):
return decode(path), label
return decode_with_labels if with_labels else decode
def build_augmenter(with_labels=True):
def augment(img):
img = tf.image.random_flip_left_right(img)
img = tf.image.random_flip_up_down(img)
return img
def augment_with_labels(img, label):
return augment(img), label
return augment_with_labels if with_labels else augment
def build_dataset(
paths,
labels=None,
bsize=32,
cache=True,
decode_fn=None,
augment_fn=None,
augment=True,
repeat=True,
shuffle=1024,
cache_dir="",
):
if cache_dir != "" and cache is True:
os.makedirs(cache_dir, exist_ok=True)
if decode_fn is None:
decode_fn = build_decoder(labels is not None)
if augment_fn is None:
augment_fn = build_augmenter(labels is not None)
AUTO = tf.data.experimental.AUTOTUNE
slices = paths if labels is None else (paths, labels)
dset = tf.data.Dataset.from_tensor_slices(slices)
dset = dset.map(decode_fn, num_parallel_calls=AUTO)
dset = dset.cache(cache_dir) if cache else dset
dset = dset.map(augment_fn, num_parallel_calls=AUTO) if augment else dset
dset = dset.repeat() if repeat else dset
dset = dset.shuffle(shuffle) if shuffle else dset
dset = dset.batch(bsize).prefetch(AUTO)
return dset
# strategy = auto_select_accelerator()
# BATCH_SIZE = strategy.num_replicas_in_sync * 16
label_cols = ["negative", "typical", "indeterminate", "atypical"]
study_df[label_cols] = 0
test_decoder = build_decoder(
with_labels=False, target_size=(STUDY_DIMS[0], STUDY_DIMS[0]), ext="png"
)
test_dataset = build_dataset(
test_paths,
bsize=BATCH_SIZE,
repeat=False,
shuffle=False,
augment=False,
cache=False,
decode_fn=test_decoder,
)
with tf.device("/device:GPU:0"):
models = []
models0 = tf.keras.models.load_model(
f"{MODEL_PATH}/model0.h5", custom_objects={"KerasLayer": KerasLayerWrapper}
)
models1 = tf.keras.models.load_model(
f"{MODEL_PATH}/model1.h5", custom_objects={"KerasLayer": KerasLayerWrapper}
)
models2 = tf.keras.models.load_model(
f"{MODEL_PATH}/model2.h5", custom_objects={"KerasLayer": KerasLayerWrapper}
)
models3 = tf.keras.models.load_model(
f"{MODEL_PATH}/model3.h5", custom_objects={"KerasLayer": KerasLayerWrapper}
)
models4 = tf.keras.models.load_model(
f"{MODEL_PATH}/model4.h5", custom_objects={"KerasLayer": KerasLayerWrapper}
)
models.append(models0)
models.append(models1)
models.append(models2)
models.append(models3)
models.append(models4)
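# Equal-weight ensemble: average the study-level probabilities predicted by the five fold models.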
study_df[label_cols] = sum(
[model.predict(test_dataset, verbose=1) for model in models]
) / len(models)
study_df["PredictionString"] = study_df[label_cols].apply(
lambda row: f"negative {row.negative} 0 0 1 1 typical {row.typical} 0 0 1 1 indeterminate {row.indeterminate} 0 0 1 1 atypical {row.atypical} 0 0 1 1",
axis=1,
)
del models
del models0, models1, models2, models3, models4
del test_dataset, test_decoder
gc.collect()
# Predict 2Class Image Level
# Using [@Alien](https://www.kaggle.com/h053473666)'s 2-class model.
import efficientnet.tfkeras as efn
MODEL_PATH = "/kaggle/input/siim-covid19-efnb7-train-fold0-5-2class"
test_paths = image_df.image_path.tolist()
image_df["none"] = 0
label_cols = ["none"]
test_decoder = build_decoder(
with_labels=False, target_size=(IMAGE_DIMS[0], IMAGE_DIMS[0]), ext="png"
)
test_dataset = build_dataset(
test_paths,
bsize=BATCH_SIZE,
repeat=False,
shuffle=False,
augment=False,
cache=False,
decode_fn=test_decoder,
)
with tf.device("/device:GPU:0"):
models = []
models0 = tf.keras.models.load_model(f"{MODEL_PATH}/model0.h5")
models1 = tf.keras.models.load_model(f"{MODEL_PATH}/model1.h5")
models2 = tf.keras.models.load_model(f"{MODEL_PATH}/model2.h5")
models3 = tf.keras.models.load_model(f"{MODEL_PATH}/model3.h5")
models4 = tf.keras.models.load_model(f"{MODEL_PATH}/model4.h5")
models.append(models0)
models.append(models1)
models.append(models2)
models.append(models3)
models.append(models4)
models.append(tf.keras.models.load_model("../input/resnet152models/model0.h5"))
models.append(tf.keras.models.load_model("../input/resnet152models/model1.h5"))
models.append(tf.keras.models.load_model("../input/b5trian/model0.h5"))
models.append(tf.keras.models.load_model("../input/b5trian/model1.h5"))
models.append(tf.keras.models.load_model("../input/b5trian/model2.h5"))
models.append(tf.keras.models.load_model("../input/b5trian/model3.h5"))
models.append(tf.keras.models.load_model("../input/b5trian/model4.h5"))
image_df[label_cols] = sum(
[model.predict(test_dataset, verbose=1) for model in models]
) / len(models)
del models
del models0, models1, models2, models3, models4
del test_dataset, test_decoder
gc.collect()
# Predict Image Level
from numba import cuda
import torch
cuda.select_device(0)
cuda.close()
cuda.select_device(0)
from tqdm.notebook import tqdm
import torch
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
print(device.type)
import torchvision
print(torch.__version__, torch.cuda.is_available())
# Check mmcv installation
from mmcv.ops import get_compiling_cuda_version, get_compiler_version
print(get_compiling_cuda_version())
print(get_compiler_version())
# Check MMDetection installation
from mmdet.apis import set_random_seed
# Imports
import mmdet
from mmdet.apis import set_random_seed
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
import mmcv
from mmcv import Config
from mmcv.runner import load_checkpoint
from mmcv.parallel import MMDataParallel
from mmdet.apis import inference_detector, init_detector, show_result_pyplot
from mmdet.apis import single_gpu_test
from mmdet.datasets import build_dataloader, build_dataset
import cv2
import matplotlib.pyplot as plt
label2color = [[59, 238, 119]]
viz_labels = ["Covid_Abnormality"]
def plot_img(img, size=(18, 18), is_rgb=True, title="", cmap=None):
plt.figure(figsize=size)
plt.imshow(img, cmap=cmap)
plt.suptitle(title)
plt.show()
def plot_imgs(imgs, cols=2, size=10, is_rgb=True, title="", cmap=None, img_size=None):
rows = len(imgs) // cols + 1
fig = plt.figure(figsize=(cols * size, rows * size))
for i, img in enumerate(imgs):
if img_size is not None:
img = cv2.resize(img, img_size)
fig.add_subplot(rows, cols, i + 1)
plt.imshow(img, cmap=cmap)
plt.suptitle(title)
return fig
def draw_bbox(image, box, label, color):
alpha = 0.1
alpha_font = 0.6
thickness = 8
font_size = 2.0
font_weight = 1
overlay_bbox = image.copy()
overlay_text = image.copy()
output = image.copy()
text_width, text_height = cv2.getTextSize(
label.upper(), cv2.FONT_HERSHEY_SIMPLEX, font_size, font_weight
)[0]
cv2.rectangle(overlay_bbox, (box[0], box[1]), (box[2], box[3]), color, -1)
cv2.addWeighted(overlay_bbox, alpha, output, 1 - alpha, 0, output)
cv2.rectangle(
overlay_text,
(box[0], box[1] - 18 - text_height),
(box[0] + text_width + 8, box[1]),
(0, 0, 0),
-1,
)
cv2.addWeighted(overlay_text, alpha_font, output, 1 - alpha_font, 0, output)
cv2.rectangle(output, (box[0], box[1]), (box[2], box[3]), color, thickness)
cv2.putText(
output,
label.upper(),
(box[0], box[1] - 12),
cv2.FONT_HERSHEY_SIMPLEX,
font_size,
(255, 255, 255),
font_weight,
cv2.LINE_AA,
)
return output
def draw_bbox_small(image, box, label, color):
alpha = 0.1
alpha_text = 0.3
thickness = 1
font_size = 0.4
overlay_bbox = image.copy()
overlay_text = image.copy()
output = image.copy()
text_width, text_height = cv2.getTextSize(
label.upper(), cv2.FONT_HERSHEY_SIMPLEX, font_size, thickness
)[0]
cv2.rectangle(overlay_bbox, (box[0], box[1]), (box[2], box[3]), color, -1)
cv2.addWeighted(overlay_bbox, alpha, output, 1 - alpha, 0, output)
cv2.rectangle(
overlay_text,
(box[0], box[1] - 7 - text_height),
(box[0] + text_width + 2, box[1]),
(0, 0, 0),
-1,
)
cv2.addWeighted(overlay_text, alpha_text, output, 1 - alpha_text, 0, output)
cv2.rectangle(output, (box[0], box[1]), (box[2], box[3]), color, thickness)
cv2.putText(
output,
label.upper(),
(box[0], box[1] - 5),
cv2.FONT_HERSHEY_SIMPLEX,
font_size,
(255, 255, 255),
thickness,
cv2.LINE_AA,
)
return output
baseline_cfg_path = "/kaggle/input/siim-mmdetection-cascadercnn-weight-bias/job4_cascade_rcnn_x101_32x4d_fpn_1x_fold0/job4_cascade_rcnn_x101_32x4d_fpn_1x_coco.py"
cfg = Config.fromfile(baseline_cfg_path)
cfg.classes = "Covid_Abnormality"
cfg.data.test.img_prefix = ""
cfg.data.test.classes = cfg.classes
# cfg.model.roi_head.bbox_head.num_classes = 1
# cfg.model.bbox_head.num_classes = 1
for head in cfg.model.roi_head.bbox_head:
head.num_classes = 1
# Set the seed so the results are more reproducible
cfg.seed = 211
set_random_seed(211, deterministic=False)
cfg.gpu_ids = [0]
cfg.data.test.pipeline = [
dict(type="LoadImageFromFile"),
dict(
type="MultiScaleFlipAug",
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type="Resize", keep_ratio=True),
dict(type="RandomFlip", direction="horizontal"),
dict(
type="Normalize",
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
),
dict(type="Pad", size_divisor=32),
dict(type="DefaultFormatBundle"),
dict(type="Collect", keys=["img"]),
],
),
]
cfg.test_pipeline = [
dict(type="LoadImageFromFile"),
dict(
type="MultiScaleFlipAug",
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type="Resize", keep_ratio=True),
dict(type="RandomFlip", direction="horizontal"),
dict(
type="Normalize",
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
),
dict(type="Pad", size_divisor=32),
dict(type="DefaultFormatBundle"),
dict(type="Collect", keys=["img"]),
],
),
]
# cfg.data.samples_per_gpu = 4
# cfg.data.workers_per_gpu = 4
# cfg.model.test_cfg.nms.iou_threshold = 0.3
cfg.model.test_cfg.rcnn.score_thr = 0.001
WEIGHTS_FILE = "/kaggle/input/siim-mmdetection-cascadercnn-weight-bias/job4_cascade_rcnn_x101_32x4d_fpn_1x_fold0/epoch_10.pth"
options = dict(classes=("Covid_Abnormality",))  # trailing comma so classes is a one-element tuple
model = init_detector(cfg, WEIGHTS_FILE, device="cuda:0")
from ensemble_boxes import weighted_boxes_fusion, nms
viz_images = []
results = []
score_threshold = cfg.model.test_cfg.rcnn.score_thr
def format_pred(boxes: np.ndarray, scores: np.ndarray, labels: np.ndarray) -> str:
pred_strings = []
label_str = ["opacity"]
for label, score, bbox in zip(labels, scores, boxes):
xmin, ymin, xmax, ymax = bbox.astype(np.int64)
pred_strings.append(
f"{label_str[int(label)]} {score:.16f} {xmin} {ymin} {xmax} {ymax}"
)
return " ".join(pred_strings)
model.to(device)
model.eval()
viz_images = []
with torch.no_grad():
for index, row in tqdm(image_df.iterrows(), total=image_df.shape[0]):
original_H, original_W = (int(row.dim0), int(row.dim1))
predictions = inference_detector(model, row.image_path)
boxes, scores, labels = (list(), list(), list())
for k, cls_result in enumerate(predictions):
# print("cls_result", cls_result)
if cls_result.size != 0:
if len(labels) == 0:
boxes = np.array(cls_result[:, :4])
scores = np.array(cls_result[:, 4])
labels = np.array([k] * len(cls_result[:, 4]))
else:
boxes = np.concatenate((boxes, np.array(cls_result[:, :4])))
scores = np.concatenate((scores, np.array(cls_result[:, 4])))
labels = np.concatenate((labels, [k] * len(cls_result[:, 4])))
if fast_sub:
img_viz = cv2.imread(row.image_path)
for box, label, score in zip(boxes, labels, scores):
color = label2color[int(label)]
img_viz = draw_bbox_small(
img_viz, box.astype(np.int32), f"opacity_{score:.4f}", color
)
viz_images.append(img_viz)
indexes = np.where(scores > score_threshold)
# print(indexes)
boxes = boxes[indexes]
scores = scores[indexes]
labels = labels[indexes]
if len(labels) != 0:
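            # Boxes were predicted on the 512x512 resized PNGs; rescale them back to the
            # original DICOM resolution before formatting the submission string.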
h_ratio = original_H / IMAGE_DIMS[0]
w_ratio = original_W / IMAGE_DIMS[1]
boxes[:, [0, 2]] *= w_ratio
boxes[:, [1, 3]] *= h_ratio
result = {
"id": row.image_id,
"PredictionString": format_pred(boxes, scores, labels),
}
results.append(result)
del model
gc.collect()
detection_df = pd.DataFrame(results, columns=["id", "PredictionString"])
if fast_sub:
display(detection_df.sample(2))
# Plot sample images
plot_imgs(viz_images, cmap=None)
plt.savefig("viz_fig_siim.png", bbox_inches="tight")
plt.show()
detection_df = detection_df.merge(
image_df[["image_id", "none"]].rename({"image_id": "id"}, axis=1),
on="id",
how="left",
)
for i in range(detection_df.shape[0]):
if detection_df.loc[i, "PredictionString"] != "none 1 0 0 1 1":
detection_df.loc[i, "PredictionString"] = (
detection_df.loc[i, "PredictionString"]
+ " none "
+ str(detection_df.loc[i, "none"])
+ " 0 0 1 1"
)
detection_df = detection_df[["id", "PredictionString"]]
results_df = study_df[["study_id", "PredictionString"]].rename(
{"study_id": "id"}, axis=1
)
results_df = results_df.append(detection_df[["id", "PredictionString"]])
sub_df = pd.read_csv("/kaggle/input/siim-covid19-detection/sample_submission.csv")
sub_df["PredictionString"] = np.nan
sub_df = sub_df.set_index("id")
results_df = results_df.set_index("id")
sub_df.update(results_df)
sub_df = sub_df.reset_index()
sub_df = sub_df.fillna("none 1 0 0 1 1")
sub_df.to_csv("/kaggle/working/submission.csv", index=False)
if fast_sub:
display(sub_df.head(2))
sub_df = pd.read_csv("/kaggle/input/siim-covid19-detection/sample_submission.csv")
sub_df["PredictionString"] = np.nan
# sub_df = sub_df.set_index('id')
sub_df.head(5)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/637/69637999.ipynb
|
mmdetectionv2140
|
sreevishnudamodaran
|
[{"Id": 69637999, "ScriptId": 18963477, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4239960, "CreationDate": "08/02/2021 12:48:11", "VersionNumber": 5.0, "Title": "SIIM EffNetV2_L CascadeRCNN MMDetection Infer\u26a1\ud83d\udef0\ufe0f", "EvaluationDate": "08/02/2021", "IsChange": true, "TotalLines": 652.0, "LinesInsertedFromPrevious": 10.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 642.0, "LinesInsertedFromFork": 9.0, "LinesDeletedFromFork": 1.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 643.0, "TotalVotes": 0}]
|
[{"Id": 93074904, "KernelVersionId": 69637999, "SourceDatasetVersionId": 2437252}, {"Id": 93074901, "KernelVersionId": 69637999, "SourceDatasetVersionId": 1874023}, {"Id": 93074905, "KernelVersionId": 69637999, "SourceDatasetVersionId": 2450549}, {"Id": 93074903, "KernelVersionId": 69637999, "SourceDatasetVersionId": 2436967}, {"Id": 93074900, "KernelVersionId": 69637999, "SourceDatasetVersionId": 1666454}, {"Id": 93074902, "KernelVersionId": 69637999, "SourceDatasetVersionId": 2406834}]
|
[{"Id": 2437252, "DatasetId": 1448714, "DatasourceVersionId": 2479513, "CreatorUserId": 5532218, "LicenseName": "Other (specified in description)", "CreationDate": "07/18/2021 10:10:00", "VersionNumber": 4.0, "Title": "MMDetection v2.14.0", "Slug": "mmdetectionv2140", "Subtitle": "MMDetection v2.14.0 + Dependancies for offline inference", "Description": "### MMDetection v2.14.0 + Offline Dependancies\n<a href=\"https://www.kaggle.com/sreevishnudamodaran\"><img alt=\"Ask Me Something\" src=\"https://img.shields.io/badge/Ask%20me-something-1abc9c.svg?style=flat-square&logo=kaggle\"></a> <a href=\"https://www.kaggle.com/sreevishnudamodaran\"><img alt=\"Please Upvote If You Like This\" src=\"https://img.shields.io/badge/Please-Upvote%20If%20you%20like%20this-07b3c8?style=flat-square&for-the-badge&logo=kaggle\"></a>\n***License: Apache License 2.0***\n\n### Contents\n\n \u2705 MMCV 1.3.8\n \u2705 MMDetection v2.14.0 + Git Repo\n \u2705 MMPycocotools & Other dependancies\n <br>\n\n### Suggested Usage\n\n```\n## MMDetection Offline Installation\n!pip install /kaggle/input/mmdetectionv2140/addict-2.4.0-py3-none-any.whl\n!pip install /kaggle/input/mmdetectionv2140/yapf-0.31.0-py2.py3-none-any.whl\n!pip install /kaggle/input/mmdetectionv2140/terminal-0.4.0-py3-none-any.whl\n!pip install /kaggle/input/mmdetectionv2140/terminaltables-3.1.0-py3-none-any.whl\n!pip install /kaggle/input/mmdetectionv2140/mmcv_full-1_3_8-cu110-torch1_7_0/mmcv_full-1.3.8-cp37-cp37m-manylinux1_x86_64.whl\n!pip install /kaggle/input/mmdetectionv2140/pycocotools-2.0.2/pycocotools-2.0.2\n!pip install /kaggle/input/mmdetectionv2140/mmpycocotools-12.0.3/mmpycocotools-12.0.3\n\n!cp -r /kaggle/input/mmdetectionv2140/mmdetection-2.14.0 /kaggle/working/\n!mv /kaggle/working/mmdetection-2.14.0 /kaggle/working/mmdetection\n%cd /kaggle/working/mmdetection\n!pip install -e . --no-deps\n%cd /kaggle/working/\n```\n <br>\n### Acknowledgements\nhttps://github.com/open-mmlab/mmdetection", "VersionNotes": "Add terminal and terminal tables dependancies.", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1448714, "CreatorUserId": 5532218, "OwnerUserId": 5532218.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2437252.0, "CurrentDatasourceVersionId": 2479513.0, "ForumId": 1468255, "Type": 2, "CreationDate": "07/05/2021 06:22:27", "LastActivityDate": "07/05/2021", "TotalViews": 4627, "TotalDownloads": 239, "TotalVotes": 31, "TotalKernels": 65}]
|
[{"Id": 5532218, "UserName": "sreevishnudamodaran", "DisplayName": "Sreevishnu Damodaran", "RegisterDate": "07/27/2020", "PerformanceTier": 3}]
|
# 
# SIIM COVID-19 EffNetV2 CascadeRCNN MMDetection Inference
# Overview
# ✅ EfficientNetV2 TF Model Study Level Inference on GPU with Keras
# ✅ CascadeRCNN Image Level Inference on GPU with MMDetection
# 🏷️ Dataset with EffNetV2 TfHub Weights used in this notebook:
# > [EfficientNetV2 TFHub Weight Files](https://www.kaggle.com/sreevishnudamodaran/efficientnetv2-tfhub-weight-files?select=tfhub_models)
# Official EfficientNetV2 Saved Model Files from tfhub.dev
# 🏷️ EffNetV2 Keras Study Level Train notebook:
# > [SIIM EffNetV2 Keras Study Train [TPU CV0.805+]🎏](https://www.kaggle.com/sreevishnudamodaran/siim-effnetv2-keras-study-train-tpu-cv0-805)
# Official EfficientNetV2 Saved Model Files from tfhub.dev
# 🏷️ MMDetection CascadeRCNN Image Level Train notebook:
# > [SIIM MMDetection+CascadeRCNN+Weight&Bias☄️🔮](https://www.kaggle.com/sreevishnudamodaran/siim-mmdetection-cascadercnn-weight-bias)
# Official EfficientNetV2 Saved Model Files from tfhub.dev
# References:
# - https://www.kaggle.com/h053473666/siim-cov19-efnb7-yolov5-infer
# - https://github.com/tensorflow/hub
# - https://github.com/open-mmlab/mmdetection
#
## MMDetection compatible torch installation
## Compatible Cuda Toolkit installation
## MMDetection Offline Installation
import sys
sys.path.append("/kaggle/working/mmdetection")
import os
from PIL import Image
import pandas as pd
from tqdm.auto import tqdm
import gc
import glob
import numpy as np
# Create Study and Image Level Dataframes
sub_df = pd.read_csv("/kaggle/input/siim-covid19-detection/sample_submission.csv")
# Form study and image dataframes
sub_df["level"] = sub_df.id.map(lambda idx: idx[-5:])
study_df = sub_df[sub_df.level == "study"].rename({"id": "study_id"}, axis=1)
image_df = sub_df[sub_df.level == "image"].rename({"id": "image_id"}, axis=1)
dcm_path = glob.glob(
"/kaggle/input/siim-covid19-detection/test/**/*dcm", recursive=True
)
test_meta = pd.DataFrame({"dcm_path": dcm_path})
test_meta["image_id"] = test_meta.dcm_path.map(
lambda x: x.split("/")[-1].replace(".dcm", "") + "_image"
)
test_meta["study_id"] = test_meta.dcm_path.map(
lambda x: x.split("/")[-3].replace(".dcm", "") + "_study"
)
study_df = study_df.merge(test_meta, on="study_id", how="left")
image_df = image_df.merge(test_meta, on="image_id", how="left")
# Remove duplicates study_ids from study_df
study_df.drop_duplicates(subset="study_id", keep="first", inplace=True)
# Fast or Full Predictions
# In case of non-competetion submission commits, we run the notebook with just two images each for image level and study level inference from the public test data.
fast_sub = False
if sub_df.shape[0] == 2477:
fast_sub = True
study_df = study_df.sample(2)
image_df = image_df.sample(2)
print("\nstudy_df")
display(study_df.head(2))
print("\nimage_df")
display(image_df.head(2))
print("\ntest_meta")
display(test_meta.head(2))
import pydicom
from pydicom.pixel_data_handlers.util import apply_voi_lut
STUDY_DIMS = (768, 768)
IMAGE_DIMS = (512, 512)
study_dir = f"/kaggle/tmp/test/study/"
os.makedirs(study_dir, exist_ok=True)
image_dir = f"/kaggle/tmp/test/image/"
os.makedirs(image_dir, exist_ok=True)
def read_xray(path, voi_lut=True, fix_monochrome=True):
# Original from: https://www.kaggle.com/raddar/convert-dicom-to-np-array-the-correct-way
dicom = pydicom.read_file(path)
# VOI LUT (if available by DICOM device) is used to transform raw DICOM data to
# "human-friendly" view
if voi_lut:
data = apply_voi_lut(dicom.pixel_array, dicom)
else:
data = dicom.pixel_array
# depending on this value, X-ray may look inverted - fix that:
if fix_monochrome and dicom.PhotometricInterpretation == "MONOCHROME1":
data = np.amax(data) - data
data = data - np.min(data)
data = data / np.max(data)
data = (data * 255).astype(np.uint8)
return data
def resize(array, size, keep_ratio=False, resample=Image.LANCZOS):
# Original from: https://www.kaggle.com/xhlulu/vinbigdata-process-and-resize-to-image
im = Image.fromarray(array)
if keep_ratio:
im.thumbnail((size, size), resample)
else:
im = im.resize((size, size), resample)
return im
for index, row in tqdm(
study_df[["study_id", "dcm_path"]].iterrows(), total=study_df.shape[0]
):
# set keep_ratio=True to have original aspect ratio
xray = read_xray(row["dcm_path"])
im = resize(xray, size=STUDY_DIMS[0])
im.save(os.path.join(study_dir, row["study_id"] + ".png"))
image_df["dim0"] = -1
image_df["dim1"] = -1
for index, row in tqdm(
image_df[["image_id", "dcm_path", "dim0", "dim1"]].iterrows(),
total=image_df.shape[0],
):
# set keep_ratio=True to have original aspect ratio
xray = read_xray(row["dcm_path"])
im = resize(xray, size=IMAGE_DIMS[0])
im.save(os.path.join(image_dir, row["image_id"] + ".png"))
image_df.loc[image_df.image_id == row.image_id, "dim0"] = xray.shape[0]
image_df.loc[image_df.image_id == row.image_id, "dim1"] = xray.shape[1]
study_df["image_path"] = study_dir + study_df["study_id"] + ".png"
image_df["image_path"] = image_dir + image_df["image_id"] + ".png"
# Custom Wrapper for Loading TFHub Model trained in TPU
# Since the EffNetV2 Classifier models were trained on a TPU with the `tfhub.KerasLayer` formed with the handle argument as a GCS path, while loading the saved model for inference, the method tries to download the pre-trained weights from the definition of the layer from training i.e a GCS path.
# Since, inference notebooks don't have GCS and internet access, it is not possible to load the model without the pretrained weights explicitly loaded from the local directory.
# If the models were trained on a GPU, we can use the cache location method to load the pre-trained weights by storing them in a cache folder with the hashed key of the model location, as the folder name. I tried this method here but, it doesn't seem to work as the model was trained with a GCS path defined in the `tfhub.KerasLayer` and the method kept on hitting the GCS path rather than loading the weights from the cache location.
# The only solution was to create a wrapper class to correct the handle argument to load the right pretrained weights explicitly from the local directory.
import tensorflow as tf
import tensorflow_hub as tfhub
MODEL_ARCH = "efficientnetv2-l-21k-ft1k"
# Get the TensorFlow Hub model URL
hub_type = "feature_vector" # ['classification', 'feature_vector']
MODEL_ARCH_PATH = f"/kaggle/input/efficientnetv2-tfhub-weight-files/tfhub_models/{MODEL_ARCH}/{hub_type}"
# Custom wrapper class to load the right pretrained weights explicitly from the local directory
class KerasLayerWrapper(tfhub.KerasLayer):
def __init__(self, handle, **kwargs):
handle = tfhub.KerasLayer(tfhub.load(MODEL_ARCH_PATH))
super().__init__(handle, **kwargs)
# Predict Study Level
MODEL_PATH = "/kaggle/input/siim-effnetv2-keras-study-train-tpu-cv0-805"
test_paths = study_df.image_path.tolist()
BATCH_SIZE = 16
def build_decoder(with_labels=True, target_size=(300, 300), ext="jpg"):
def decode(path):
file_bytes = tf.io.read_file(path)
if ext == "png":
img = tf.image.decode_png(file_bytes, channels=3)
elif ext in ["jpg", "jpeg"]:
img = tf.image.decode_jpeg(file_bytes, channels=3)
else:
raise ValueError("Image extension not supported")
img = tf.cast(img, tf.float32) / 255.0
img = tf.image.resize(img, target_size)
return img
def decode_with_labels(path, label):
return decode(path), label
return decode_with_labels if with_labels else decode
def build_augmenter(with_labels=True):
def augment(img):
img = tf.image.random_flip_left_right(img)
img = tf.image.random_flip_up_down(img)
return img
def augment_with_labels(img, label):
return augment(img), label
return augment_with_labels if with_labels else augment
def build_dataset(
paths,
labels=None,
bsize=32,
cache=True,
decode_fn=None,
augment_fn=None,
augment=True,
repeat=True,
shuffle=1024,
cache_dir="",
):
if cache_dir != "" and cache is True:
os.makedirs(cache_dir, exist_ok=True)
if decode_fn is None:
decode_fn = build_decoder(labels is not None)
if augment_fn is None:
augment_fn = build_augmenter(labels is not None)
AUTO = tf.data.experimental.AUTOTUNE
slices = paths if labels is None else (paths, labels)
dset = tf.data.Dataset.from_tensor_slices(slices)
dset = dset.map(decode_fn, num_parallel_calls=AUTO)
dset = dset.cache(cache_dir) if cache else dset
dset = dset.map(augment_fn, num_parallel_calls=AUTO) if augment else dset
dset = dset.repeat() if repeat else dset
dset = dset.shuffle(shuffle) if shuffle else dset
dset = dset.batch(bsize).prefetch(AUTO)
return dset
# strategy = auto_select_accelerator()
# BATCH_SIZE = strategy.num_replicas_in_sync * 16
label_cols = ["negative", "typical", "indeterminate", "atypical"]
study_df[label_cols] = 0
test_decoder = build_decoder(
with_labels=False, target_size=(STUDY_DIMS[0], STUDY_DIMS[0]), ext="png"
)
test_dataset = build_dataset(
test_paths,
bsize=BATCH_SIZE,
repeat=False,
shuffle=False,
augment=False,
cache=False,
decode_fn=test_decoder,
)
with tf.device("/device:GPU:0"):
models = []
models0 = tf.keras.models.load_model(
f"{MODEL_PATH}/model0.h5", custom_objects={"KerasLayer": KerasLayerWrapper}
)
models1 = tf.keras.models.load_model(
f"{MODEL_PATH}/model1.h5", custom_objects={"KerasLayer": KerasLayerWrapper}
)
models2 = tf.keras.models.load_model(
f"{MODEL_PATH}/model2.h5", custom_objects={"KerasLayer": KerasLayerWrapper}
)
models3 = tf.keras.models.load_model(
f"{MODEL_PATH}/model3.h5", custom_objects={"KerasLayer": KerasLayerWrapper}
)
models4 = tf.keras.models.load_model(
f"{MODEL_PATH}/model4.h5", custom_objects={"KerasLayer": KerasLayerWrapper}
)
models.append(models0)
models.append(models1)
models.append(models2)
models.append(models3)
models.append(models4)
study_df[label_cols] = sum(
[model.predict(test_dataset, verbose=1) for model in models]
) / len(models)
study_df["PredictionString"] = study_df[label_cols].apply(
lambda row: f"negative {row.negative} 0 0 1 1 typical {row.typical} 0 0 1 1 indeterminate {row.indeterminate} 0 0 1 1 atypical {row.atypical} 0 0 1 1",
axis=1,
)
del models
del models0, models1, models2, models3, models4
del test_dataset, test_decoder
gc.collect()
# Predict 2Class Image Level
# Using [@Alien](https://www.kaggle.com/h053473666) 2class model.
import efficientnet.tfkeras as efn
MODEL_PATH = "/kaggle/input/siim-covid19-efnb7-train-fold0-5-2class"
test_paths = image_df.image_path.tolist()
image_df["none"] = 0
label_cols = ["none"]
test_decoder = build_decoder(
with_labels=False, target_size=(IMAGE_DIMS[0], IMAGE_DIMS[0]), ext="png"
)
test_dataset = build_dataset(
test_paths,
bsize=BATCH_SIZE,
repeat=False,
shuffle=False,
augment=False,
cache=False,
decode_fn=test_decoder,
)
with tf.device("/device:GPU:0"):
models = []
models0 = tf.keras.models.load_model(f"{MODEL_PATH}/model0.h5")
models1 = tf.keras.models.load_model(f"{MODEL_PATH}/model1.h5")
models2 = tf.keras.models.load_model(f"{MODEL_PATH}/model2.h5")
models3 = tf.keras.models.load_model(f"{MODEL_PATH}/model3.h5")
models4 = tf.keras.models.load_model(f"{MODEL_PATH}/model4.h5")
models.append(models0)
models.append(models1)
models.append(models2)
models.append(models3)
models.append(models4)
models.append(tf.keras.models.load_model("../input/resnet152models/model0.h5"))
models.append(tf.keras.models.load_model("../input/resnet152models/model1.h5"))
models.append(tf.keras.models.load_model("../input/b5trian/model0.h5"))
models.append(tf.keras.models.load_model("../input/b5trian/model1.h5"))
models.append(tf.keras.models.load_model("../input/b5trian/model2.h5"))
models.append(tf.keras.models.load_model("../input/b5trian/model3.h5"))
models.append(tf.keras.models.load_model("../input/b5trian/model4.h5"))
image_df[label_cols] = sum(
[model.predict(test_dataset, verbose=1) for model in models]
) / len(models)
del models
del models0, models1, models2, models3, models4
del test_dataset, test_decoder
gc.collect()
# Predict Image Level
from numba import cuda
import torch
cuda.select_device(0)
cuda.close()
cuda.select_device(0)
from tqdm.notebook import tqdm
import torch
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
print(device.type)
import torchvision
print(torch.__version__, torch.cuda.is_available())
# Check mmcv installation
from mmcv.ops import get_compiling_cuda_version, get_compiler_version
print(get_compiling_cuda_version())
print(get_compiler_version())
# Check MMDetection installation
from mmdet.apis import set_random_seed
# Imports
import mmdet
from mmdet.apis import set_random_seed
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
import mmcv
from mmcv import Config
from mmcv.runner import load_checkpoint
from mmcv.parallel import MMDataParallel
from mmdet.apis import inference_detector, init_detector, show_result_pyplot
from mmdet.apis import single_gpu_test
from mmdet.datasets import build_dataloader, build_dataset
import cv2
import matplotlib.pyplot as plt
label2color = [[59, 238, 119]]
viz_labels = ["Covid_Abnormality"]
def plot_img(img, size=(18, 18), is_rgb=True, title="", cmap=None):
plt.figure(figsize=size)
plt.imshow(img, cmap=cmap)
plt.suptitle(title)
plt.show()
def plot_imgs(imgs, cols=2, size=10, is_rgb=True, title="", cmap=None, img_size=None):
rows = len(imgs) // cols + 1
fig = plt.figure(figsize=(cols * size, rows * size))
for i, img in enumerate(imgs):
if img_size is not None:
img = cv2.resize(img, img_size)
fig.add_subplot(rows, cols, i + 1)
plt.imshow(img, cmap=cmap)
plt.suptitle(title)
return fig
def draw_bbox(image, box, label, color):
alpha = 0.1
alpha_font = 0.6
thickness = 8
font_size = 2.0
font_weight = 1
overlay_bbox = image.copy()
overlay_text = image.copy()
output = image.copy()
text_width, text_height = cv2.getTextSize(
label.upper(), cv2.FONT_HERSHEY_SIMPLEX, font_size, font_weight
)[0]
cv2.rectangle(overlay_bbox, (box[0], box[1]), (box[2], box[3]), color, -1)
cv2.addWeighted(overlay_bbox, alpha, output, 1 - alpha, 0, output)
cv2.rectangle(
overlay_text,
(box[0], box[1] - 18 - text_height),
(box[0] + text_width + 8, box[1]),
(0, 0, 0),
-1,
)
cv2.addWeighted(overlay_text, alpha_font, output, 1 - alpha_font, 0, output)
cv2.rectangle(output, (box[0], box[1]), (box[2], box[3]), color, thickness)
cv2.putText(
output,
label.upper(),
(box[0], box[1] - 12),
cv2.FONT_HERSHEY_SIMPLEX,
font_size,
(255, 255, 255),
font_weight,
cv2.LINE_AA,
)
return output
def draw_bbox_small(image, box, label, color):
alpha = 0.1
alpha_text = 0.3
thickness = 1
font_size = 0.4
overlay_bbox = image.copy()
overlay_text = image.copy()
output = image.copy()
text_width, text_height = cv2.getTextSize(
label.upper(), cv2.FONT_HERSHEY_SIMPLEX, font_size, thickness
)[0]
cv2.rectangle(overlay_bbox, (box[0], box[1]), (box[2], box[3]), color, -1)
cv2.addWeighted(overlay_bbox, alpha, output, 1 - alpha, 0, output)
cv2.rectangle(
overlay_text,
(box[0], box[1] - 7 - text_height),
(box[0] + text_width + 2, box[1]),
(0, 0, 0),
-1,
)
cv2.addWeighted(overlay_text, alpha_text, output, 1 - alpha_text, 0, output)
cv2.rectangle(output, (box[0], box[1]), (box[2], box[3]), color, thickness)
cv2.putText(
output,
label.upper(),
(box[0], box[1] - 5),
cv2.FONT_HERSHEY_SIMPLEX,
font_size,
(255, 255, 255),
thickness,
cv2.LINE_AA,
)
return output
baseline_cfg_path = "/kaggle/input/siim-mmdetection-cascadercnn-weight-bias/job4_cascade_rcnn_x101_32x4d_fpn_1x_fold0/job4_cascade_rcnn_x101_32x4d_fpn_1x_coco.py"
cfg = Config.fromfile(baseline_cfg_path)
cfg.classes = "Covid_Abnormality"
cfg.data.test.img_prefix = ""
cfg.data.test.classes = cfg.classes
# cfg.model.roi_head.bbox_head.num_classes = 1
# cfg.model.bbox_head.num_classes = 1
for head in cfg.model.roi_head.bbox_head:
head.num_classes = 1
# Set seed thus the results are more reproducible
cfg.seed = 211
set_random_seed(211, deterministic=False)
cfg.gpu_ids = [0]
cfg.data.test.pipeline = [
dict(type="LoadImageFromFile"),
dict(
type="MultiScaleFlipAug",
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type="Resize", keep_ratio=True),
dict(type="RandomFlip", direction="horizontal"),
dict(
type="Normalize",
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
),
dict(type="Pad", size_divisor=32),
dict(type="DefaultFormatBundle"),
dict(type="Collect", keys=["img"]),
],
),
]
cfg.test_pipeline = [
dict(type="LoadImageFromFile"),
dict(
type="MultiScaleFlipAug",
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type="Resize", keep_ratio=True),
dict(type="RandomFlip", direction="horizontal"),
dict(
type="Normalize",
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
),
dict(type="Pad", size_divisor=32),
dict(type="DefaultFormatBundle"),
dict(type="Collect", keys=["img"]),
],
),
]
# cfg.data.samples_per_gpu = 4
# cfg.data.workers_per_gpu = 4
# cfg.model.test_cfg.nms.iou_threshold = 0.3
cfg.model.test_cfg.rcnn.score_thr = 0.001
WEIGHTS_FILE = "/kaggle/input/siim-mmdetection-cascadercnn-weight-bias/job4_cascade_rcnn_x101_32x4d_fpn_1x_fold0/epoch_10.pth"
options = dict(classes=("Covid_Abnormality"))
model = init_detector(cfg, WEIGHTS_FILE, device="cuda:0")
from ensemble_boxes import weighted_boxes_fusion, nms
viz_images = []
results = []
score_threshold = cfg.model.test_cfg.rcnn.score_thr
def format_pred(boxes: np.ndarray, scores: np.ndarray, labels: np.ndarray) -> str:
pred_strings = []
label_str = ["opacity"]
for label, score, bbox in zip(labels, scores, boxes):
xmin, ymin, xmax, ymax = bbox.astype(np.int64)
pred_strings.append(
f"{label_str[int(label)]} {score:.16f} {xmin} {ymin} {xmax} {ymax}"
)
return " ".join(pred_strings)
model.to(device)
model.eval()
viz_images = []
with torch.no_grad():
for index, row in tqdm(image_df.iterrows(), total=image_df.shape[0]):
original_H, original_W = (int(row.dim0), int(row.dim1))
predictions = inference_detector(model, row.image_path)
boxes, scores, labels = (list(), list(), list())
for k, cls_result in enumerate(predictions):
# print("cls_result", cls_result)
if cls_result.size != 0:
if len(labels) == 0:
boxes = np.array(cls_result[:, :4])
scores = np.array(cls_result[:, 4])
labels = np.array([k] * len(cls_result[:, 4]))
else:
boxes = np.concatenate((boxes, np.array(cls_result[:, :4])))
scores = np.concatenate((scores, np.array(cls_result[:, 4])))
labels = np.concatenate((labels, [k] * len(cls_result[:, 4])))
if fast_sub:
img_viz = cv2.imread(row.image_path)
for box, label, score in zip(boxes, labels, scores):
color = label2color[int(label)]
img_viz = draw_bbox_small(
img_viz, box.astype(np.int32), f"opacity_{score:.4f}", color
)
viz_images.append(img_viz)
indexes = np.where(scores > score_threshold)
# print(indexes)
boxes = boxes[indexes]
scores = scores[indexes]
labels = labels[indexes]
if len(labels) != 0:
h_ratio = original_H / IMAGE_DIMS[0]
w_ratio = original_W / IMAGE_DIMS[1]
boxes[:, [0, 2]] *= w_ratio
boxes[:, [1, 3]] *= h_ratio
result = {
"id": row.image_id,
"PredictionString": format_pred(boxes, scores, labels),
}
results.append(result)
del model
gc.collect()
detection_df = pd.DataFrame(results, columns=["id", "PredictionString"])
if fast_sub:
display(detection_df.sample(2))
# Plot sample images
plot_imgs(viz_images, cmap=None)
plt.savefig("viz_fig_siim.png", bbox_inches="tight")
plt.show()
detection_df = detection_df.merge(
image_df[["image_id", "none"]].rename({"image_id": "id"}, axis=1),
on="id",
how="left",
)
for i in range(detection_df.shape[0]):
if detection_df.loc[i, "PredictionString"] != "none 1 0 0 1 1":
detection_df.loc[i, "PredictionString"] = (
detection_df.loc[i, "PredictionString"]
+ " none "
+ str(detection_df.loc[i, "none"])
+ " 0 0 1 1"
)
detection_df = detection_df[["id", "PredictionString"]]
results_df = study_df[["study_id", "PredictionString"]].rename(
{"study_id": "id"}, axis=1
)
results_df = pd.concat([results_df, detection_df[["id", "PredictionString"]]])
sub_df = pd.read_csv("/kaggle/input/siim-covid19-detection/sample_submission.csv")
sub_df["PredictionString"] = np.nan
sub_df = sub_df.set_index("id")
results_df = results_df.set_index("id")
sub_df.update(results_df)
sub_df = sub_df.reset_index()
sub_df = sub_df.fillna("none 1 0 0 1 1")
sub_df.to_csv("/kaggle/working/submission.csv", index=False)
if fast_sub:
display(sub_df.head(2))
sub_df = pd.read_csv("/kaggle/input/siim-covid19-detection/sample_submission.csv")
sub_df["PredictionString"] = np.nan
# sub_df = sub_df.set_index('id')
sub_df.head(5)
| false | 1 | 7,502 | 0 | 8,213 | 7,502 |
||
69637359
|
<jupyter_start><jupyter_text>siim-covid19 512 img png 600 study png
Kaggle dataset identifier: siimcovid19-512-img-png-600-study-png
<jupyter_script>import os
import efficientnet.tfkeras as efn
import numpy as np
import pandas as pd
from kaggle_datasets import KaggleDatasets
from sklearn.model_selection import train_test_split
import tensorflow as tf
from sklearn.model_selection import GroupKFold
def auto_select_accelerator():
try:
tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
strategy = tf.distribute.experimental.TPUStrategy(tpu)
print("Running on TPU:", tpu.master())
except ValueError:
strategy = tf.distribute.get_strategy()
print(f"Running on {strategy.num_replicas_in_sync} replicas")
return strategy
def build_decoder(with_labels=True, target_size=(256, 256), ext="jpg"):
def decode(path):
file_bytes = tf.io.read_file(path)
if ext == "png":
img = tf.image.decode_png(file_bytes, channels=3)
elif ext in ["jpg", "jpeg"]:
img = tf.image.decode_jpeg(file_bytes, channels=3)
else:
raise ValueError("Image extension not supported")
img = tf.cast(img, tf.float32) / 255.0
img = tf.image.resize(img, target_size)
return img
def decode_with_labels(path, label):
return decode(path), label
return decode_with_labels if with_labels else decode
def build_augmenter(with_labels=True):
def augment(img):
img = tf.image.random_flip_left_right(img)
img = tf.image.random_flip_up_down(img)
return img
def augment_with_labels(img, label):
return augment(img), label
return augment_with_labels if with_labels else augment
def build_dataset(
paths,
labels=None,
bsize=128,
cache=True,
decode_fn=None,
augment_fn=None,
augment=True,
repeat=True,
shuffle=1024,
cache_dir="",
):
if cache_dir != "" and cache is True:
os.makedirs(cache_dir, exist_ok=True)
if decode_fn is None:
decode_fn = build_decoder(labels is not None)
if augment_fn is None:
augment_fn = build_augmenter(labels is not None)
AUTO = tf.data.experimental.AUTOTUNE
slices = paths if labels is None else (paths, labels)
dset = tf.data.Dataset.from_tensor_slices(slices)
dset = dset.map(decode_fn, num_parallel_calls=AUTO)
dset = dset.cache(cache_dir) if cache else dset
dset = dset.map(augment_fn, num_parallel_calls=AUTO) if augment else dset
dset = dset.repeat() if repeat else dset
dset = dset.shuffle(shuffle) if shuffle else dset
dset = dset.batch(bsize).prefetch(AUTO)
return dset
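# build_dataset chains decode -> cache -> augment -> repeat -> shuffle -> batch -> prefetch.
# With repeat=True the dataset is infinite, so model.fit() needs steps_per_epoch
# (set further below). A minimal sketch for an unlabeled test split (hypothetical
# `test_paths`; `test_decoder` is defined later in this notebook):
# test_dataset = build_dataset(test_paths, labels=None, bsize=BATCH_SIZE,
#                              decode_fn=test_decoder, repeat=False,
#                              shuffle=False, augment=False)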
COMPETITION_NAME = "siimcovid19-512-img-png-600-study-png"
strategy = auto_select_accelerator()
BATCH_SIZE = strategy.num_replicas_in_sync * 16
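# Global batch size scales with the accelerator: e.g. 8 TPU cores -> 128, a single
# GPU/CPU replica -> 16.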
GCS_DS_PATH = KaggleDatasets().get_gcs_path(COMPETITION_NAME)
load_dir = f"/kaggle/input/{COMPETITION_NAME}/"
df = pd.read_csv("../input/siim-cov19-csv-2class/train.csv")
label_cols = df.columns[4]
gkf = GroupKFold(n_splits=2)
df["fold"] = -1
for fold, (train_idx, val_idx) in enumerate(
gkf.split(df, groups=df.StudyInstanceUID.tolist())
):
df.loc[val_idx, "fold"] = fold
for i in range(2):
valid_paths = (
GCS_DS_PATH + "/image/" + df[df["fold"] == i]["id"] + ".png"
) # "/train/"
train_paths = (
GCS_DS_PATH + "/image/" + df[df["fold"] != i]["id"] + ".png"
) # "/train/"
valid_labels = df[df["fold"] == i][label_cols].values
train_labels = df[df["fold"] != i][label_cols].values
IMSIZE = (224, 240, 260, 300, 380, 456, 528, 600, 512)
IMS = 8
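    # IMSIZE[IMS] == IMSIZE[8] == 512, matching the 512-px PNGs in this dataset.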
decoder = build_decoder(
with_labels=True, target_size=(IMSIZE[IMS], IMSIZE[IMS]), ext="png"
)
test_decoder = build_decoder(
with_labels=False, target_size=(IMSIZE[IMS], IMSIZE[IMS]), ext="png"
)
train_dataset = build_dataset(
train_paths, train_labels, bsize=BATCH_SIZE, decode_fn=decoder
)
valid_dataset = build_dataset(
valid_paths,
valid_labels,
bsize=BATCH_SIZE,
decode_fn=decoder,
repeat=False,
shuffle=False,
augment=False,
)
try:
n_labels = train_labels.shape[1]
    except IndexError:  # labels are 1-D, so fall back to a single output unit
n_labels = 1
with strategy.scope():
model = tf.keras.Sequential(
[
# efn.EfficientNetB7(
# input_shape=(IMSIZE[IMS], IMSIZE[IMS], 3),
# weights='imagenet',include_top=False),
tf.keras.applications.resnet.ResNet101(
include_top=False,
weights="imagenet",
input_tensor=None,
input_shape=(IMSIZE[IMS], IMSIZE[IMS], 3),
pooling=None,
),
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(n_labels, activation="sigmoid"),
]
)
model.compile(
optimizer=tf.keras.optimizers.Adam(),
loss="binary_crossentropy",
metrics=[tf.keras.metrics.AUC(multi_label=True)],
)
model.summary()
steps_per_epoch = train_paths.shape[0] // BATCH_SIZE
checkpoint = tf.keras.callbacks.ModelCheckpoint(
f"model{i}.h5", save_best_only=True, monitor="val_loss", mode="min"
)
lr_reducer = tf.keras.callbacks.ReduceLROnPlateau(
monitor="val_loss", patience=3, min_lr=1e-6, mode="min"
)
history = model.fit(
train_dataset,
epochs=20,
verbose=1,
callbacks=[checkpoint, lr_reducer],
steps_per_epoch=steps_per_epoch,
validation_data=valid_dataset,
)
hist_df = pd.DataFrame(history.history)
hist_df.to_csv(f"history{i}.csv")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/637/69637359.ipynb
|
siimcovid19-512-img-png-600-study-png
|
h053473666
|
[{"Id": 69637359, "ScriptId": 19011885, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4239960, "CreationDate": "08/02/2021 12:41:25", "VersionNumber": 5.0, "Title": "resNet10", "EvaluationDate": "08/02/2021", "IsChange": false, "TotalLines": 162.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 162.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 93073360, "KernelVersionId": 69637359, "SourceDatasetVersionId": 2248688}]
|
[{"Id": 2248688, "DatasetId": 1352132, "DatasourceVersionId": 2289605, "CreatorUserId": 6294064, "LicenseName": "Unknown", "CreationDate": "05/19/2021 08:35:45", "VersionNumber": 2.0, "Title": "siim-covid19 512 img png 600 study png", "Slug": "siimcovid19-512-img-png-600-study-png", "Subtitle": NaN, "Description": NaN, "VersionNotes": "2", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1352132, "CreatorUserId": 6294064, "OwnerUserId": 6294064.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2248688.0, "CurrentDatasourceVersionId": 2289605.0, "ForumId": 1371171, "Type": 2, "CreationDate": "05/19/2021 06:00:38", "LastActivityDate": "05/19/2021", "TotalViews": 3585, "TotalDownloads": 750, "TotalVotes": 39, "TotalKernels": 55}]
|
[{"Id": 6294064, "UserName": "h053473666", "DisplayName": "Alien", "RegisterDate": "12/02/2020", "PerformanceTier": 2}]
|
| false | 1 | 1,751 | 0 | 1,804 | 1,751 |
||
69637158
|
<jupyter_start><jupyter_text>WordAndDoc2vec sample data
Kaggle dataset identifier: wordanddoc2vec-sample-data
<jupyter_code>import pandas as pd
df = pd.read_csv('wordanddoc2vec-sample-data/wd2v.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 10000 entries, 0 to 9999
Columns: 2001 entries, Unnamed: 0 to c2000
dtypes: int64(2000), object(1)
memory usage: 152.7+ MB
<jupyter_text>Examples:
{
"Unnamed: 0": "r1",
"c1": 0,
"c2": 0,
"c3": 0,
"c4": 0,
"c5": 0,
"c6": 0,
"c7": 0,
"c8": 0,
"c9": 0,
"c10": 0,
"c11": 0,
"c12": 0,
"c13": 0,
"c14": 0,
"c15": 0,
"c16": 0,
"c17": 0,
"c18": 0,
"c19": 0,
"...": "and 1981 more columns"
}
{
"Unnamed: 0": "r2",
"c1": 0,
"c2": 0,
"c3": 0,
"c4": 0,
"c5": 0,
"c6": 0,
"c7": 0,
"c8": 0,
"c9": 0,
"c10": 0,
"c11": 0,
"c12": 0,
"c13": 0,
"c14": 0,
"c15": 0,
"c16": 0,
"c17": 0,
"c18": 0,
"c19": 0,
"...": "and 1981 more columns"
}
{
"Unnamed: 0": "r3",
"c1": 1,
"c2": 0,
"c3": 0,
"c4": 0,
"c5": 0,
"c6": 0,
"c7": 0,
"c8": 1,
"c9": 0,
"c10": 0,
"c11": 0,
"c12": 0,
"c13": 0,
"c14": 0,
"c15": 0,
"c16": 0,
"c17": 0,
"c18": 0,
"c19": 0,
"...": "and 1981 more columns"
}
{
"Unnamed: 0": "r4",
"c1": 0,
"c2": 0,
"c3": 0,
"c4": 0,
"c5": 0,
"c6": 0,
"c7": 0,
"c8": 0,
"c9": 0,
"c10": 0,
"c11": 0,
"c12": 0,
"c13": 0,
"c14": 0,
"c15": 0,
"c16": 0,
"c17": 0,
"c18": 0,
"c19": 0,
"...": "and 1981 more columns"
}
<jupyter_script># num features = 3
# sample data
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from feature_eng import WordAndDoc2vec, calc_gsim
import os.path
import sys
import re
import itertools
import csv
import datetime
import pickle
import random
from collections import defaultdict, Counter
import gc
import matplotlib.pyplot as plt
from matplotlib.ticker import NullFormatter
import seaborn as sns
import pandas as pd
import numpy as np
import scipy
import gensim
from sklearn.metrics import f1_score, classification_report, confusion_matrix, log_loss
from sklearn.model_selection import train_test_split
import gensim
from keras.preprocessing.sequence import skipgrams
import tensorflow as tf
from keras import backend as K
def hexbin(x, y, color, **kwargs):
cmap = sns.light_palette(color, as_cmap=True)
plt.hexbin(x, y, cmap=cmap, **kwargs)
def scatter(x, y, color, **kwargs):
plt.scatter(x, y, marker=".")
# # Load Sample Data
X_df = pd.read_csv("../input/wordanddoc2vec-sample-data/wd2v.csv", index_col=0)
print(X_df.shape)
X_df.head()
import json
with open("../input/wordanddoc2vec-sample-data/cls_user.json", "r") as f:
cls_user = json.load(f)
with open("../input/wordanddoc2vec-sample-data/cls_prod.json", "r") as f:
cls_prod = json.load(f)
# # Define Original Class for This Methodology
from collections.abc import Sequence
class DocSeq(Sequence):
"""
doc_dic : doc_name (unique)
word_dic : index=0 must be place holder.
"""
def __init__(self, df):
self.df = df
self.cols = self.df.columns.values
self.doc_dic = gensim.corpora.Dictionary(
[df.index.values.tolist()], prune_at=None
)
"""
index=0 must be place holder.
"""
self.word_dic = gensim.corpora.Dictionary([["PL_DUMMY"]], prune_at=None)
self.word_dic.add_documents([list(self.cols)], prune_at=None)
def __getitem__(self, idx):
return self._get(idx)
def _get(self, idx):
try:
ebid = self.doc_dic[idx]
except KeyError:
raise IndexError
irow = self.df.loc[ebid]
res = []
for icol in self.cols:
if irow[icol] == 1:
res.append(icol)
return res
def __len__(self):
return self.df.shape[0]
doc_seq = DocSeq(X_df)
len(doc_seq)
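# Illustrative check (based on the sample rows shown in the dataset preview above):
# row "r3" has c1 == 1 and c8 == 1 among its leading columns, so DocSeq yields the
# names of every 1-valued column for that row, e.g.
#   doc_seq[doc_seq.doc_dic.token2id["r3"]]  # -> ["c1", "c8", ...]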
wd2v = WordAndDoc2vec(doc_seq, doc_seq.word_dic, doc_seq.doc_dic, logging=False)
wd2v
# # Create Model
num_features = 3
# wd2v.make_model(num_features=num_features)
wd2v.make_model(num_features=num_features, embeddings_val=0.1, num_neg=4)
wd2v.models["model"].summary()
wgt_prod = wd2v.wgt_col
print(wgt_prod.shape)
df = pd.DataFrame(wgt_prod[:, :5])
sns.pairplot(df, markers=".")
wgt_user = wd2v.wgt_row
print(wgt_user.shape)
df = pd.DataFrame(wgt_user[:, :5])
sns.pairplot(df, markers=".")
# # Train
from keras.callbacks import ModelCheckpoint, LearningRateScheduler, ReduceLROnPlateau
def lr_schedule(epoch):
lr0 = 0.01
epoch1 = 16
epoch2 = 16
epoch3 = 16
epoch4 = 16
if epoch < epoch1:
lr = lr0
elif epoch < epoch1 + epoch2:
lr = lr0 / 2
elif epoch < epoch1 + epoch2 + epoch3:
lr = lr0 / 4
elif epoch < epoch1 + epoch2 + epoch3 + epoch4:
lr = lr0 / 8
else:
lr = lr0 / 16
if divmod(epoch, 4)[1] == 3:
lr *= 1 / 8
elif divmod(epoch, 4)[1] == 2:
lr *= 1 / 4
elif divmod(epoch, 4)[1] == 1:
lr *= 1 / 2
elif divmod(epoch, 4)[1] == 0:
pass
print("Learning rate: ", lr)
return lr
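# Worked values: the base rate halves every 16 epochs and decays within each
# 4-epoch cycle, e.g. epochs 0-3 -> 0.01, 0.005, 0.0025, 0.00125 and
# epochs 16-19 -> 0.005, 0.0025, 0.00125, 0.000625.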
lr_scheduler = LearningRateScheduler(lr_schedule)
callbacks = [lr_scheduler]
hst = wd2v.train(
epochs=64, verbose=1, use_multiprocessing=True, workers=4, callbacks=callbacks
)
hst_history = hst.history
fig, ax = plt.subplots(1, 3, figsize=(20, 5))
ax[0].set_title("loss")
ax[0].plot(
list(range(len(hst_history["loss"]))), hst_history["loss"], label="Train loss"
)
ax[1].set_title("binary_accuracy")
ax[1].plot(
list(range(len(hst_history["loss"]))),
hst_history["binary_accuracy"],
label="accuracy",
)
ax[2].set_title("learning rate")
ax[2].plot(
list(range(len(hst_history["loss"]))), hst_history["lr"], label="learning rate"
)
ax[0].legend()
ax[1].legend()
ax[2].legend()
# # Show Features (col side)
wgt_prod = wd2v.wgt_col[[wd2v.word_dic.token2id[ee] for ee in X_df.columns]]
print(wgt_prod.shape)
df = pd.DataFrame(wgt_prod[:, :5])
sns.set_context("paper")
g = sns.PairGrid(df, height=3.5)  # "size" was renamed to "height" in newer seaborn
g.map_diag(plt.hist, edgecolor="w")
g.map_lower(scatter)
g.map_upper(hexbin)
df = pd.DataFrame(wgt_prod[:, :5])
df["cls"] = ["zz"] + ["c" + str(ii) for ii in cls_prod[1:]]
sns.pairplot(df, markers="o", hue="cls", height=3.5, diag_kind="hist")
# # Show Features (row side)
wgt_user = wd2v.wgt_row[[wd2v.doc_dic.token2id[ee] for ee in X_df.index]]
print(wgt_user.shape)
df = pd.DataFrame(wgt_user[:, :5])
sns.set_context("paper")
g = sns.PairGrid(df, height=3.5)
g.map_diag(plt.hist, edgecolor="w")
g.map_lower(scatter)
g.map_upper(hexbin)
df = pd.DataFrame(wgt_user[:, :5])
df["cls"] = ["c" + str(ii) for ii in cls_user]
sns.pairplot(df, markers="o", hue="cls", height=3.5, diag_kind="hist")
df1 = pd.DataFrame(wgt_prod)
df1["cls"] = ["ph"] + ["prod" + str(ii) for ii in cls_prod[1:]]
df2 = pd.DataFrame(wgt_user)
df2["cls"] = ["user" + str(ii) for ii in cls_user]
df = pd.concat([df1, df2], axis=0)
df.head()
sns.pairplot(df, markers=["."] * 8 + ["s"] * 7, hue="cls", height=3.5, diag_kind="hist")
# # Similarity
sim = wd2v.sim
print(sim.num_features)
# ## Get Document 'r1'
doc_name = "r1"
"""文書のテキストをIDへ変換"""
doc_id = sim.row_dic.token2id[doc_name]
doc_id
print(doc_name)
query = sim.get_fet_byrow(doc_name)
query
"""
[r1]と似た行を探す
"""
res = sim.get_sim_byrow(query)
res
df = pd.DataFrame(res)
df
df.sort_values(1, ascending=False)
"""
列側
"""
res = sim.get_sim_bycol(query)
res
df = pd.DataFrame(res)
df
df.sort_values(1, ascending=False)
df.sort_values(1, ascending=False).head(20)
# ## Column
word_name = "c1"
query = sim.get_fet_bycol(word_name)
query
res = sim.get_sim_byrow(query)
res
df = pd.DataFrame(res)
df
df.sort_values(1, ascending=False)
df.sort_values(1, ascending=False).head(20)
res = sim.get_sim_bycol(query)
res
df = pd.DataFrame(res)
df
df.sort_values(1, ascending=False)
df.sort_values(1, ascending=False).head(20)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0069/637/69637158.ipynb
|
wordanddoc2vec-sample-data
|
wordroid
|
[{"Id": 69637158, "ScriptId": 19018117, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1187145, "CreationDate": "08/02/2021 12:38:57", "VersionNumber": 1.0, "Title": "usage of WordAndDoc2vec (sample data) v2", "EvaluationDate": "08/02/2021", "IsChange": true, "TotalLines": 291.0, "LinesInsertedFromPrevious": 108.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 183.0, "LinesInsertedFromFork": 108.0, "LinesDeletedFromFork": 87.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 183.0, "TotalVotes": 0}]
|
[{"Id": 93072796, "KernelVersionId": 69637158, "SourceDatasetVersionId": 2491375}]
|
[{"Id": 2491375, "DatasetId": 459705, "DatasourceVersionId": 2533952, "CreatorUserId": 1187145, "LicenseName": "Attribution-NonCommercial 4.0 International (CC BY-NC 4.0)", "CreationDate": "08/02/2021 12:08:27", "VersionNumber": 4.0, "Title": "WordAndDoc2vec sample data", "Slug": "wordanddoc2vec-sample-data", "Subtitle": NaN, "Description": NaN, "VersionNotes": "add", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 459705, "CreatorUserId": 1187145, "OwnerUserId": 1187145.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2491375.0, "CurrentDatasourceVersionId": 2533952.0, "ForumId": 472586, "Type": 2, "CreationDate": "12/29/2019 15:11:43", "LastActivityDate": "12/29/2019", "TotalViews": 1919, "TotalDownloads": 8, "TotalVotes": 1, "TotalKernels": 4}]
|
[{"Id": 1187145, "UserName": "wordroid", "DisplayName": "nor", "RegisterDate": "07/26/2017", "PerformanceTier": 1}]
|
|
[{"wordanddoc2vec-sample-data/wd2v.csv": {"column_names": "[\"Unnamed: 0\", \"c1\", \"c2\", \"c3\", \"c4\", \"c5\", \"c6\", \"c7\", \"c8\", \"c9\", \"c10\", \"c11\", \"c12\", \"c13\", \"c14\", \"c15\", \"c16\", \"c17\", \"c18\", \"c19\", \"c20\", \"c21\", \"c22\", \"c23\", \"c24\", \"c25\", \"c26\", \"c27\", \"c28\", \"c29\", \"c30\", \"c31\", \"c32\", \"c33\", \"c34\", \"c35\", \"c36\", \"c37\", \"c38\", \"c39\", \"c40\", \"c41\", \"c42\", \"c43\", \"c44\", \"c45\", \"c46\", \"c47\", \"c48\", \"c49\", \"c50\", \"c51\", \"c52\", \"c53\", \"c54\", \"c55\", \"c56\", \"c57\", \"c58\", \"c59\", \"c60\", \"c61\", \"c62\", \"c63\", \"c64\", \"c65\", \"c66\", \"c67\", \"c68\", \"c69\", \"c70\", \"c71\", \"c72\", \"c73\", \"c74\", \"c75\", \"c76\", \"c77\", \"c78\", \"c79\", \"c80\", \"c81\", \"c82\", \"c83\", \"c84\", \"c85\", \"c86\", \"c87\", \"c88\", \"c89\", \"c90\", \"c91\", \"c92\", \"c93\", \"c94\", \"c95\", \"c96\", \"c97\", \"c98\", \"c99\", \"c100\", \"c101\", \"c102\", \"c103\", \"c104\", \"c105\", \"c106\", \"c107\", \"c108\", \"c109\", \"c110\", \"c111\", \"c112\", \"c113\", \"c114\", \"c115\", \"c116\", \"c117\", \"c118\", \"c119\", \"c120\", \"c121\", \"c122\", \"c123\", \"c124\", \"c125\", \"c126\", \"c127\", \"c128\", \"c129\", \"c130\", \"c131\", \"c132\", \"c133\", \"c134\", \"c135\", \"c136\", \"c137\", \"c138\", \"c139\", \"c140\", \"c141\", \"c142\", \"c143\", \"c144\", \"c145\", \"c146\", \"c147\", \"c148\", \"c149\", \"c150\", \"c151\", \"c152\", \"c153\", \"c154\", \"c155\", \"c156\", \"c157\", \"c158\", \"c159\", \"c160\", \"c161\", \"c162\", \"c163\", \"c164\", \"c165\", \"c166\", \"c167\", \"c168\", \"c169\", \"c170\", \"c171\", \"c172\", \"c173\", \"c174\", \"c175\", \"c176\", \"c177\", \"c178\", \"c179\", \"c180\", \"c181\", \"c182\", \"c183\", \"c184\", \"c185\", \"c186\", \"c187\", \"c188\", \"c189\", \"c190\", \"c191\", \"c192\", \"c193\", \"c194\", \"c195\", \"c196\", \"c197\", \"c198\", \"c199\", \"c200\", \"c201\", \"c202\", \"c203\", \"c204\", \"c205\", \"c206\", \"c207\", \"c208\", \"c209\", \"c210\", \"c211\", \"c212\", \"c213\", \"c214\", \"c215\", \"c216\", \"c217\", \"c218\", \"c219\", \"c220\", \"c221\", \"c222\", \"c223\", \"c224\", \"c225\", \"c226\", \"c227\", \"c228\", \"c229\", \"c230\", \"c231\", \"c232\", \"c233\", \"c234\", \"c235\", \"c236\", \"c237\", \"c238\", \"c239\", \"c240\", \"c241\", \"c242\", \"c243\", \"c244\", \"c245\", \"c246\", \"c247\", \"c248\", \"c249\", \"c250\", \"c251\", \"c252\", \"c253\", \"c254\", \"c255\", \"c256\", \"c257\", \"c258\", \"c259\", \"c260\", \"c261\", \"c262\", \"c263\", \"c264\", \"c265\", \"c266\", \"c267\", \"c268\", \"c269\", \"c270\", \"c271\", \"c272\", \"c273\", \"c274\", \"c275\", \"c276\", \"c277\", \"c278\", \"c279\", \"c280\", \"c281\", \"c282\", \"c283\", \"c284\", \"c285\", \"c286\", \"c287\", \"c288\", \"c289\", \"c290\", \"c291\", \"c292\", \"c293\", \"c294\", \"c295\", \"c296\", \"c297\", \"c298\", \"c299\", \"c300\", \"c301\", \"c302\", \"c303\", \"c304\", \"c305\", \"c306\", \"c307\", \"c308\", \"c309\", \"c310\", \"c311\", \"c312\", \"c313\", \"c314\", \"c315\", \"c316\", \"c317\", \"c318\", \"c319\", \"c320\", \"c321\", \"c322\", \"c323\", \"c324\", \"c325\", \"c326\", \"c327\", \"c328\", \"c329\", \"c330\", \"c331\", \"c332\", \"c333\", \"c334\", \"c335\", \"c336\", \"c337\", \"c338\", \"c339\", \"c340\", \"c341\", \"c342\", \"c343\", \"c344\", \"c345\", \"c346\", \"c347\", \"c348\", \"c349\", \"c350\", \"c351\", \"c352\", \"c353\", \"c354\", \"c355\", \"c356\", \"c357\", \"c358\", 
\"c359\", \"c360\", \"c361\", \"c362\", \"c363\", \"c364\", \"c365\", \"c366\", \"c367\", \"c368\", \"c369\", \"c370\", \"c371\", \"c372\", \"c373\", \"c374\", \"c375\", \"c376\", \"c377\", \"c378\", \"c379\", \"c380\", \"c381\", \"c382\", \"c383\", \"c384\", \"c385\", \"c386\", \"c387\", \"c388\", \"c389\", \"c390\", \"c391\", \"c392\", \"c393\", \"c394\", \"c395\", \"c396\", \"c397\", \"c398\", \"c399\", \"c400\", \"c401\", \"c402\", \"c403\", \"c404\", \"c405\", \"c406\", \"c407\", \"c408\", \"c409\", \"c410\", \"c411\", \"c412\", \"c413\", \"c414\", \"c415\", \"c416\", \"c417\", \"c418\", \"c419\", \"c420\", \"c421\", \"c422\", \"c423\", \"c424\", \"c425\", \"c426\", \"c427\", \"c428\", \"c429\", \"c430\", \"c431\", \"c432\", \"c433\", \"c434\", \"c435\", \"c436\", \"c437\", \"c438\", \"c439\", \"c440\", \"c441\", \"c442\", \"c443\", \"c444\", \"c445\", \"c446\", \"c447\", \"c448\", \"c449\", \"c450\", \"c451\", \"c452\", \"c453\", \"c454\", \"c455\", \"c456\", \"c457\", \"c458\", \"c459\", \"c460\", \"c461\", \"c462\", \"c463\", \"c464\", \"c465\", \"c466\", \"c467\", \"c468\", \"c469\", \"c470\", \"c471\", \"c472\", \"c473\", \"c474\", \"c475\", \"c476\", \"c477\", \"c478\", \"c479\", \"c480\", \"c481\", \"c482\", \"c483\", \"c484\", \"c485\", \"c486\", \"c487\", \"c488\", \"c489\", \"c490\", \"c491\", \"c492\", \"c493\", \"c494\", \"c495\", \"c496\", \"c497\", \"c498\", \"c499\", \"c500\", \"c501\", \"c502\", \"c503\", \"c504\", \"c505\", \"c506\", \"c507\", \"c508\", \"c509\", \"c510\", \"c511\", \"c512\", \"c513\", \"c514\", \"c515\", \"c516\", \"c517\", \"c518\", \"c519\", \"c520\", \"c521\", \"c522\", \"c523\", \"c524\", \"c525\", \"c526\", \"c527\", \"c528\", \"c529\", \"c530\", \"c531\", \"c532\", \"c533\", \"c534\", \"c535\", \"c536\", \"c537\", \"c538\", \"c539\", \"c540\", \"c541\", \"c542\", \"c543\", \"c544\", \"c545\", \"c546\", \"c547\", \"c548\", \"c549\", \"c550\", \"c551\", \"c552\", \"c553\", \"c554\", \"c555\", \"c556\", \"c557\", \"c558\", \"c559\", \"c560\", \"c561\", \"c562\", \"c563\", \"c564\", \"c565\", \"c566\", \"c567\", \"c568\", \"c569\", \"c570\", \"c571\", \"c572\", \"c573\", \"c574\", \"c575\", \"c576\", \"c577\", \"c578\", \"c579\", \"c580\", \"c581\", \"c582\", \"c583\", \"c584\", \"c585\", \"c586\", \"c587\", \"c588\", \"c589\", \"c590\", \"c591\", \"c592\", \"c593\", \"c594\", \"c595\", \"c596\", \"c597\", \"c598\", \"c599\", \"c600\", \"c601\", \"c602\", \"c603\", \"c604\", \"c605\", \"c606\", \"c607\", \"c608\", \"c609\", \"c610\", \"c611\", \"c612\", \"c613\", \"c614\", \"c615\", \"c616\", \"c617\", \"c618\", \"c619\", \"c620\", \"c621\", \"c622\", \"c623\", \"c624\", \"c625\", \"c626\", \"c627\", \"c628\", \"c629\", \"c630\", \"c631\", \"c632\", \"c633\", \"c634\", \"c635\", \"c636\", \"c637\", \"c638\", \"c639\", \"c640\", \"c641\", \"c642\", \"c643\", \"c644\", \"c645\", \"c646\", \"c647\", \"c648\", \"c649\", \"c650\", \"c651\", \"c652\", \"c653\", \"c654\", \"c655\", \"c656\", \"c657\", \"c658\", \"c659\", \"c660\", \"c661\", \"c662\", \"c663\", \"c664\", \"c665\", \"c666\", \"c667\", \"c668\", \"c669\", \"c670\", \"c671\", \"c672\", \"c673\", \"c674\", \"c675\", \"c676\", \"c677\", \"c678\", \"c679\", \"c680\", \"c681\", \"c682\", \"c683\", \"c684\", \"c685\", \"c686\", \"c687\", \"c688\", \"c689\", \"c690\", \"c691\", \"c692\", \"c693\", \"c694\", \"c695\", \"c696\", \"c697\", \"c698\", \"c699\", \"c700\", \"c701\", \"c702\", \"c703\", \"c704\", \"c705\", \"c706\", \"c707\", \"c708\", \"c709\", \"c710\", \"c711\", \"c712\", \"c713\", 
\"c714\", \"c715\", \"c716\", \"c717\", \"c718\", \"c719\", \"c720\", \"c721\", \"c722\", \"c723\", \"c724\", \"c725\", \"c726\", \"c727\", \"c728\", \"c729\", \"c730\", \"c731\", \"c732\", \"c733\", \"c734\", \"c735\", \"c736\", \"c737\", \"c738\", \"c739\", \"c740\", \"c741\", \"c742\", \"c743\", \"c744\", \"c745\", \"c746\", \"c747\", \"c748\", \"c749\", \"c750\", \"c751\", \"c752\", \"c753\", \"c754\", \"c755\", \"c756\", \"c757\", \"c758\", \"c759\", \"c760\", \"c761\", \"c762\", \"c763\", \"c764\", \"c765\", \"c766\", \"c767\", \"c768\", \"c769\", \"c770\", \"c771\", \"c772\", \"c773\", \"c774\", \"c775\", \"c776\", \"c777\", \"c778\", \"c779\", \"c780\", \"c781\", \"c782\", \"c783\", \"c784\", \"c785\", \"c786\", \"c787\", \"c788\", \"c789\", \"c790\", \"c791\", \"c792\", \"c793\", \"c794\", \"c795\", \"c796\", \"c797\", \"c798\", \"c799\", \"c800\", \"c801\", \"c802\", \"c803\", \"c804\", \"c805\", \"c806\", \"c807\", \"c808\", \"c809\", \"c810\", \"c811\", \"c812\", \"c813\", \"c814\", \"c815\", \"c816\", \"c817\", \"c818\", \"c819\", \"c820\", \"c821\", \"c822\", \"c823\", \"c824\", \"c825\", \"c826\", \"c827\", \"c828\", \"c829\", \"c830\", \"c831\", \"c832\", \"c833\", \"c834\", \"c835\", \"c836\", \"c837\", \"c838\", \"c839\", \"c840\", \"c841\", \"c842\", \"c843\", \"c844\", \"c845\", \"c846\", \"c847\", \"c848\", \"c849\", \"c850\", \"c851\", \"c852\", \"c853\", \"c854\", \"c855\", \"c856\", \"c857\", \"c858\", \"c859\", \"c860\", \"c861\", \"c862\", \"c863\", \"c864\", \"c865\", \"c866\", \"c867\", \"c868\", \"c869\", \"c870\", \"c871\", \"c872\", \"c873\", \"c874\", \"c875\", \"c876\", \"c877\", \"c878\", \"c879\", \"c880\", \"c881\", \"c882\", \"c883\", \"c884\", \"c885\", \"c886\", \"c887\", \"c888\", \"c889\", \"c890\", \"c891\", \"c892\", \"c893\", \"c894\", \"c895\", \"c896\", \"c897\", \"c898\", \"c899\", \"c900\", \"c901\", \"c902\", \"c903\", \"c904\", \"c905\", \"c906\", \"c907\", \"c908\", \"c909\", \"c910\", \"c911\", \"c912\", \"c913\", \"c914\", \"c915\", \"c916\", \"c917\", \"c918\", \"c919\", \"c920\", \"c921\", \"c922\", \"c923\", \"c924\", \"c925\", \"c926\", \"c927\", \"c928\", \"c929\", \"c930\", \"c931\", \"c932\", \"c933\", \"c934\", \"c935\", \"c936\", \"c937\", \"c938\", \"c939\", \"c940\", \"c941\", \"c942\", \"c943\", \"c944\", \"c945\", \"c946\", \"c947\", \"c948\", \"c949\", \"c950\", \"c951\", \"c952\", \"c953\", \"c954\", \"c955\", \"c956\", \"c957\", \"c958\", \"c959\", \"c960\", \"c961\", \"c962\", \"c963\", \"c964\", \"c965\", \"c966\", \"c967\", \"c968\", \"c969\", \"c970\", \"c971\", \"c972\", \"c973\", \"c974\", \"c975\", \"c976\", \"c977\", \"c978\", \"c979\", \"c980\", \"c981\", \"c982\", \"c983\", \"c984\", \"c985\", \"c986\", \"c987\", \"c988\", \"c989\", \"c990\", \"c991\", \"c992\", \"c993\", \"c994\", \"c995\", \"c996\", \"c997\", \"c998\", \"c999\", \"c1000\", \"c1001\", \"c1002\", \"c1003\", \"c1004\", \"c1005\", \"c1006\", \"c1007\", \"c1008\", \"c1009\", \"c1010\", \"c1011\", \"c1012\", \"c1013\", \"c1014\", \"c1015\", \"c1016\", \"c1017\", \"c1018\", \"c1019\", \"c1020\", \"c1021\", \"c1022\", \"c1023\", \"c1024\", \"c1025\", \"c1026\", \"c1027\", \"c1028\", \"c1029\", \"c1030\", \"c1031\", \"c1032\", \"c1033\", \"c1034\", \"c1035\", \"c1036\", \"c1037\", \"c1038\", \"c1039\", \"c1040\", \"c1041\", \"c1042\", \"c1043\", \"c1044\", \"c1045\", \"c1046\", \"c1047\", \"c1048\", \"c1049\", \"c1050\", \"c1051\", \"c1052\", \"c1053\", \"c1054\", \"c1055\", \"c1056\", \"c1057\", \"c1058\", \"c1059\", \"c1060\", \"c1061\", \"c1062\", 
\"c1063\", \"c1064\", \"c1065\", \"c1066\", \"c1067\", \"c1068\", \"c1069\", \"c1070\", \"c1071\", \"c1072\", \"c1073\", \"c1074\", \"c1075\", \"c1076\", \"c1077\", \"c1078\", \"c1079\", \"c1080\", \"c1081\", \"c1082\", \"c1083\", \"c1084\", \"c1085\", \"c1086\", \"c1087\", \"c1088\", \"c1089\", \"c1090\", \"c1091\", \"c1092\", \"c1093\", \"c1094\", \"c1095\", \"c1096\", \"c1097\", \"c1098\", \"c1099\", \"c1100\", \"c1101\", \"c1102\", \"c1103\", \"c1104\", \"c1105\", \"c1106\", \"c1107\", \"c1108\", \"c1109\", \"c1110\", \"c1111\", \"c1112\", \"c1113\", \"c1114\", \"c1115\", \"c1116\", \"c1117\", \"c1118\", \"c1119\", \"c1120\", \"c1121\", \"c1122\", \"c1123\", \"c1124\", \"c1125\", \"c1126\", \"c1127\", \"c1128\", \"c1129\", \"c1130\", \"c1131\", \"c1132\", \"c1133\", \"c1134\", \"c1135\", \"c1136\", \"c1137\", \"c1138\", \"c1139\", \"c1140\", \"c1141\", \"c1142\", \"c1143\", \"c1144\", \"c1145\", \"c1146\", \"c1147\", \"c1148\", \"c1149\", \"c1150\", \"c1151\", \"c1152\", \"c1153\", \"c1154\", \"c1155\", \"c1156\", \"c1157\", \"c1158\", \"c1159\", \"c1160\", \"c1161\", \"c1162\", \"c1163\", \"c1164\", \"c1165\", \"c1166\", \"c1167\", \"c1168\", \"c1169\", \"c1170\", \"c1171\", \"c1172\", \"c1173\", \"c1174\", \"c1175\", \"c1176\", \"c1177\", \"c1178\", \"c1179\", \"c1180\", \"c1181\", \"c1182\", \"c1183\", \"c1184\", \"c1185\", \"c1186\", \"c1187\", \"c1188\", \"c1189\", \"c1190\", \"c1191\", \"c1192\", \"c1193\", \"c1194\", \"c1195\", \"c1196\", \"c1197\", \"c1198\", \"c1199\", \"c1200\", \"c1201\", \"c1202\", \"c1203\", \"c1204\", \"c1205\", \"c1206\", \"c1207\", \"c1208\", \"c1209\", \"c1210\", \"c1211\", \"c1212\", \"c1213\", \"c1214\", \"c1215\", \"c1216\", \"c1217\", \"c1218\", \"c1219\", \"c1220\", \"c1221\", \"c1222\", \"c1223\", \"c1224\", \"c1225\", \"c1226\", \"c1227\", \"c1228\", \"c1229\", \"c1230\", \"c1231\", \"c1232\", \"c1233\", \"c1234\", \"c1235\", \"c1236\", \"c1237\", \"c1238\", \"c1239\", \"c1240\", \"c1241\", \"c1242\", \"c1243\", \"c1244\", \"c1245\", \"c1246\", \"c1247\", \"c1248\", \"c1249\", \"c1250\", \"c1251\", \"c1252\", \"c1253\", \"c1254\", \"c1255\", \"c1256\", \"c1257\", \"c1258\", \"c1259\", \"c1260\", \"c1261\", \"c1262\", \"c1263\", \"c1264\", \"c1265\", \"c1266\", \"c1267\", \"c1268\", \"c1269\", \"c1270\", \"c1271\", \"c1272\", \"c1273\", \"c1274\", \"c1275\", \"c1276\", \"c1277\", \"c1278\", \"c1279\", \"c1280\", \"c1281\", \"c1282\", \"c1283\", \"c1284\", \"c1285\", \"c1286\", \"c1287\", \"c1288\", \"c1289\", \"c1290\", \"c1291\", \"c1292\", \"c1293\", \"c1294\", \"c1295\", \"c1296\", \"c1297\", \"c1298\", \"c1299\", \"c1300\", \"c1301\", \"c1302\", \"c1303\", \"c1304\", \"c1305\", \"c1306\", \"c1307\", \"c1308\", \"c1309\", \"c1310\", \"c1311\", \"c1312\", \"c1313\", \"c1314\", \"c1315\", \"c1316\", \"c1317\", \"c1318\", \"c1319\", \"c1320\", \"c1321\", \"c1322\", \"c1323\", \"c1324\", \"c1325\", \"c1326\", \"c1327\", \"c1328\", \"c1329\", \"c1330\", \"c1331\", \"c1332\", \"c1333\", \"c1334\", \"c1335\", \"c1336\", \"c1337\", \"c1338\", \"c1339\", \"c1340\", \"c1341\", \"c1342\", \"c1343\", \"c1344\", \"c1345\", \"c1346\", \"c1347\", \"c1348\", \"c1349\", \"c1350\", \"c1351\", \"c1352\", \"c1353\", \"c1354\", \"c1355\", \"c1356\", \"c1357\", \"c1358\", \"c1359\", \"c1360\", \"c1361\", \"c1362\", \"c1363\", \"c1364\", \"c1365\", \"c1366\", \"c1367\", \"c1368\", \"c1369\", \"c1370\", \"c1371\", \"c1372\", \"c1373\", \"c1374\", \"c1375\", \"c1376\", \"c1377\", \"c1378\", \"c1379\", \"c1380\", \"c1381\", \"c1382\", \"c1383\", \"c1384\", \"c1385\", 
\"c1386\", \"c1387\", \"c1388\", \"c1389\", \"c1390\", \"c1391\", \"c1392\", \"c1393\", \"c1394\", \"c1395\", \"c1396\", \"c1397\", \"c1398\", \"c1399\", \"c1400\", \"c1401\", \"c1402\", \"c1403\", \"c1404\", \"c1405\", \"c1406\", \"c1407\", \"c1408\", \"c1409\", \"c1410\", \"c1411\", \"c1412\", \"c1413\", \"c1414\", \"c1415\", \"c1416\", \"c1417\", \"c1418\", \"c1419\", \"c1420\", \"c1421\", \"c1422\", \"c1423\", \"c1424\", \"c1425\", \"c1426\", \"c1427\", \"c1428\", \"c1429\", \"c1430\", \"c1431\", \"c1432\", \"c1433\", \"c1434\", \"c1435\", \"c1436\", \"c1437\", \"c1438\", \"c1439\", \"c1440\", \"c1441\", \"c1442\", \"c1443\", \"c1444\", \"c1445\", \"c1446\", \"c1447\", \"c1448\", \"c1449\", \"c1450\", \"c1451\", \"c1452\", \"c1453\", \"c1454\", \"c1455\", \"c1456\", \"c1457\", \"c1458\", \"c1459\", \"c1460\", \"c1461\", \"c1462\", \"c1463\", \"c1464\", \"c1465\", \"c1466\", \"c1467\", \"c1468\", \"c1469\", \"c1470\", \"c1471\", \"c1472\", \"c1473\", \"c1474\", \"c1475\", \"c1476\", \"c1477\", \"c1478\", \"c1479\", \"c1480\", \"c1481\", \"c1482\", \"c1483\", \"c1484\", \"c1485\", \"c1486\", \"c1487\", \"c1488\", \"c1489\", \"c1490\", \"c1491\", \"c1492\", \"c1493\", \"c1494\", \"c1495\", \"c1496\", \"c1497\", \"c1498\", \"c1499\", \"c1500\", \"c1501\", \"c1502\", \"c1503\", \"c1504\", \"c1505\", \"c1506\", \"c1507\", \"c1508\", \"c1509\", \"c1510\", \"c1511\", \"c1512\", \"c1513\", \"c1514\", \"c1515\", \"c1516\", \"c1517\", \"c1518\", \"c1519\", \"c1520\", \"c1521\", \"c1522\", \"c1523\", \"c1524\", \"c1525\", \"c1526\", \"c1527\", \"c1528\", \"c1529\", \"c1530\", \"c1531\", \"c1532\", \"c1533\", \"c1534\", \"c1535\", \"c1536\", \"c1537\", \"c1538\", \"c1539\", \"c1540\", \"c1541\", \"c1542\", \"c1543\", \"c1544\", \"c1545\", \"c1546\", \"c1547\", \"c1548\", \"c1549\", \"c1550\", \"c1551\", \"c1552\", \"c1553\", \"c1554\", \"c1555\", \"c1556\", \"c1557\", \"c1558\", \"c1559\", \"c1560\", \"c1561\", \"c1562\", \"c1563\", \"c1564\", \"c1565\", \"c1566\", \"c1567\", \"c1568\", \"c1569\", \"c1570\", \"c1571\", \"c1572\", \"c1573\", \"c1574\", \"c1575\", \"c1576\", \"c1577\", \"c1578\", \"c1579\", \"c1580\", \"c1581\", \"c1582\", \"c1583\", \"c1584\", \"c1585\", \"c1586\", \"c1587\", \"c1588\", \"c1589\", \"c1590\", \"c1591\", \"c1592\", \"c1593\", \"c1594\", \"c1595\", \"c1596\", \"c1597\", \"c1598\", \"c1599\", \"c1600\", \"c1601\", \"c1602\", \"c1603\", \"c1604\", \"c1605\", \"c1606\", \"c1607\", \"c1608\", \"c1609\", \"c1610\", \"c1611\", \"c1612\", \"c1613\", \"c1614\", \"c1615\", \"c1616\", \"c1617\", \"c1618\", \"c1619\", \"c1620\", \"c1621\", \"c1622\", \"c1623\", \"c1624\", \"c1625\", \"c1626\", \"c1627\", \"c1628\", \"c1629\", \"c1630\", \"c1631\", \"c1632\", \"c1633\", \"c1634\", \"c1635\", \"c1636\", \"c1637\", \"c1638\", \"c1639\", \"c1640\", \"c1641\", \"c1642\", \"c1643\", \"c1644\", \"c1645\", \"c1646\", \"c1647\", \"c1648\", \"c1649\", \"c1650\", \"c1651\", \"c1652\", \"c1653\", \"c1654\", \"c1655\", \"c1656\", \"c1657\", \"c1658\", \"c1659\", \"c1660\", \"c1661\", \"c1662\", \"c1663\", \"c1664\", \"c1665\", \"c1666\", \"c1667\", \"c1668\", \"c1669\", \"c1670\", \"c1671\", \"c1672\", \"c1673\", \"c1674\", \"c1675\", \"c1676\", \"c1677\", \"c1678\", \"c1679\", \"c1680\", \"c1681\", \"c1682\", \"c1683\", \"c1684\", \"c1685\", \"c1686\", \"c1687\", \"c1688\", \"c1689\", \"c1690\", \"c1691\", \"c1692\", \"c1693\", \"c1694\", \"c1695\", \"c1696\", \"c1697\", \"c1698\", \"c1699\", \"c1700\", \"c1701\", \"c1702\", \"c1703\", \"c1704\", \"c1705\", \"c1706\", \"c1707\", \"c1708\", 
\"c1709\", \"c1710\", \"c1711\", \"c1712\", \"c1713\", \"c1714\", \"c1715\", \"c1716\", \"c1717\", \"c1718\", \"c1719\", \"c1720\", \"c1721\", \"c1722\", \"c1723\", \"c1724\", \"c1725\", \"c1726\", \"c1727\", \"c1728\", \"c1729\", \"c1730\", \"c1731\", \"c1732\", \"c1733\", \"c1734\", \"c1735\", \"c1736\", \"c1737\", \"c1738\", \"c1739\", \"c1740\", \"c1741\", \"c1742\", \"c1743\", \"c1744\", \"c1745\", \"c1746\", \"c1747\", \"c1748\", \"c1749\", \"c1750\", \"c1751\", \"c1752\", \"c1753\", \"c1754\", \"c1755\", \"c1756\", \"c1757\", \"c1758\", \"c1759\", \"c1760\", \"c1761\", \"c1762\", \"c1763\", \"c1764\", \"c1765\", \"c1766\", \"c1767\", \"c1768\", \"c1769\", \"c1770\", \"c1771\", \"c1772\", \"c1773\", \"c1774\", \"c1775\", \"c1776\", \"c1777\", \"c1778\", \"c1779\", \"c1780\", \"c1781\", \"c1782\", \"c1783\", \"c1784\", \"c1785\", \"c1786\", \"c1787\", \"c1788\", \"c1789\", \"c1790\", \"c1791\", \"c1792\", \"c1793\", \"c1794\", \"c1795\", \"c1796\", \"c1797\", \"c1798\", \"c1799\", \"c1800\", \"c1801\", \"c1802\", \"c1803\", \"c1804\", \"c1805\", \"c1806\", \"c1807\", \"c1808\", \"c1809\", \"c1810\", \"c1811\", \"c1812\", \"c1813\", \"c1814\", \"c1815\", \"c1816\", \"c1817\", \"c1818\", \"c1819\", \"c1820\", \"c1821\", \"c1822\", \"c1823\", \"c1824\", \"c1825\", \"c1826\", \"c1827\", \"c1828\", \"c1829\", \"c1830\", \"c1831\", \"c1832\", \"c1833\", \"c1834\", \"c1835\", \"c1836\", \"c1837\", \"c1838\", \"c1839\", \"c1840\", \"c1841\", \"c1842\", \"c1843\", \"c1844\", \"c1845\", \"c1846\", \"c1847\", \"c1848\", \"c1849\", \"c1850\", \"c1851\", \"c1852\", \"c1853\", \"c1854\", \"c1855\", \"c1856\", \"c1857\", \"c1858\", \"c1859\", \"c1860\", \"c1861\", \"c1862\", \"c1863\", \"c1864\", \"c1865\", \"c1866\", \"c1867\", \"c1868\", \"c1869\", \"c1870\", \"c1871\", \"c1872\", \"c1873\", \"c1874\", \"c1875\", \"c1876\", \"c1877\", \"c1878\", \"c1879\", \"c1880\", \"c1881\", \"c1882\", \"c1883\", \"c1884\", \"c1885\", \"c1886\", \"c1887\", \"c1888\", \"c1889\", \"c1890\", \"c1891\", \"c1892\", \"c1893\", \"c1894\", \"c1895\", \"c1896\", \"c1897\", \"c1898\", \"c1899\", \"c1900\", \"c1901\", \"c1902\", \"c1903\", \"c1904\", \"c1905\", \"c1906\", \"c1907\", \"c1908\", \"c1909\", \"c1910\", \"c1911\", \"c1912\", \"c1913\", \"c1914\", \"c1915\", \"c1916\", \"c1917\", \"c1918\", \"c1919\", \"c1920\", \"c1921\", \"c1922\", \"c1923\", \"c1924\", \"c1925\", \"c1926\", \"c1927\", \"c1928\", \"c1929\", \"c1930\", \"c1931\", \"c1932\", \"c1933\", \"c1934\", \"c1935\", \"c1936\", \"c1937\", \"c1938\", \"c1939\", \"c1940\", \"c1941\", \"c1942\", \"c1943\", \"c1944\", \"c1945\", \"c1946\", \"c1947\", \"c1948\", \"c1949\", \"c1950\", \"c1951\", \"c1952\", \"c1953\", \"c1954\", \"c1955\", \"c1956\", \"c1957\", \"c1958\", \"c1959\", \"c1960\", \"c1961\", \"c1962\", \"c1963\", \"c1964\", \"c1965\", \"c1966\", \"c1967\", \"c1968\", \"c1969\", \"c1970\", \"c1971\", \"c1972\", \"c1973\", \"c1974\", \"c1975\", \"c1976\", \"c1977\", \"c1978\", \"c1979\", \"c1980\", \"c1981\", \"c1982\", \"c1983\", \"c1984\", \"c1985\", \"c1986\", \"c1987\", \"c1988\", \"c1989\", \"c1990\", \"c1991\", \"c1992\", \"c1993\", \"c1994\", \"c1995\", \"c1996\", \"c1997\", \"c1998\", \"c1999\", \"c2000\"]", "column_data_types": "{\"Unnamed: 0\": \"object\", \"c1\": \"int64\", \"c2\": \"int64\", \"c3\": \"int64\", \"c4\": \"int64\", \"c5\": \"int64\", \"c6\": \"int64\", \"c7\": \"int64\", \"c8\": \"int64\", \"c9\": \"int64\", \"c10\": \"int64\", \"c11\": \"int64\", \"c12\": \"int64\", \"c13\": \"int64\", \"c14\": \"int64\", \"c15\": 
\"int64\", \"c16\": \"int64\", \"c17\": \"int64\", \"c18\": \"int64\", \"c19\": \"int64\", \"c20\": \"int64\", \"c21\": \"int64\", \"c22\": \"int64\", \"c23\": \"int64\", \"c24\": \"int64\", \"c25\": \"int64\", \"c26\": \"int64\", \"c27\": \"int64\", \"c28\": \"int64\", \"c29\": \"int64\", \"c30\": \"int64\", \"c31\": \"int64\", \"c32\": \"int64\", \"c33\": \"int64\", \"c34\": \"int64\", \"c35\": \"int64\", \"c36\": \"int64\", \"c37\": \"int64\", \"c38\": \"int64\", \"c39\": \"int64\", \"c40\": \"int64\", \"c41\": \"int64\", \"c42\": \"int64\", \"c43\": \"int64\", \"c44\": \"int64\", \"c45\": \"int64\", \"c46\": \"int64\", \"c47\": \"int64\", \"c48\": \"int64\", \"c49\": \"int64\", \"c50\": \"int64\", \"c51\": \"int64\", \"c52\": \"int64\", \"c53\": \"int64\", \"c54\": \"int64\", \"c55\": \"int64\", \"c56\": \"int64\", \"c57\": \"int64\", \"c58\": \"int64\", \"c59\": \"int64\", \"c60\": \"int64\", \"c61\": \"int64\", \"c62\": \"int64\", \"c63\": \"int64\", \"c64\": \"int64\", \"c65\": \"int64\", \"c66\": \"int64\", \"c67\": \"int64\", \"c68\": \"int64\", \"c69\": \"int64\", \"c70\": \"int64\", \"c71\": \"int64\", \"c72\": \"int64\", \"c73\": \"int64\", \"c74\": \"int64\", \"c75\": \"int64\", \"c76\": \"int64\", \"c77\": \"int64\", \"c78\": \"int64\", \"c79\": \"int64\", \"c80\": \"int64\", \"c81\": \"int64\", \"c82\": \"int64\", \"c83\": \"int64\", \"c84\": \"int64\", \"c85\": \"int64\", \"c86\": \"int64\", \"c87\": \"int64\", \"c88\": \"int64\", \"c89\": \"int64\", \"c90\": \"int64\", \"c91\": \"int64\", \"c92\": \"int64\", \"c93\": \"int64\", \"c94\": \"int64\", \"c95\": \"int64\", \"c96\": \"int64\", \"c97\": \"int64\", \"c98\": \"int64\", \"c99\": \"int64\", \"c100\": \"int64\", \"c101\": \"int64\", \"c102\": \"int64\", \"c103\": \"int64\", \"c104\": \"int64\", \"c105\": \"int64\", \"c106\": \"int64\", \"c107\": \"int64\", \"c108\": \"int64\", \"c109\": \"int64\", \"c110\": \"int64\", \"c111\": \"int64\", \"c112\": \"int64\", \"c113\": \"int64\", \"c114\": \"int64\", \"c115\": \"int64\", \"c116\": \"int64\", \"c117\": \"int64\", \"c118\": \"int64\", \"c119\": \"int64\", \"c120\": \"int64\", \"c121\": \"int64\", \"c122\": \"int64\", \"c123\": \"int64\", \"c124\": \"int64\", \"c125\": \"int64\", \"c126\": \"int64\", \"c127\": \"int64\", \"c128\": \"int64\", \"c129\": \"int64\", \"c130\": \"int64\", \"c131\": \"int64\", \"c132\": \"int64\", \"c133\": \"int64\", \"c134\": \"int64\", \"c135\": \"int64\", \"c136\": \"int64\", \"c137\": \"int64\", \"c138\": \"int64\", \"c139\": \"int64\", \"c140\": \"int64\", \"c141\": \"int64\", \"c142\": \"int64\", \"c143\": \"int64\", \"c144\": \"int64\", \"c145\": \"int64\", \"c146\": \"int64\", \"c147\": \"int64\", \"c148\": \"int64\", \"c149\": \"int64\", \"c150\": \"int64\", \"c151\": \"int64\", \"c152\": \"int64\", \"c153\": \"int64\", \"c154\": \"int64\", \"c155\": \"int64\", \"c156\": \"int64\", \"c157\": \"int64\", \"c158\": \"int64\", \"c159\": \"int64\", \"c160\": \"int64\", \"c161\": \"int64\", \"c162\": \"int64\", \"c163\": \"int64\", \"c164\": \"int64\", \"c165\": \"int64\", \"c166\": \"int64\", \"c167\": \"int64\", \"c168\": \"int64\", \"c169\": \"int64\", \"c170\": \"int64\", \"c171\": \"int64\", \"c172\": \"int64\", \"c173\": \"int64\", \"c174\": \"int64\", \"c175\": \"int64\", \"c176\": \"int64\", \"c177\": \"int64\", \"c178\": \"int64\", \"c179\": \"int64\", \"c180\": \"int64\", \"c181\": \"int64\", \"c182\": \"int64\", \"c183\": \"int64\", \"c184\": \"int64\", \"c185\": \"int64\", \"c186\": \"int64\", \"c187\": \"int64\", \"c188\": 
\"int64\", \"c189\": \"int64\", \"c190\": \"int64\", \"c191\": \"int64\", \"c192\": \"int64\", \"c193\": \"int64\", \"c194\": \"int64\", \"c195\": \"int64\", \"c196\": \"int64\", \"c197\": \"int64\", \"c198\": \"int64\", \"c199\": \"int64\", \"c200\": \"int64\", \"c201\": \"int64\", \"c202\": \"int64\", \"c203\": \"int64\", \"c204\": \"int64\", \"c205\": \"int64\", \"c206\": \"int64\", \"c207\": \"int64\", \"c208\": \"int64\", \"c209\": \"int64\", \"c210\": \"int64\", \"c211\": \"int64\", \"c212\": \"int64\", \"c213\": \"int64\", \"c214\": \"int64\", \"c215\": \"int64\", \"c216\": \"int64\", \"c217\": \"int64\", \"c218\": \"int64\", \"c219\": \"int64\", \"c220\": \"int64\", \"c221\": \"int64\", \"c222\": \"int64\", \"c223\": \"int64\", \"c224\": \"int64\", \"c225\": \"int64\", \"c226\": \"int64\", \"c227\": \"int64\", \"c228\": \"int64\", \"c229\": \"int64\", \"c230\": \"int64\", \"c231\": \"int64\", \"c232\": \"int64\", \"c233\": \"int64\", \"c234\": \"int64\", \"c235\": \"int64\", \"c236\": \"int64\", \"c237\": \"int64\", \"c238\": \"int64\", \"c239\": \"int64\", \"c240\": \"int64\", \"c241\": \"int64\", \"c242\": \"int64\", \"c243\": \"int64\", \"c244\": \"int64\", \"c245\": \"int64\", \"c246\": \"int64\", \"c247\": \"int64\", \"c248\": \"int64\", \"c249\": \"int64\", \"c250\": \"int64\", \"c251\": \"int64\", \"c252\": \"int64\", \"c253\": \"int64\", \"c254\": \"int64\", \"c255\": \"int64\", \"c256\": \"int64\", \"c257\": \"int64\", \"c258\": \"int64\", \"c259\": \"int64\", \"c260\": \"int64\", \"c261\": \"int64\", \"c262\": \"int64\", \"c263\": \"int64\", \"c264\": \"int64\", \"c265\": \"int64\", \"c266\": \"int64\", \"c267\": \"int64\", \"c268\": \"int64\", \"c269\": \"int64\", \"c270\": \"int64\", \"c271\": \"int64\", \"c272\": \"int64\", \"c273\": \"int64\", \"c274\": \"int64\", \"c275\": \"int64\", \"c276\": \"int64\", \"c277\": \"int64\", \"c278\": \"int64\", \"c279\": \"int64\", \"c280\": \"int64\", \"c281\": \"int64\", \"c282\": \"int64\", \"c283\": \"int64\", \"c284\": \"int64\", \"c285\": \"int64\", \"c286\": \"int64\", \"c287\": \"int64\", \"c288\": \"int64\", \"c289\": \"int64\", \"c290\": \"int64\", \"c291\": \"int64\", \"c292\": \"int64\", \"c293\": \"int64\", \"c294\": \"int64\", \"c295\": \"int64\", \"c296\": \"int64\", \"c297\": \"int64\", \"c298\": \"int64\", \"c299\": \"int64\", \"c300\": \"int64\", \"c301\": \"int64\", \"c302\": \"int64\", \"c303\": \"int64\", \"c304\": \"int64\", \"c305\": \"int64\", \"c306\": \"int64\", \"c307\": \"int64\", \"c308\": \"int64\", \"c309\": \"int64\", \"c310\": \"int64\", \"c311\": \"int64\", \"c312\": \"int64\", \"c313\": \"int64\", \"c314\": \"int64\", \"c315\": \"int64\", \"c316\": \"int64\", \"c317\": \"int64\", \"c318\": \"int64\", \"c319\": \"int64\", \"c320\": \"int64\", \"c321\": \"int64\", \"c322\": \"int64\", \"c323\": \"int64\", \"c324\": \"int64\", \"c325\": \"int64\", \"c326\": \"int64\", \"c327\": \"int64\", \"c328\": \"int64\", \"c329\": \"int64\", \"c330\": \"int64\", \"c331\": \"int64\", \"c332\": \"int64\", \"c333\": \"int64\", \"c334\": \"int64\", \"c335\": \"int64\", \"c336\": \"int64\", \"c337\": \"int64\", \"c338\": \"int64\", \"c339\": \"int64\", \"c340\": \"int64\", \"c341\": \"int64\", \"c342\": \"int64\", \"c343\": \"int64\", \"c344\": \"int64\", \"c345\": \"int64\", \"c346\": \"int64\", \"c347\": \"int64\", \"c348\": \"int64\", \"c349\": \"int64\", \"c350\": \"int64\", \"c351\": \"int64\", \"c352\": \"int64\", \"c353\": \"int64\", \"c354\": \"int64\", \"c355\": \"int64\", \"c356\": \"int64\", \"c357\": 
\"int64\", \"c358\": \"int64\", \"c359\": \"int64\", \"c360\": \"int64\", \"c361\": \"int64\", \"c362\": \"int64\", \"c363\": \"int64\", \"c364\": \"int64\", \"c365\": \"int64\", \"c366\": \"int64\", \"c367\": \"int64\", \"c368\": \"int64\", \"c369\": \"int64\", \"c370\": \"int64\", \"c371\": \"int64\", \"c372\": \"int64\", \"c373\": \"int64\", \"c374\": \"int64\", \"c375\": \"int64\", \"c376\": \"int64\", \"c377\": \"int64\", \"c378\": \"int64\", \"c379\": \"int64\", \"c380\": \"int64\", \"c381\": \"int64\", \"c382\": \"int64\", \"c383\": \"int64\", \"c384\": \"int64\", \"c385\": \"int64\", \"c386\": \"int64\", \"c387\": \"int64\", \"c388\": \"int64\", \"c389\": \"int64\", \"c390\": \"int64\", \"c391\": \"int64\", \"c392\": \"int64\", \"c393\": \"int64\", \"c394\": \"int64\", \"c395\": \"int64\", \"c396\": \"int64\", \"c397\": \"int64\", \"c398\": \"int64\", \"c399\": \"int64\", \"c400\": \"int64\", \"c401\": \"int64\", \"c402\": \"int64\", \"c403\": \"int64\", \"c404\": \"int64\", \"c405\": \"int64\", \"c406\": \"int64\", \"c407\": \"int64\", \"c408\": \"int64\", \"c409\": \"int64\", \"c410\": \"int64\", \"c411\": \"int64\", \"c412\": \"int64\", \"c413\": \"int64\", \"c414\": \"int64\", \"c415\": \"int64\", \"c416\": \"int64\", \"c417\": \"int64\", \"c418\": \"int64\", \"c419\": \"int64\", \"c420\": \"int64\", \"c421\": \"int64\", \"c422\": \"int64\", \"c423\": \"int64\", \"c424\": \"int64\", \"c425\": \"int64\", \"c426\": \"int64\", \"c427\": \"int64\", \"c428\": \"int64\", \"c429\": \"int64\", \"c430\": \"int64\", \"c431\": \"int64\", \"c432\": \"int64\", \"c433\": \"int64\", \"c434\": \"int64\", \"c435\": \"int64\", \"c436\": \"int64\", \"c437\": \"int64\", \"c438\": \"int64\", \"c439\": \"int64\", \"c440\": \"int64\", \"c441\": \"int64\", \"c442\": \"int64\", \"c443\": \"int64\", \"c444\": \"int64\", \"c445\": \"int64\", \"c446\": \"int64\", \"c447\": \"int64\", \"c448\": \"int64\", \"c449\": \"int64\", \"c450\": \"int64\", \"c451\": \"int64\", \"c452\": \"int64\", \"c453\": \"int64\", \"c454\": \"int64\", \"c455\": \"int64\", \"c456\": \"int64\", \"c457\": \"int64\", \"c458\": \"int64\", \"c459\": \"int64\", \"c460\": \"int64\", \"c461\": \"int64\", \"c462\": \"int64\", \"c463\": \"int64\", \"c464\": \"int64\", \"c465\": \"int64\", \"c466\": \"int64\", \"c467\": \"int64\", \"c468\": \"int64\", \"c469\": \"int64\", \"c470\": \"int64\", \"c471\": \"int64\", \"c472\": \"int64\", \"c473\": \"int64\", \"c474\": \"int64\", \"c475\": \"int64\", \"c476\": \"int64\", \"c477\": \"int64\", \"c478\": \"int64\", \"c479\": \"int64\", \"c480\": \"int64\", \"c481\": \"int64\", \"c482\": \"int64\", \"c483\": \"int64\", \"c484\": \"int64\", \"c485\": \"int64\", \"c486\": \"int64\", \"c487\": \"int64\", \"c488\": \"int64\", \"c489\": \"int64\", \"c490\": \"int64\", \"c491\": \"int64\", \"c492\": \"int64\", \"c493\": \"int64\", \"c494\": \"int64\", \"c495\": \"int64\", \"c496\": \"int64\", \"c497\": \"int64\", \"c498\": \"int64\", \"c499\": \"int64\", \"c500\": \"int64\", \"c501\": \"int64\", \"c502\": \"int64\", \"c503\": \"int64\", \"c504\": \"int64\", \"c505\": \"int64\", \"c506\": \"int64\", \"c507\": \"int64\", \"c508\": \"int64\", \"c509\": \"int64\", \"c510\": \"int64\", \"c511\": \"int64\", \"c512\": \"int64\", \"c513\": \"int64\", \"c514\": \"int64\", \"c515\": \"int64\", \"c516\": \"int64\", \"c517\": \"int64\", \"c518\": \"int64\", \"c519\": \"int64\", \"c520\": \"int64\", \"c521\": \"int64\", \"c522\": \"int64\", \"c523\": \"int64\", \"c524\": \"int64\", \"c525\": \"int64\", \"c526\": 
\"int64\", \"c527\": \"int64\", \"c528\": \"int64\", \"c529\": \"int64\", \"c530\": \"int64\", \"c531\": \"int64\", \"c532\": \"int64\", \"c533\": \"int64\", \"c534\": \"int64\", \"c535\": \"int64\", \"c536\": \"int64\", \"c537\": \"int64\", \"c538\": \"int64\", \"c539\": \"int64\", \"c540\": \"int64\", \"c541\": \"int64\", \"c542\": \"int64\", \"c543\": \"int64\", \"c544\": \"int64\", \"c545\": \"int64\", \"c546\": \"int64\", \"c547\": \"int64\", \"c548\": \"int64\", \"c549\": \"int64\", \"c550\": \"int64\", \"c551\": \"int64\", \"c552\": \"int64\", \"c553\": \"int64\", \"c554\": \"int64\", \"c555\": \"int64\", \"c556\": \"int64\", \"c557\": \"int64\", \"c558\": \"int64\", \"c559\": \"int64\", \"c560\": \"int64\", \"c561\": \"int64\", \"c562\": \"int64\", \"c563\": \"int64\", \"c564\": \"int64\", \"c565\": \"int64\", \"c566\": \"int64\", \"c567\": \"int64\", \"c568\": \"int64\", \"c569\": \"int64\", \"c570\": \"int64\", \"c571\": \"int64\", \"c572\": \"int64\", \"c573\": \"int64\", \"c574\": \"int64\", \"c575\": \"int64\", \"c576\": \"int64\", \"c577\": \"int64\", \"c578\": \"int64\", \"c579\": \"int64\", \"c580\": \"int64\", \"c581\": \"int64\", \"c582\": \"int64\", \"c583\": \"int64\", \"c584\": \"int64\", \"c585\": \"int64\", \"c586\": \"int64\", \"c587\": \"int64\", \"c588\": \"int64\", \"c589\": \"int64\", \"c590\": \"int64\", \"c591\": \"int64\", \"c592\": \"int64\", \"c593\": \"int64\", \"c594\": \"int64\", \"c595\": \"int64\", \"c596\": \"int64\", \"c597\": \"int64\", \"c598\": \"int64\", \"c599\": \"int64\", \"c600\": \"int64\", \"c601\": \"int64\", \"c602\": \"int64\", \"c603\": \"int64\", \"c604\": \"int64\", \"c605\": \"int64\", \"c606\": \"int64\", \"c607\": \"int64\", \"c608\": \"int64\", \"c609\": \"int64\", \"c610\": \"int64\", \"c611\": \"int64\", \"c612\": \"int64\", \"c613\": \"int64\", \"c614\": \"int64\", \"c615\": \"int64\", \"c616\": \"int64\", \"c617\": \"int64\", \"c618\": \"int64\", \"c619\": \"int64\", \"c620\": \"int64\", \"c621\": \"int64\", \"c622\": \"int64\", \"c623\": \"int64\", \"c624\": \"int64\", \"c625\": \"int64\", \"c626\": \"int64\", \"c627\": \"int64\", \"c628\": \"int64\", \"c629\": \"int64\", \"c630\": \"int64\", \"c631\": \"int64\", \"c632\": \"int64\", \"c633\": \"int64\", \"c634\": \"int64\", \"c635\": \"int64\", \"c636\": \"int64\", \"c637\": \"int64\", \"c638\": \"int64\", \"c639\": \"int64\", \"c640\": \"int64\", \"c641\": \"int64\", \"c642\": \"int64\", \"c643\": \"int64\", \"c644\": \"int64\", \"c645\": \"int64\", \"c646\": \"int64\", \"c647\": \"int64\", \"c648\": \"int64\", \"c649\": \"int64\", \"c650\": \"int64\", \"c651\": \"int64\", \"c652\": \"int64\", \"c653\": \"int64\", \"c654\": \"int64\", \"c655\": \"int64\", \"c656\": \"int64\", \"c657\": \"int64\", \"c658\": \"int64\", \"c659\": \"int64\", \"c660\": \"int64\", \"c661\": \"int64\", \"c662\": \"int64\", \"c663\": \"int64\", \"c664\": \"int64\", \"c665\": \"int64\", \"c666\": \"int64\", \"c667\": \"int64\", \"c668\": \"int64\", \"c669\": \"int64\", \"c670\": \"int64\", \"c671\": \"int64\", \"c672\": \"int64\", \"c673\": \"int64\", \"c674\": \"int64\", \"c675\": \"int64\", \"c676\": \"int64\", \"c677\": \"int64\", \"c678\": \"int64\", \"c679\": \"int64\", \"c680\": \"int64\", \"c681\": \"int64\", \"c682\": \"int64\", \"c683\": \"int64\", \"c684\": \"int64\", \"c685\": \"int64\", \"c686\": \"int64\", \"c687\": \"int64\", \"c688\": \"int64\", \"c689\": \"int64\", \"c690\": \"int64\", \"c691\": \"int64\", \"c692\": \"int64\", \"c693\": \"int64\", \"c694\": \"int64\", \"c695\": 
\"int64\", \"c696\": \"int64\", \"c697\": \"int64\", \"c698\": \"int64\", \"c699\": \"int64\", \"c700\": \"int64\", \"c701\": \"int64\", \"c702\": \"int64\", \"c703\": \"int64\", \"c704\": \"int64\", \"c705\": \"int64\", \"c706\": \"int64\", \"c707\": \"int64\", \"c708\": \"int64\", \"c709\": \"int64\", \"c710\": \"int64\", \"c711\": \"int64\", \"c712\": \"int64\", \"c713\": \"int64\", \"c714\": \"int64\", \"c715\": \"int64\", \"c716\": \"int64\", \"c717\": \"int64\", \"c718\": \"int64\", \"c719\": \"int64\", \"c720\": \"int64\", \"c721\": \"int64\", \"c722\": \"int64\", \"c723\": \"int64\", \"c724\": \"int64\", \"c725\": \"int64\", \"c726\": \"int64\", \"c727\": \"int64\", \"c728\": \"int64\", \"c729\": \"int64\", \"c730\": \"int64\", \"c731\": \"int64\", \"c732\": \"int64\", \"c733\": \"int64\", \"c734\": \"int64\", \"c735\": \"int64\", \"c736\": \"int64\", \"c737\": \"int64\", \"c738\": \"int64\", \"c739\": \"int64\", \"c740\": \"int64\", \"c741\": \"int64\", \"c742\": \"int64\", \"c743\": \"int64\", \"c744\": \"int64\", \"c745\": \"int64\", \"c746\": \"int64\", \"c747\": \"int64\", \"c748\": \"int64\", \"c749\": \"int64\", \"c750\": \"int64\", \"c751\": \"int64\", \"c752\": \"int64\", \"c753\": \"int64\", \"c754\": \"int64\", \"c755\": \"int64\", \"c756\": \"int64\", \"c757\": \"int64\", \"c758\": \"int64\", \"c759\": \"int64\", \"c760\": \"int64\", \"c761\": \"int64\", \"c762\": \"int64\", \"c763\": \"int64\", \"c764\": \"int64\", \"c765\": \"int64\", \"c766\": \"int64\", \"c767\": \"int64\", \"c768\": \"int64\", \"c769\": \"int64\", \"c770\": \"int64\", \"c771\": \"int64\", \"c772\": \"int64\", \"c773\": \"int64\", \"c774\": \"int64\", \"c775\": \"int64\", \"c776\": \"int64\", \"c777\": \"int64\", \"c778\": \"int64\", \"c779\": \"int64\", \"c780\": \"int64\", \"c781\": \"int64\", \"c782\": \"int64\", \"c783\": \"int64\", \"c784\": \"int64\", \"c785\": \"int64\", \"c786\": \"int64\", \"c787\": \"int64\", \"c788\": \"int64\", \"c789\": \"int64\", \"c790\": \"int64\", \"c791\": \"int64\", \"c792\": \"int64\", \"c793\": \"int64\", \"c794\": \"int64\", \"c795\": \"int64\", \"c796\": \"int64\", \"c797\": \"int64\", \"c798\": \"int64\", \"c799\": \"int64\", \"c800\": \"int64\", \"c801\": \"int64\", \"c802\": \"int64\", \"c803\": \"int64\", \"c804\": \"int64\", \"c805\": \"int64\", \"c806\": \"int64\", \"c807\": \"int64\", \"c808\": \"int64\", \"c809\": \"int64\", \"c810\": \"int64\", \"c811\": \"int64\", \"c812\": \"int64\", \"c813\": \"int64\", \"c814\": \"int64\", \"c815\": \"int64\", \"c816\": \"int64\", \"c817\": \"int64\", \"c818\": \"int64\", \"c819\": \"int64\", \"c820\": \"int64\", \"c821\": \"int64\", \"c822\": \"int64\", \"c823\": \"int64\", \"c824\": \"int64\", \"c825\": \"int64\", \"c826\": \"int64\", \"c827\": \"int64\", \"c828\": \"int64\", \"c829\": \"int64\", \"c830\": \"int64\", \"c831\": \"int64\", \"c832\": \"int64\", \"c833\": \"int64\", \"c834\": \"int64\", \"c835\": \"int64\", \"c836\": \"int64\", \"c837\": \"int64\", \"c838\": \"int64\", \"c839\": \"int64\", \"c840\": \"int64\", \"c841\": \"int64\", \"c842\": \"int64\", \"c843\": \"int64\", \"c844\": \"int64\", \"c845\": \"int64\", \"c846\": \"int64\", \"c847\": \"int64\", \"c848\": \"int64\", \"c849\": \"int64\", \"c850\": \"int64\", \"c851\": \"int64\", \"c852\": \"int64\", \"c853\": \"int64\", \"c854\": \"int64\", \"c855\": \"int64\", \"c856\": \"int64\", \"c857\": \"int64\", \"c858\": \"int64\", \"c859\": \"int64\", \"c860\": \"int64\", \"c861\": \"int64\", \"c862\": \"int64\", \"c863\": \"int64\", \"c864\": 
\"int64\", \"c865\": \"int64\", \"c866\": \"int64\", \"c867\": \"int64\", \"c868\": \"int64\", \"c869\": \"int64\", \"c870\": \"int64\", \"c871\": \"int64\", \"c872\": \"int64\", \"c873\": \"int64\", \"c874\": \"int64\", \"c875\": \"int64\", \"c876\": \"int64\", \"c877\": \"int64\", \"c878\": \"int64\", \"c879\": \"int64\", \"c880\": \"int64\", \"c881\": \"int64\", \"c882\": \"int64\", \"c883\": \"int64\", \"c884\": \"int64\", \"c885\": \"int64\", \"c886\": \"int64\", \"c887\": \"int64\", \"c888\": \"int64\", \"c889\": \"int64\", \"c890\": \"int64\", \"c891\": \"int64\", \"c892\": \"int64\", \"c893\": \"int64\", \"c894\": \"int64\", \"c895\": \"int64\", \"c896\": \"int64\", \"c897\": \"int64\", \"c898\": \"int64\", \"c899\": \"int64\", \"c900\": \"int64\", \"c901\": \"int64\", \"c902\": \"int64\", \"c903\": \"int64\", \"c904\": \"int64\", \"c905\": \"int64\", \"c906\": \"int64\", \"c907\": \"int64\", \"c908\": \"int64\", \"c909\": \"int64\", \"c910\": \"int64\", \"c911\": \"int64\", \"c912\": \"int64\", \"c913\": \"int64\", \"c914\": \"int64\", \"c915\": \"int64\", \"c916\": \"int64\", \"c917\": \"int64\", \"c918\": \"int64\", \"c919\": \"int64\", \"c920\": \"int64\", \"c921\": \"int64\", \"c922\": \"int64\", \"c923\": \"int64\", \"c924\": \"int64\", \"c925\": \"int64\", \"c926\": \"int64\", \"c927\": \"int64\", \"c928\": \"int64\", \"c929\": \"int64\", \"c930\": \"int64\", \"c931\": \"int64\", \"c932\": \"int64\", \"c933\": \"int64\", \"c934\": \"int64\", \"c935\": \"int64\", \"c936\": \"int64\", \"c937\": \"int64\", \"c938\": \"int64\", \"c939\": \"int64\", \"c940\": \"int64\", \"c941\": \"int64\", \"c942\": \"int64\", \"c943\": \"int64\", \"c944\": \"int64\", \"c945\": \"int64\", \"c946\": \"int64\", \"c947\": \"int64\", \"c948\": \"int64\", \"c949\": \"int64\", \"c950\": \"int64\", \"c951\": \"int64\", \"c952\": \"int64\", \"c953\": \"int64\", \"c954\": \"int64\", \"c955\": \"int64\", \"c956\": \"int64\", \"c957\": \"int64\", \"c958\": \"int64\", \"c959\": \"int64\", \"c960\": \"int64\", \"c961\": \"int64\", \"c962\": \"int64\", \"c963\": \"int64\", \"c964\": \"int64\", \"c965\": \"int64\", \"c966\": \"int64\", \"c967\": \"int64\", \"c968\": \"int64\", \"c969\": \"int64\", \"c970\": \"int64\", \"c971\": \"int64\", \"c972\": \"int64\", \"c973\": \"int64\", \"c974\": \"int64\", \"c975\": \"int64\", \"c976\": \"int64\", \"c977\": \"int64\", \"c978\": \"int64\", \"c979\": \"int64\", \"c980\": \"int64\", \"c981\": \"int64\", \"c982\": \"int64\", \"c983\": \"int64\", \"c984\": \"int64\", \"c985\": \"int64\", \"c986\": \"int64\", \"c987\": \"int64\", \"c988\": \"int64\", \"c989\": \"int64\", \"c990\": \"int64\", \"c991\": \"int64\", \"c992\": \"int64\", \"c993\": \"int64\", \"c994\": \"int64\", \"c995\": \"int64\", \"c996\": \"int64\", \"c997\": \"int64\", \"c998\": \"int64\", \"c999\": \"int64\", \"c1000\": \"int64\", \"c1001\": \"int64\", \"c1002\": \"int64\", \"c1003\": \"int64\", \"c1004\": \"int64\", \"c1005\": \"int64\", \"c1006\": \"int64\", \"c1007\": \"int64\", \"c1008\": \"int64\", \"c1009\": \"int64\", \"c1010\": \"int64\", \"c1011\": \"int64\", \"c1012\": \"int64\", \"c1013\": \"int64\", \"c1014\": \"int64\", \"c1015\": \"int64\", \"c1016\": \"int64\", \"c1017\": \"int64\", \"c1018\": \"int64\", \"c1019\": \"int64\", \"c1020\": \"int64\", \"c1021\": \"int64\", \"c1022\": \"int64\", \"c1023\": \"int64\", \"c1024\": \"int64\", \"c1025\": \"int64\", \"c1026\": \"int64\", \"c1027\": \"int64\", \"c1028\": \"int64\", \"c1029\": \"int64\", \"c1030\": \"int64\", \"c1031\": \"int64\", 
\"c1032\": \"int64\", \"c1033\": \"int64\", \"c1034\": \"int64\", \"c1035\": \"int64\", \"c1036\": \"int64\", \"c1037\": \"int64\", \"c1038\": \"int64\", \"c1039\": \"int64\", \"c1040\": \"int64\", \"c1041\": \"int64\", \"c1042\": \"int64\", \"c1043\": \"int64\", \"c1044\": \"int64\", \"c1045\": \"int64\", \"c1046\": \"int64\", \"c1047\": \"int64\", \"c1048\": \"int64\", \"c1049\": \"int64\", \"c1050\": \"int64\", \"c1051\": \"int64\", \"c1052\": \"int64\", \"c1053\": \"int64\", \"c1054\": \"int64\", \"c1055\": \"int64\", \"c1056\": \"int64\", \"c1057\": \"int64\", \"c1058\": \"int64\", \"c1059\": \"int64\", \"c1060\": \"int64\", \"c1061\": \"int64\", \"c1062\": \"int64\", \"c1063\": \"int64\", \"c1064\": \"int64\", \"c1065\": \"int64\", \"c1066\": \"int64\", \"c1067\": \"int64\", \"c1068\": \"int64\", \"c1069\": \"int64\", \"c1070\": \"int64\", \"c1071\": \"int64\", \"c1072\": \"int64\", \"c1073\": \"int64\", \"c1074\": \"int64\", \"c1075\": \"int64\", \"c1076\": \"int64\", \"c1077\": \"int64\", \"c1078\": \"int64\", \"c1079\": \"int64\", \"c1080\": \"int64\", \"c1081\": \"int64\", \"c1082\": \"int64\", \"c1083\": \"int64\", \"c1084\": \"int64\", \"c1085\": \"int64\", \"c1086\": \"int64\", \"c1087\": \"int64\", \"c1088\": \"int64\", \"c1089\": \"int64\", \"c1090\": \"int64\", \"c1091\": \"int64\", \"c1092\": \"int64\", \"c1093\": \"int64\", \"c1094\": \"int64\", \"c1095\": \"int64\", \"c1096\": \"int64\", \"c1097\": \"int64\", \"c1098\": \"int64\", \"c1099\": \"int64\", \"c1100\": \"int64\", \"c1101\": \"int64\", \"c1102\": \"int64\", \"c1103\": \"int64\", \"c1104\": \"int64\", \"c1105\": \"int64\", \"c1106\": \"int64\", \"c1107\": \"int64\", \"c1108\": \"int64\", \"c1109\": \"int64\", \"c1110\": \"int64\", \"c1111\": \"int64\", \"c1112\": \"int64\", \"c1113\": \"int64\", \"c1114\": \"int64\", \"c1115\": \"int64\", \"c1116\": \"int64\", \"c1117\": \"int64\", \"c1118\": \"int64\", \"c1119\": \"int64\", \"c1120\": \"int64\", \"c1121\": \"int64\", \"c1122\": \"int64\", \"c1123\": \"int64\", \"c1124\": \"int64\", \"c1125\": \"int64\", \"c1126\": \"int64\", \"c1127\": \"int64\", \"c1128\": \"int64\", \"c1129\": \"int64\", \"c1130\": \"int64\", \"c1131\": \"int64\", \"c1132\": \"int64\", \"c1133\": \"int64\", \"c1134\": \"int64\", \"c1135\": \"int64\", \"c1136\": \"int64\", \"c1137\": \"int64\", \"c1138\": \"int64\", \"c1139\": \"int64\", \"c1140\": \"int64\", \"c1141\": \"int64\", \"c1142\": \"int64\", \"c1143\": \"int64\", \"c1144\": \"int64\", \"c1145\": \"int64\", \"c1146\": \"int64\", \"c1147\": \"int64\", \"c1148\": \"int64\", \"c1149\": \"int64\", \"c1150\": \"int64\", \"c1151\": \"int64\", \"c1152\": \"int64\", \"c1153\": \"int64\", \"c1154\": \"int64\", \"c1155\": \"int64\", \"c1156\": \"int64\", \"c1157\": \"int64\", \"c1158\": \"int64\", \"c1159\": \"int64\", \"c1160\": \"int64\", \"c1161\": \"int64\", \"c1162\": \"int64\", \"c1163\": \"int64\", \"c1164\": \"int64\", \"c1165\": \"int64\", \"c1166\": \"int64\", \"c1167\": \"int64\", \"c1168\": \"int64\", \"c1169\": \"int64\", \"c1170\": \"int64\", \"c1171\": \"int64\", \"c1172\": \"int64\", \"c1173\": \"int64\", \"c1174\": \"int64\", \"c1175\": \"int64\", \"c1176\": \"int64\", \"c1177\": \"int64\", \"c1178\": \"int64\", \"c1179\": \"int64\", \"c1180\": \"int64\", \"c1181\": \"int64\", \"c1182\": \"int64\", \"c1183\": \"int64\", \"c1184\": \"int64\", \"c1185\": \"int64\", \"c1186\": \"int64\", \"c1187\": \"int64\", \"c1188\": \"int64\", \"c1189\": \"int64\", \"c1190\": \"int64\", \"c1191\": \"int64\", \"c1192\": \"int64\", \"c1193\": 
\"int64\", \"c1194\": \"int64\", \"c1195\": \"int64\", \"c1196\": \"int64\", \"c1197\": \"int64\", \"c1198\": \"int64\", \"c1199\": \"int64\", \"c1200\": \"int64\", \"c1201\": \"int64\", \"c1202\": \"int64\", \"c1203\": \"int64\", \"c1204\": \"int64\", \"c1205\": \"int64\", \"c1206\": \"int64\", \"c1207\": \"int64\", \"c1208\": \"int64\", \"c1209\": \"int64\", \"c1210\": \"int64\", \"c1211\": \"int64\", \"c1212\": \"int64\", \"c1213\": \"int64\", \"c1214\": \"int64\", \"c1215\": \"int64\", \"c1216\": \"int64\", \"c1217\": \"int64\", \"c1218\": \"int64\", \"c1219\": \"int64\", \"c1220\": \"int64\", \"c1221\": \"int64\", \"c1222\": \"int64\", \"c1223\": \"int64\", \"c1224\": \"int64\", \"c1225\": \"int64\", \"c1226\": \"int64\", \"c1227\": \"int64\", \"c1228\": \"int64\", \"c1229\": \"int64\", \"c1230\": \"int64\", \"c1231\": \"int64\", \"c1232\": \"int64\", \"c1233\": \"int64\", \"c1234\": \"int64\", \"c1235\": \"int64\", \"c1236\": \"int64\", \"c1237\": \"int64\", \"c1238\": \"int64\", \"c1239\": \"int64\", \"c1240\": \"int64\", \"c1241\": \"int64\", \"c1242\": \"int64\", \"c1243\": \"int64\", \"c1244\": \"int64\", \"c1245\": \"int64\", \"c1246\": \"int64\", \"c1247\": \"int64\", \"c1248\": \"int64\", \"c1249\": \"int64\", \"c1250\": \"int64\", \"c1251\": \"int64\", \"c1252\": \"int64\", \"c1253\": \"int64\", \"c1254\": \"int64\", \"c1255\": \"int64\", \"c1256\": \"int64\", \"c1257\": \"int64\", \"c1258\": \"int64\", \"c1259\": \"int64\", \"c1260\": \"int64\", \"c1261\": \"int64\", \"c1262\": \"int64\", \"c1263\": \"int64\", \"c1264\": \"int64\", \"c1265\": \"int64\", \"c1266\": \"int64\", \"c1267\": \"int64\", \"c1268\": \"int64\", \"c1269\": \"int64\", \"c1270\": \"int64\", \"c1271\": \"int64\", \"c1272\": \"int64\", \"c1273\": \"int64\", \"c1274\": \"int64\", \"c1275\": \"int64\", \"c1276\": \"int64\", \"c1277\": \"int64\", \"c1278\": \"int64\", \"c1279\": \"int64\", \"c1280\": \"int64\", \"c1281\": \"int64\", \"c1282\": \"int64\", \"c1283\": \"int64\", \"c1284\": \"int64\", \"c1285\": \"int64\", \"c1286\": \"int64\", \"c1287\": \"int64\", \"c1288\": \"int64\", \"c1289\": \"int64\", \"c1290\": \"int64\", \"c1291\": \"int64\", \"c1292\": \"int64\", \"c1293\": \"int64\", \"c1294\": \"int64\", \"c1295\": \"int64\", \"c1296\": \"int64\", \"c1297\": \"int64\", \"c1298\": \"int64\", \"c1299\": \"int64\", \"c1300\": \"int64\", \"c1301\": \"int64\", \"c1302\": \"int64\", \"c1303\": \"int64\", \"c1304\": \"int64\", \"c1305\": \"int64\", \"c1306\": \"int64\", \"c1307\": \"int64\", \"c1308\": \"int64\", \"c1309\": \"int64\", \"c1310\": \"int64\", \"c1311\": \"int64\", \"c1312\": \"int64\", \"c1313\": \"int64\", \"c1314\": \"int64\", \"c1315\": \"int64\", \"c1316\": \"int64\", \"c1317\": \"int64\", \"c1318\": \"int64\", \"c1319\": \"int64\", \"c1320\": \"int64\", \"c1321\": \"int64\", \"c1322\": \"int64\", \"c1323\": \"int64\", \"c1324\": \"int64\", \"c1325\": \"int64\", \"c1326\": \"int64\", \"c1327\": \"int64\", \"c1328\": \"int64\", \"c1329\": \"int64\", \"c1330\": \"int64\", \"c1331\": \"int64\", \"c1332\": \"int64\", \"c1333\": \"int64\", \"c1334\": \"int64\", \"c1335\": \"int64\", \"c1336\": \"int64\", \"c1337\": \"int64\", \"c1338\": \"int64\", \"c1339\": \"int64\", \"c1340\": \"int64\", \"c1341\": \"int64\", \"c1342\": \"int64\", \"c1343\": \"int64\", \"c1344\": \"int64\", \"c1345\": \"int64\", \"c1346\": \"int64\", \"c1347\": \"int64\", \"c1348\": \"int64\", \"c1349\": \"int64\", \"c1350\": \"int64\", \"c1351\": \"int64\", \"c1352\": \"int64\", \"c1353\": \"int64\", \"c1354\": \"int64\", 
\"c1355\": \"int64\", \"c1356\": \"int64\", \"c1357\": \"int64\", \"c1358\": \"int64\", \"c1359\": \"int64\", \"c1360\": \"int64\", \"c1361\": \"int64\", \"c1362\": \"int64\", \"c1363\": \"int64\", \"c1364\": \"int64\", \"c1365\": \"int64\", \"c1366\": \"int64\", \"c1367\": \"int64\", \"c1368\": \"int64\", \"c1369\": \"int64\", \"c1370\": \"int64\", \"c1371\": \"int64\", \"c1372\": \"int64\", \"c1373\": \"int64\", \"c1374\": \"int64\", \"c1375\": \"int64\", \"c1376\": \"int64\", \"c1377\": \"int64\", \"c1378\": \"int64\", \"c1379\": \"int64\", \"c1380\": \"int64\", \"c1381\": \"int64\", \"c1382\": \"int64\", \"c1383\": \"int64\", \"c1384\": \"int64\", \"c1385\": \"int64\", \"c1386\": \"int64\", \"c1387\": \"int64\", \"c1388\": \"int64\", \"c1389\": \"int64\", \"c1390\": \"int64\", \"c1391\": \"int64\", \"c1392\": \"int64\", \"c1393\": \"int64\", \"c1394\": \"int64\", \"c1395\": \"int64\", \"c1396\": \"int64\", \"c1397\": \"int64\", \"c1398\": \"int64\", \"c1399\": \"int64\", \"c1400\": \"int64\", \"c1401\": \"int64\", \"c1402\": \"int64\", \"c1403\": \"int64\", \"c1404\": \"int64\", \"c1405\": \"int64\", \"c1406\": \"int64\", \"c1407\": \"int64\", \"c1408\": \"int64\", \"c1409\": \"int64\", \"c1410\": \"int64\", \"c1411\": \"int64\", \"c1412\": \"int64\", \"c1413\": \"int64\", \"c1414\": \"int64\", \"c1415\": \"int64\", \"c1416\": \"int64\", \"c1417\": \"int64\", \"c1418\": \"int64\", \"c1419\": \"int64\", \"c1420\": \"int64\", \"c1421\": \"int64\", \"c1422\": \"int64\", \"c1423\": \"int64\", \"c1424\": \"int64\", \"c1425\": \"int64\", \"c1426\": \"int64\", \"c1427\": \"int64\", \"c1428\": \"int64\", \"c1429\": \"int64\", \"c1430\": \"int64\", \"c1431\": \"int64\", \"c1432\": \"int64\", \"c1433\": \"int64\", \"c1434\": \"int64\", \"c1435\": \"int64\", \"c1436\": \"int64\", \"c1437\": \"int64\", \"c1438\": \"int64\", \"c1439\": \"int64\", \"c1440\": \"int64\", \"c1441\": \"int64\", \"c1442\": \"int64\", \"c1443\": \"int64\", \"c1444\": \"int64\", \"c1445\": \"int64\", \"c1446\": \"int64\", \"c1447\": \"int64\", \"c1448\": \"int64\", \"c1449\": \"int64\", \"c1450\": \"int64\", \"c1451\": \"int64\", \"c1452\": \"int64\", \"c1453\": \"int64\", \"c1454\": \"int64\", \"c1455\": \"int64\", \"c1456\": \"int64\", \"c1457\": \"int64\", \"c1458\": \"int64\", \"c1459\": \"int64\", \"c1460\": \"int64\", \"c1461\": \"int64\", \"c1462\": \"int64\", \"c1463\": \"int64\", \"c1464\": \"int64\", \"c1465\": \"int64\", \"c1466\": \"int64\", \"c1467\": \"int64\", \"c1468\": \"int64\", \"c1469\": \"int64\", \"c1470\": \"int64\", \"c1471\": \"int64\", \"c1472\": \"int64\", \"c1473\": \"int64\", \"c1474\": \"int64\", \"c1475\": \"int64\", \"c1476\": \"int64\", \"c1477\": \"int64\", \"c1478\": \"int64\", \"c1479\": \"int64\", \"c1480\": \"int64\", \"c1481\": \"int64\", \"c1482\": \"int64\", \"c1483\": \"int64\", \"c1484\": \"int64\", \"c1485\": \"int64\", \"c1486\": \"int64\", \"c1487\": \"int64\", \"c1488\": \"int64\", \"c1489\": \"int64\", \"c1490\": \"int64\", \"c1491\": \"int64\", \"c1492\": \"int64\", \"c1493\": \"int64\", \"c1494\": \"int64\", \"c1495\": \"int64\", \"c1496\": \"int64\", \"c1497\": \"int64\", \"c1498\": \"int64\", \"c1499\": \"int64\", \"c1500\": \"int64\", \"c1501\": \"int64\", \"c1502\": \"int64\", \"c1503\": \"int64\", \"c1504\": \"int64\", \"c1505\": \"int64\", \"c1506\": \"int64\", \"c1507\": \"int64\", \"c1508\": \"int64\", \"c1509\": \"int64\", \"c1510\": \"int64\", \"c1511\": \"int64\", \"c1512\": \"int64\", \"c1513\": \"int64\", \"c1514\": \"int64\", \"c1515\": \"int64\", \"c1516\": 
\"int64\", \"c1517\": \"int64\", \"c1518\": \"int64\", \"c1519\": \"int64\", \"c1520\": \"int64\", \"c1521\": \"int64\", \"c1522\": \"int64\", \"c1523\": \"int64\", \"c1524\": \"int64\", \"c1525\": \"int64\", \"c1526\": \"int64\", \"c1527\": \"int64\", \"c1528\": \"int64\", \"c1529\": \"int64\", \"c1530\": \"int64\", \"c1531\": \"int64\", \"c1532\": \"int64\", \"c1533\": \"int64\", \"c1534\": \"int64\", \"c1535\": \"int64\", \"c1536\": \"int64\", \"c1537\": \"int64\", \"c1538\": \"int64\", \"c1539\": \"int64\", \"c1540\": \"int64\", \"c1541\": \"int64\", \"c1542\": \"int64\", \"c1543\": \"int64\", \"c1544\": \"int64\", \"c1545\": \"int64\", \"c1546\": \"int64\", \"c1547\": \"int64\", \"c1548\": \"int64\", \"c1549\": \"int64\", \"c1550\": \"int64\", \"c1551\": \"int64\", \"c1552\": \"int64\", \"c1553\": \"int64\", \"c1554\": \"int64\", \"c1555\": \"int64\", \"c1556\": \"int64\", \"c1557\": \"int64\", \"c1558\": \"int64\", \"c1559\": \"int64\", \"c1560\": \"int64\", \"c1561\": \"int64\", \"c1562\": \"int64\", \"c1563\": \"int64\", \"c1564\": \"int64\", \"c1565\": \"int64\", \"c1566\": \"int64\", \"c1567\": \"int64\", \"c1568\": \"int64\", \"c1569\": \"int64\", \"c1570\": \"int64\", \"c1571\": \"int64\", \"c1572\": \"int64\", \"c1573\": \"int64\", \"c1574\": \"int64\", \"c1575\": \"int64\", \"c1576\": \"int64\", \"c1577\": \"int64\", \"c1578\": \"int64\", \"c1579\": \"int64\", \"c1580\": \"int64\", \"c1581\": \"int64\", \"c1582\": \"int64\", \"c1583\": \"int64\", \"c1584\": \"int64\", \"c1585\": \"int64\", \"c1586\": \"int64\", \"c1587\": \"int64\", \"c1588\": \"int64\", \"c1589\": \"int64\", \"c1590\": \"int64\", \"c1591\": \"int64\", \"c1592\": \"int64\", \"c1593\": \"int64\", \"c1594\": \"int64\", \"c1595\": \"int64\", \"c1596\": \"int64\", \"c1597\": \"int64\", \"c1598\": \"int64\", \"c1599\": \"int64\", \"c1600\": \"int64\", \"c1601\": \"int64\", \"c1602\": \"int64\", \"c1603\": \"int64\", \"c1604\": \"int64\", \"c1605\": \"int64\", \"c1606\": \"int64\", \"c1607\": \"int64\", \"c1608\": \"int64\", \"c1609\": \"int64\", \"c1610\": \"int64\", \"c1611\": \"int64\", \"c1612\": \"int64\", \"c1613\": \"int64\", \"c1614\": \"int64\", \"c1615\": \"int64\", \"c1616\": \"int64\", \"c1617\": \"int64\", \"c1618\": \"int64\", \"c1619\": \"int64\", \"c1620\": \"int64\", \"c1621\": \"int64\", \"c1622\": \"int64\", \"c1623\": \"int64\", \"c1624\": \"int64\", \"c1625\": \"int64\", \"c1626\": \"int64\", \"c1627\": \"int64\", \"c1628\": \"int64\", \"c1629\": \"int64\", \"c1630\": \"int64\", \"c1631\": \"int64\", \"c1632\": \"int64\", \"c1633\": \"int64\", \"c1634\": \"int64\", \"c1635\": \"int64\", \"c1636\": \"int64\", \"c1637\": \"int64\", \"c1638\": \"int64\", \"c1639\": \"int64\", \"c1640\": \"int64\", \"c1641\": \"int64\", \"c1642\": \"int64\", \"c1643\": \"int64\", \"c1644\": \"int64\", \"c1645\": \"int64\", \"c1646\": \"int64\", \"c1647\": \"int64\", \"c1648\": \"int64\", \"c1649\": \"int64\", \"c1650\": \"int64\", \"c1651\": \"int64\", \"c1652\": \"int64\", \"c1653\": \"int64\", \"c1654\": \"int64\", \"c1655\": \"int64\", \"c1656\": \"int64\", \"c1657\": \"int64\", \"c1658\": \"int64\", \"c1659\": \"int64\", \"c1660\": \"int64\", \"c1661\": \"int64\", \"c1662\": \"int64\", \"c1663\": \"int64\", \"c1664\": \"int64\", \"c1665\": \"int64\", \"c1666\": \"int64\", \"c1667\": \"int64\", \"c1668\": \"int64\", \"c1669\": \"int64\", \"c1670\": \"int64\", \"c1671\": \"int64\", \"c1672\": \"int64\", \"c1673\": \"int64\", \"c1674\": \"int64\", \"c1675\": \"int64\", \"c1676\": \"int64\", \"c1677\": \"int64\", 
\"c1678\": \"int64\", \"c1679\": \"int64\", \"c1680\": \"int64\", \"c1681\": \"int64\", \"c1682\": \"int64\", \"c1683\": \"int64\", \"c1684\": \"int64\", \"c1685\": \"int64\", \"c1686\": \"int64\", \"c1687\": \"int64\", \"c1688\": \"int64\", \"c1689\": \"int64\", \"c1690\": \"int64\", \"c1691\": \"int64\", \"c1692\": \"int64\", \"c1693\": \"int64\", \"c1694\": \"int64\", \"c1695\": \"int64\", \"c1696\": \"int64\", \"c1697\": \"int64\", \"c1698\": \"int64\", \"c1699\": \"int64\", \"c1700\": \"int64\", \"c1701\": \"int64\", \"c1702\": \"int64\", \"c1703\": \"int64\", \"c1704\": \"int64\", \"c1705\": \"int64\", \"c1706\": \"int64\", \"c1707\": \"int64\", \"c1708\": \"int64\", \"c1709\": \"int64\", \"c1710\": \"int64\", \"c1711\": \"int64\", \"c1712\": \"int64\", \"c1713\": \"int64\", \"c1714\": \"int64\", \"c1715\": \"int64\", \"c1716\": \"int64\", \"c1717\": \"int64\", \"c1718\": \"int64\", \"c1719\": \"int64\", \"c1720\": \"int64\", \"c1721\": \"int64\", \"c1722\": \"int64\", \"c1723\": \"int64\", \"c1724\": \"int64\", \"c1725\": \"int64\", \"c1726\": \"int64\", \"c1727\": \"int64\", \"c1728\": \"int64\", \"c1729\": \"int64\", \"c1730\": \"int64\", \"c1731\": \"int64\", \"c1732\": \"int64\", \"c1733\": \"int64\", \"c1734\": \"int64\", \"c1735\": \"int64\", \"c1736\": \"int64\", \"c1737\": \"int64\", \"c1738\": \"int64\", \"c1739\": \"int64\", \"c1740\": \"int64\", \"c1741\": \"int64\", \"c1742\": \"int64\", \"c1743\": \"int64\", \"c1744\": \"int64\", \"c1745\": \"int64\", \"c1746\": \"int64\", \"c1747\": \"int64\", \"c1748\": \"int64\", \"c1749\": \"int64\", \"c1750\": \"int64\", \"c1751\": \"int64\", \"c1752\": \"int64\", \"c1753\": \"int64\", \"c1754\": \"int64\", \"c1755\": \"int64\", \"c1756\": \"int64\", \"c1757\": \"int64\", \"c1758\": \"int64\", \"c1759\": \"int64\", \"c1760\": \"int64\", \"c1761\": \"int64\", \"c1762\": \"int64\", \"c1763\": \"int64\", \"c1764\": \"int64\", \"c1765\": \"int64\", \"c1766\": \"int64\", \"c1767\": \"int64\", \"c1768\": \"int64\", \"c1769\": \"int64\", \"c1770\": \"int64\", \"c1771\": \"int64\", \"c1772\": \"int64\", \"c1773\": \"int64\", \"c1774\": \"int64\", \"c1775\": \"int64\", \"c1776\": \"int64\", \"c1777\": \"int64\", \"c1778\": \"int64\", \"c1779\": \"int64\", \"c1780\": \"int64\", \"c1781\": \"int64\", \"c1782\": \"int64\", \"c1783\": \"int64\", \"c1784\": \"int64\", \"c1785\": \"int64\", \"c1786\": \"int64\", \"c1787\": \"int64\", \"c1788\": \"int64\", \"c1789\": \"int64\", \"c1790\": \"int64\", \"c1791\": \"int64\", \"c1792\": \"int64\", \"c1793\": \"int64\", \"c1794\": \"int64\", \"c1795\": \"int64\", \"c1796\": \"int64\", \"c1797\": \"int64\", \"c1798\": \"int64\", \"c1799\": \"int64\", \"c1800\": \"int64\", \"c1801\": \"int64\", \"c1802\": \"int64\", \"c1803\": \"int64\", \"c1804\": \"int64\", \"c1805\": \"int64\", \"c1806\": \"int64\", \"c1807\": \"int64\", \"c1808\": \"int64\", \"c1809\": \"int64\", \"c1810\": \"int64\", \"c1811\": \"int64\", \"c1812\": \"int64\", \"c1813\": \"int64\", \"c1814\": \"int64\", \"c1815\": \"int64\", \"c1816\": \"int64\", \"c1817\": \"int64\", \"c1818\": \"int64\", \"c1819\": \"int64\", \"c1820\": \"int64\", \"c1821\": \"int64\", \"c1822\": \"int64\", \"c1823\": \"int64\", \"c1824\": \"int64\", \"c1825\": \"int64\", \"c1826\": \"int64\", \"c1827\": \"int64\", \"c1828\": \"int64\", \"c1829\": \"int64\", \"c1830\": \"int64\", \"c1831\": \"int64\", \"c1832\": \"int64\", \"c1833\": \"int64\", \"c1834\": \"int64\", \"c1835\": \"int64\", \"c1836\": \"int64\", \"c1837\": \"int64\", \"c1838\": \"int64\", \"c1839\": 
\"int64\", \"c1840\": \"int64\", \"c1841\": \"int64\", \"c1842\": \"int64\", \"c1843\": \"int64\", \"c1844\": \"int64\", \"c1845\": \"int64\", \"c1846\": \"int64\", \"c1847\": \"int64\", \"c1848\": \"int64\", \"c1849\": \"int64\", \"c1850\": \"int64\", \"c1851\": \"int64\", \"c1852\": \"int64\", \"c1853\": \"int64\", \"c1854\": \"int64\", \"c1855\": \"int64\", \"c1856\": \"int64\", \"c1857\": \"int64\", \"c1858\": \"int64\", \"c1859\": \"int64\", \"c1860\": \"int64\", \"c1861\": \"int64\", \"c1862\": \"int64\", \"c1863\": \"int64\", \"c1864\": \"int64\", \"c1865\": \"int64\", \"c1866\": \"int64\", \"c1867\": \"int64\", \"c1868\": \"int64\", \"c1869\": \"int64\", \"c1870\": \"int64\", \"c1871\": \"int64\", \"c1872\": \"int64\", \"c1873\": \"int64\", \"c1874\": \"int64\", \"c1875\": \"int64\", \"c1876\": \"int64\", \"c1877\": \"int64\", \"c1878\": \"int64\", \"c1879\": \"int64\", \"c1880\": \"int64\", \"c1881\": \"int64\", \"c1882\": \"int64\", \"c1883\": \"int64\", \"c1884\": \"int64\", \"c1885\": \"int64\", \"c1886\": \"int64\", \"c1887\": \"int64\", \"c1888\": \"int64\", \"c1889\": \"int64\", \"c1890\": \"int64\", \"c1891\": \"int64\", \"c1892\": \"int64\", \"c1893\": \"int64\", \"c1894\": \"int64\", \"c1895\": \"int64\", \"c1896\": \"int64\", \"c1897\": \"int64\", \"c1898\": \"int64\", \"c1899\": \"int64\", \"c1900\": \"int64\", \"c1901\": \"int64\", \"c1902\": \"int64\", \"c1903\": \"int64\", \"c1904\": \"int64\", \"c1905\": \"int64\", \"c1906\": \"int64\", \"c1907\": \"int64\", \"c1908\": \"int64\", \"c1909\": \"int64\", \"c1910\": \"int64\", \"c1911\": \"int64\", \"c1912\": \"int64\", \"c1913\": \"int64\", \"c1914\": \"int64\", \"c1915\": \"int64\", \"c1916\": \"int64\", \"c1917\": \"int64\", \"c1918\": \"int64\", \"c1919\": \"int64\", \"c1920\": \"int64\", \"c1921\": \"int64\", \"c1922\": \"int64\", \"c1923\": \"int64\", \"c1924\": \"int64\", \"c1925\": \"int64\", \"c1926\": \"int64\", \"c1927\": \"int64\", \"c1928\": \"int64\", \"c1929\": \"int64\", \"c1930\": \"int64\", \"c1931\": \"int64\", \"c1932\": \"int64\", \"c1933\": \"int64\", \"c1934\": \"int64\", \"c1935\": \"int64\", \"c1936\": \"int64\", \"c1937\": \"int64\", \"c1938\": \"int64\", \"c1939\": \"int64\", \"c1940\": \"int64\", \"c1941\": \"int64\", \"c1942\": \"int64\", \"c1943\": \"int64\", \"c1944\": \"int64\", \"c1945\": \"int64\", \"c1946\": \"int64\", \"c1947\": \"int64\", \"c1948\": \"int64\", \"c1949\": \"int64\", \"c1950\": \"int64\", \"c1951\": \"int64\", \"c1952\": \"int64\", \"c1953\": \"int64\", \"c1954\": \"int64\", \"c1955\": \"int64\", \"c1956\": \"int64\", \"c1957\": \"int64\", \"c1958\": \"int64\", \"c1959\": \"int64\", \"c1960\": \"int64\", \"c1961\": \"int64\", \"c1962\": \"int64\", \"c1963\": \"int64\", \"c1964\": \"int64\", \"c1965\": \"int64\", \"c1966\": \"int64\", \"c1967\": \"int64\", \"c1968\": \"int64\", \"c1969\": \"int64\", \"c1970\": \"int64\", \"c1971\": \"int64\", \"c1972\": \"int64\", \"c1973\": \"int64\", \"c1974\": \"int64\", \"c1975\": \"int64\", \"c1976\": \"int64\", \"c1977\": \"int64\", \"c1978\": \"int64\", \"c1979\": \"int64\", \"c1980\": \"int64\", \"c1981\": \"int64\", \"c1982\": \"int64\", \"c1983\": \"int64\", \"c1984\": \"int64\", \"c1985\": \"int64\", \"c1986\": \"int64\", \"c1987\": \"int64\", \"c1988\": \"int64\", \"c1989\": \"int64\", \"c1990\": \"int64\", \"c1991\": \"int64\", \"c1992\": \"int64\", \"c1993\": \"int64\", \"c1994\": \"int64\", \"c1995\": \"int64\", \"c1996\": \"int64\", \"c1997\": \"int64\", \"c1998\": \"int64\", \"c1999\": \"int64\", \"c2000\": \"int64\"}", 
"info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 10000 entries, 0 to 9999\nColumns: 2001 entries, Unnamed: 0 to c2000\ndtypes: int64(2000), object(1)\nmemory usage: 152.7+ MB\n", "summary": "{\"c1\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c2\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c3\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c4\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c5\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c6\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346918, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c7\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c8\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c9\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c10\": {\"count\": 10000.0, \"mean\": 0.0077, \"std\": 0.08741552578011953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c11\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c12\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c13\": {\"count\": 10000.0, \"mean\": 0.0076, \"std\": 0.08685041335209605, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c14\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c15\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c16\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c17\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c18\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c19\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c20\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c21\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c22\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.0845510194349468, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c23\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.0845510194349468, \"min\": 
0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c24\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c25\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c26\": {\"count\": 10000.0, \"mean\": 0.0051, \"std\": 0.07123550694523746, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c27\": {\"count\": 10000.0, \"mean\": 0.0051, \"std\": 0.07123550694523746, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c28\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c29\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c30\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c31\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c32\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c33\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c34\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c35\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c36\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c37\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c38\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c39\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c40\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c41\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c42\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c43\": {\"count\": 10000.0, \"mean\": 0.0079, \"std\": 0.08853459119114819, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c44\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c45\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c46\": {\"count\": 10000.0, \"mean\": 0.0049, \"std\": 0.0698317810725516, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c47\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 
0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c48\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c49\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392178, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c50\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c51\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c52\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c53\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c54\": {\"count\": 10000.0, \"mean\": 0.0075, \"std\": 0.08628148381573503, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c55\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c56\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c57\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190318, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c58\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c59\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c60\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c61\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c62\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c63\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c64\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c65\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c66\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c67\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c68\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c69\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c70\": {\"count\": 10000.0, \"mean\": 0.0076, \"std\": 0.08685041335209605, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c71\": {\"count\": 10000.0, \"mean\": 
0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c72\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c73\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c74\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c75\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c76\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c77\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c78\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c79\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c80\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c81\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c82\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c83\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c84\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190318, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c85\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c86\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c87\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c88\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c89\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c90\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c91\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c92\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c93\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c94\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c95\": {\"count\": 
10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494679, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c96\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.07658835797729162, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c97\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190318, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c98\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.0851318667919032, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c99\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c100\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c101\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c102\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c103\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392178, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c104\": {\"count\": 10000.0, \"mean\": 0.0075, \"std\": 0.08628148381573503, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c105\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c106\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c107\": {\"count\": 10000.0, \"mean\": 0.008, \"std\": 0.08908868435086431, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c108\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c109\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c110\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.07396145637900171, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c111\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c112\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c113\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c114\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c115\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c116\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c117\": {\"count\": 10000.0, \"mean\": 0.0075, \"std\": 0.08628148381573501, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c118\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190318, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, 
\"max\": 1.0}, \"c119\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c120\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c121\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c122\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c123\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c124\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c125\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c126\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c127\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c128\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c129\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c130\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c131\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c132\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c133\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c134\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c135\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c136\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c137\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c138\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c139\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c140\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c141\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.0845510194349468, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c142\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, 
\"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c143\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c144\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c145\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c146\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c147\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c148\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626907, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c149\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c150\": {\"count\": 10000.0, \"mean\": 0.008, \"std\": 0.08908868435086431, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c151\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c152\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c153\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c154\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c155\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c156\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c157\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c158\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c159\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c160\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.08036414523644209, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c161\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c162\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c163\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c164\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c165\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c166\": {\"count\": 10000.0, \"mean\": 0.0068, 
\"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c167\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c168\": {\"count\": 10000.0, \"mean\": 0.0077, \"std\": 0.08741552578011953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c169\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c170\": {\"count\": 10000.0, \"mean\": 0.0086, \"std\": 0.09234117548130372, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c171\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c172\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c173\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c174\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346915, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c175\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494678, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c176\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c177\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c178\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c179\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c180\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c181\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c182\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c183\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c184\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c185\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c186\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c187\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626908, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c188\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c189\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, 
\"c190\": {\"count\": 10000.0, \"mean\": 0.0048, \"std\": 0.06911901144963212, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c191\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c192\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c193\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c194\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626908, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c195\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c196\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c197\": {\"count\": 10000.0, \"mean\": 0.0083, \"std\": 0.09073000161644071, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c198\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c199\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c200\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c201\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c202\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c203\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c204\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c205\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c206\": {\"count\": 10000.0, \"mean\": 0.0045, \"std\": 0.06693428134970884, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c207\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c208\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c209\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c210\": {\"count\": 10000.0, \"mean\": 0.0076, \"std\": 0.08685041335209605, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c211\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c212\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c213\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, 
\"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c214\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c215\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.0851318667919032, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c216\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c217\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c218\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c219\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626908, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c220\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c221\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c222\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c223\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c224\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c225\": {\"count\": 10000.0, \"mean\": 0.0049, \"std\": 0.0698317810725516, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c226\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c227\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626908, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c228\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c229\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c230\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c231\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c232\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651424, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c233\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c234\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c235\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c236\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c237\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 
0.07396145637900171, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c238\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c239\": {\"count\": 10000.0, \"mean\": 0.0047, \"std\": 0.06839866839189034, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c240\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c241\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c242\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c243\": {\"count\": 10000.0, \"mean\": 0.0076, \"std\": 0.08685041335209605, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c244\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346915, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c245\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c246\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.07658835797729162, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c247\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c248\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c249\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c250\": {\"count\": 10000.0, \"mean\": 0.0047, \"std\": 0.06839866839189034, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c251\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c252\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c253\": {\"count\": 10000.0, \"mean\": 0.0076, \"std\": 0.08685041335209605, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c254\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c255\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c256\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c257\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c258\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c259\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c260\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c261\": 
{\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c262\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.07396145637900171, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c263\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c264\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626908, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c265\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c266\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c267\": {\"count\": 10000.0, \"mean\": 0.0051, \"std\": 0.07123550694523746, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c268\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c269\": {\"count\": 10000.0, \"mean\": 0.0082, \"std\": 0.09018632577800104, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c270\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c271\": {\"count\": 10000.0, \"mean\": 0.0049, \"std\": 0.0698317810725516, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c272\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c273\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c274\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c275\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c276\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c277\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c278\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563316, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c279\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c280\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c281\": {\"count\": 10000.0, \"mean\": 0.0076, \"std\": 0.08685041335209603, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c282\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c283\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c284\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, 
\"75%\": 0.0, \"max\": 1.0}, \"c285\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c286\": {\"count\": 10000.0, \"mean\": 0.0051, \"std\": 0.07123550694523746, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c287\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c288\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c289\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c290\": {\"count\": 10000.0, \"mean\": 0.008, \"std\": 0.08908868435086431, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c291\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c292\": {\"count\": 10000.0, \"mean\": 0.008, \"std\": 0.08908868435086433, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c293\": {\"count\": 10000.0, \"mean\": 0.0049, \"std\": 0.0698317810725516, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c294\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531585, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c295\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651426, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c296\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c297\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c298\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c299\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c300\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.0845510194349468, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c301\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c302\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190318, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c303\": {\"count\": 10000.0, \"mean\": 0.0046, \"std\": 0.06767051004531426, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c304\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c305\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531585, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c306\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c307\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346918, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c308\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, 
\"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c309\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c310\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626908, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c311\": {\"count\": 10000.0, \"mean\": 0.0077, \"std\": 0.08741552578011953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c312\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c313\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c314\": {\"count\": 10000.0, \"mean\": 0.0081, \"std\": 0.08963924095702695, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c315\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c316\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c317\": {\"count\": 10000.0, \"mean\": 0.0076, \"std\": 0.08685041335209605, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c318\": {\"count\": 10000.0, \"mean\": 0.0051, \"std\": 0.07123550694523746, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c319\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c320\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c321\": {\"count\": 10000.0, \"mean\": 0.0076, \"std\": 0.08685041335209605, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c322\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c323\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651424, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c324\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c325\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c326\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c327\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c328\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.07396145637900171, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c329\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c330\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c331\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c332\": {\"count\": 10000.0, 
\"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c333\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c334\": {\"count\": 10000.0, \"mean\": 0.0079, \"std\": 0.08853459119114819, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c335\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c336\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c337\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494679, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c338\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c339\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c340\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651424, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c341\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c342\": {\"count\": 10000.0, \"mean\": 0.0048, \"std\": 0.06911901144963212, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c343\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c344\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c345\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190318, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c346\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c347\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c348\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c349\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c350\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c351\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c352\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c353\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c354\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c355\": {\"count\": 10000.0, \"mean\": 0.0084, \"std\": 0.09127032939188251, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 
1.0}, \"c356\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494679, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c357\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c358\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c359\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c360\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c361\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c362\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c363\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c364\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c365\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c366\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190318, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c367\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c368\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c369\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c370\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c371\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c372\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c373\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c374\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c375\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c376\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c377\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c378\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c379\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.07786781324548651, \"min\": 0.0, \"25%\": 0.0, 
\"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c380\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c381\": {\"count\": 10000.0, \"mean\": 0.0076, \"std\": 0.08685041335209605, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c382\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.0845510194349468, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c383\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c384\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c385\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c386\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c387\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c388\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c389\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c390\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c391\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c392\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c393\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.0845510194349468, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c394\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c395\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c396\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494678, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c397\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c398\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c399\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c400\": {\"count\": 10000.0, \"mean\": 0.0076, \"std\": 0.08685041335209605, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c401\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c402\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c403\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 
0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c404\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c405\": {\"count\": 10000.0, \"mean\": 0.0048, \"std\": 0.06911901144963212, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c406\": {\"count\": 10000.0, \"mean\": 0.0079, \"std\": 0.08853459119114819, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c407\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c408\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c409\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c410\": {\"count\": 10000.0, \"mean\": 0.0043, \"std\": 0.06543652033703642, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c411\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c412\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.07786781324548651, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c413\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c414\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c415\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c416\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346918, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c417\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c418\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c419\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c420\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c421\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c422\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c423\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c424\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c425\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c426\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c427\": 
{\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c428\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c429\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c430\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c431\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190318, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c432\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c433\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651424, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c434\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c435\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c436\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c437\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626908, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c438\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c439\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c440\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c441\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c442\": {\"count\": 10000.0, \"mean\": 0.0081, \"std\": 0.08963924095702695, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c443\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c444\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c445\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c446\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c447\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190318, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c448\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.0845510194349468, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c449\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c450\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, 
\"75%\": 0.0, \"max\": 1.0}, \"c451\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c452\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c453\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c454\": {\"count\": 10000.0, \"mean\": 0.0075, \"std\": 0.08628148381573503, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c455\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c456\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.0845510194349468, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c457\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c458\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190318, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c459\": {\"count\": 10000.0, \"mean\": 0.0077, \"std\": 0.08741552578011953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c460\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c461\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651424, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c462\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190318, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c463\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.074627052197524, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c464\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c465\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c466\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.0845510194349468, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c467\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563316, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c468\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c469\": {\"count\": 10000.0, \"mean\": 0.0077, \"std\": 0.08741552578011953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c470\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c471\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c472\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c473\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c474\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, 
\"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c475\": {\"count\": 10000.0, \"mean\": 0.0049, \"std\": 0.0698317810725516, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c476\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c477\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c478\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c479\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c480\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.07396145637900171, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c481\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c482\": {\"count\": 10000.0, \"mean\": 0.0089, \"std\": 0.09392375720347179, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c483\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c484\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c485\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651426, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c486\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c487\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190318, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c488\": {\"count\": 10000.0, \"mean\": 0.0079, \"std\": 0.08853459119114819, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c489\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.0845510194349468, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c490\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c491\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c492\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c493\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c494\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c495\": {\"count\": 10000.0, \"mean\": 0.0047, \"std\": 0.06839866839189034, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c496\": {\"count\": 10000.0, \"mean\": 0.0046, \"std\": 0.06767051004531426, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c497\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c498\": {\"count\": 10000.0, \"mean\": 
0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c499\": {\"count\": 10000.0, \"mean\": 0.0079, \"std\": 0.08853459119114819, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c500\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c501\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c502\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651426, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c503\": {\"count\": 10000.0, \"mean\": 0.008, \"std\": 0.08908868435086431, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c504\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c505\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c506\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c507\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c508\": {\"count\": 10000.0, \"mean\": 0.0048, \"std\": 0.06911901144963212, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c509\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c510\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c511\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c512\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c513\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c514\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c515\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c516\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c517\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c518\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c519\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c520\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c521\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, 
\"c522\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c523\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651426, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c524\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c525\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c526\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c527\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c528\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c529\": {\"count\": 10000.0, \"mean\": 0.0048, \"std\": 0.06911901144963212, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c530\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c531\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c532\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c533\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c534\": {\"count\": 10000.0, \"mean\": 0.004, \"std\": 0.06312209153572135, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c535\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c536\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c537\": {\"count\": 10000.0, \"mean\": 0.0051, \"std\": 0.07123550694523746, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c538\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c539\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c540\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c541\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c542\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c543\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c544\": {\"count\": 10000.0, \"mean\": 0.0081, \"std\": 0.08963924095702695, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c545\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, 
\"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c546\": {\"count\": 10000.0, \"mean\": 0.0051, \"std\": 0.07123550694523746, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c547\": {\"count\": 10000.0, \"mean\": 0.0075, \"std\": 0.08628148381573503, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c548\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c549\": {\"count\": 10000.0, \"mean\": 0.0075, \"std\": 0.08628148381573501, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c550\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c551\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190318, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c552\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c553\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c554\": {\"count\": 10000.0, \"mean\": 0.0075, \"std\": 0.08628148381573501, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c555\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c556\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c557\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c558\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c559\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c560\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c561\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346918, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c562\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c563\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c564\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c565\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c566\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c567\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c568\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c569\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 
0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c570\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346918, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c571\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c572\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c573\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c574\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c575\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c576\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c577\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c578\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c579\": {\"count\": 10000.0, \"mean\": 0.0077, \"std\": 0.08741552578011953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c580\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c581\": {\"count\": 10000.0, \"mean\": 0.004, \"std\": 0.06312209153572135, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c582\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c583\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c584\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547653, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c585\": {\"count\": 10000.0, \"mean\": 0.0078, \"std\": 0.08797689465649113, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c586\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c587\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c588\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.0845510194349468, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c589\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651426, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c590\": {\"count\": 10000.0, \"mean\": 0.0075, \"std\": 0.08628148381573501, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c591\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c592\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563316, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c593\": {\"count\": 
10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c594\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c595\": {\"count\": 10000.0, \"mean\": 0.0077, \"std\": 0.08741552578011953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c596\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651426, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c597\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c598\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c599\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c600\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c601\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c602\": {\"count\": 10000.0, \"mean\": 0.0076, \"std\": 0.08685041335209605, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c603\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c604\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c605\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c606\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c607\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c608\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c609\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c610\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c611\": {\"count\": 10000.0, \"mean\": 0.0082, \"std\": 0.09018632577800105, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c612\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c613\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651426, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c614\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c615\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c616\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, 
\"max\": 1.0}, \"c617\": {\"count\": 10000.0, \"mean\": 0.0081, \"std\": 0.08963924095702695, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c618\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c619\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c620\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c621\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c622\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c623\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c624\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c625\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c626\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c627\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c628\": {\"count\": 10000.0, \"mean\": 0.0047, \"std\": 0.06839866839189034, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c629\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c630\": {\"count\": 10000.0, \"mean\": 0.0076, \"std\": 0.08685041335209603, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c631\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c632\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c633\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c634\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c635\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c636\": {\"count\": 10000.0, \"mean\": 0.0077, \"std\": 0.08741552578011953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c637\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c638\": {\"count\": 10000.0, \"mean\": 0.0051, \"std\": 0.07123550694523746, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c639\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c640\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, 
\"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c641\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c642\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c643\": {\"count\": 10000.0, \"mean\": 0.0077, \"std\": 0.08741552578011953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c644\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c645\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c646\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c647\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c648\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494679, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c649\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c650\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c651\": {\"count\": 10000.0, \"mean\": 0.0049, \"std\": 0.0698317810725516, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c652\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563316, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c653\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c654\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c655\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c656\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c657\": {\"count\": 10000.0, \"mean\": 0.0077, \"std\": 0.08741552578011953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c658\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c659\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c660\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c661\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c662\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531585, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c663\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c664\": {\"count\": 10000.0, \"mean\": 0.0074, 
\"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c665\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c666\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c667\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c668\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c669\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.0845510194349468, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c670\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c671\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c672\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c673\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c674\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c675\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c676\": {\"count\": 10000.0, \"mean\": 0.0049, \"std\": 0.0698317810725516, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c677\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c678\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651426, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c679\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c680\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c681\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c682\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c683\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c684\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c685\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c686\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c687\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c688\": 
{\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c689\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c690\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190318, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c691\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626907, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c692\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531585, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c693\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c694\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c695\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c696\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c697\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c698\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c699\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c700\": {\"count\": 10000.0, \"mean\": 0.0051, \"std\": 0.07123550694523746, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c701\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c702\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c703\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c704\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626907, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c705\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c706\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c707\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c708\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c709\": {\"count\": 10000.0, \"mean\": 0.0046, \"std\": 0.06767051004531426, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c710\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626908, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c711\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 
0.0, \"75%\": 0.0, \"max\": 1.0}, \"c712\": {\"count\": 10000.0, \"mean\": 0.0044, \"std\": 0.06618971300595572, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c713\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190318, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c714\": {\"count\": 10000.0, \"mean\": 0.0076, \"std\": 0.08685041335209603, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c715\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494679, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c716\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c717\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c718\": {\"count\": 10000.0, \"mean\": 0.0076, \"std\": 0.08685041335209605, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c719\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c720\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c721\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346918, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c722\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c723\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c724\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c725\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c726\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c727\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563316, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c728\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c729\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c730\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563316, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c731\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c732\": {\"count\": 10000.0, \"mean\": 0.0049, \"std\": 0.0698317810725516, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c733\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563316, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c734\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c735\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626908, 
\"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c736\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c737\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494678, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c738\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c739\": {\"count\": 10000.0, \"mean\": 0.0077, \"std\": 0.08741552578011953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c740\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c741\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c742\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563316, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c743\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563316, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c744\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c745\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190318, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c746\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c747\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c748\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563316, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c749\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c750\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494679, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c751\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c752\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c753\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c754\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494679, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c755\": {\"count\": 10000.0, \"mean\": 0.008, \"std\": 0.08908868435086431, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c756\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c757\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c758\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c759\": {\"count\": 10000.0, \"mean\": 
0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c760\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c761\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c762\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c763\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651426, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c764\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494679, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c765\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c766\": {\"count\": 10000.0, \"mean\": 0.0044, \"std\": 0.0661897130059557, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c767\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c768\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651426, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c769\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c770\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190318, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c771\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190317, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c772\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c773\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c774\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c775\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c776\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c777\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651426, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c778\": {\"count\": 10000.0, \"mean\": 0.0049, \"std\": 0.0698317810725516, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c779\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c780\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c781\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c782\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, 
\"c783\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c784\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c785\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c786\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c787\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c788\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c789\": {\"count\": 10000.0, \"mean\": 0.0048, \"std\": 0.06911901144963212, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c790\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c791\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c792\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651424, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c793\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c794\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c795\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c796\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c797\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190318, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c798\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547653, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c799\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c800\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c801\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c802\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c803\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651426, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c804\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c805\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c806\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, 
\"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c807\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c808\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c809\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c810\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c811\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c812\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c813\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c814\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.0827833033137163, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c815\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c816\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c817\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c818\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c819\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c820\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c821\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c822\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c823\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c824\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c825\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651424, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c826\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c827\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c828\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c829\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c830\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 
0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c831\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c832\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c833\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626908, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c834\": {\"count\": 10000.0, \"mean\": 0.0075, \"std\": 0.08628148381573501, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c835\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c836\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c837\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c838\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c839\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c840\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c841\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651424, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c842\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c843\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.0827833033137163, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c844\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c845\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c846\": {\"count\": 10000.0, \"mean\": 0.0078, \"std\": 0.08797689465649111, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c847\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c848\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563316, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c849\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c850\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c851\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c852\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c853\": {\"count\": 10000.0, \"mean\": 0.0044, \"std\": 0.0661897130059557, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c854\": {\"count\": 
10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c855\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c856\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c857\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c858\": {\"count\": 10000.0, \"mean\": 0.0075, \"std\": 0.08628148381573503, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c859\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c860\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c861\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c862\": {\"count\": 10000.0, \"mean\": 0.0043, \"std\": 0.06543652033703642, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c863\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c864\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c865\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c866\": {\"count\": 10000.0, \"mean\": 0.0051, \"std\": 0.07123550694523746, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c867\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c868\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c869\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c870\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c871\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c872\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c873\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c874\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c875\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c876\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.07396145637900171, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c877\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, 
\"max\": 1.0}, \"c878\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c879\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c880\": {\"count\": 10000.0, \"mean\": 0.0075, \"std\": 0.08628148381573503, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c881\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c882\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346915, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c883\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c884\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c885\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c886\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c887\": {\"count\": 10000.0, \"mean\": 0.0075, \"std\": 0.08628148381573503, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c888\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494679, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c889\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c890\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c891\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c892\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c893\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c894\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c895\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c896\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c897\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c898\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c899\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c900\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c901\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, 
\"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c902\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c903\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494679, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c904\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c905\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c906\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c907\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c908\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c909\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c910\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c911\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c912\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c913\": {\"count\": 10000.0, \"mean\": 0.0076, \"std\": 0.08685041335209605, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c914\": {\"count\": 10000.0, \"mean\": 0.0081, \"std\": 0.08963924095702695, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c915\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c916\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c917\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c918\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c919\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c920\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c921\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c922\": {\"count\": 10000.0, \"mean\": 0.0075, \"std\": 0.08628148381573503, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c923\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626908, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c924\": {\"count\": 10000.0, \"mean\": 0.0075, \"std\": 0.08628148381573503, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c925\": {\"count\": 10000.0, \"mean\": 0.0061, 
\"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c926\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c927\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c928\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c929\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c930\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c931\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190318, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c932\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c933\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c934\": {\"count\": 10000.0, \"mean\": 0.0045, \"std\": 0.06693428134970886, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c935\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c936\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c937\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c938\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c939\": {\"count\": 10000.0, \"mean\": 0.0051, \"std\": 0.07123550694523746, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c940\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c941\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c942\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c943\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c944\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c945\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c946\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c947\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c948\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c949\": 
{\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c950\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c951\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c952\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c953\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c954\": {\"count\": 10000.0, \"mean\": 0.0044, \"std\": 0.0661897130059557, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c955\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c956\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c957\": {\"count\": 10000.0, \"mean\": 0.0075, \"std\": 0.08628148381573503, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c958\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c959\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c960\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c961\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c962\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c963\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c964\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c965\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626908, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c966\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190318, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c967\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.07396145637900171, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c968\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c969\": {\"count\": 10000.0, \"mean\": 0.0077, \"std\": 0.08741552578011953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c970\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c971\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c972\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.07396145637900171, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, 
\"75%\": 0.0, \"max\": 1.0}, \"c973\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c974\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346918, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c975\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c976\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c977\": {\"count\": 10000.0, \"mean\": 0.0078, \"std\": 0.08797689465649113, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c978\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c979\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c980\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c981\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c982\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c983\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c984\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c985\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c986\": {\"count\": 10000.0, \"mean\": 0.0077, \"std\": 0.08741552578011953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c987\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c988\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c989\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c990\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c991\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c992\": {\"count\": 10000.0, \"mean\": 0.0075, \"std\": 0.08628148381573501, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c993\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563316, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c994\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c995\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c996\": {\"count\": 10000.0, \"mean\": 0.0075, \"std\": 0.08628148381573503, 
\"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c997\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c998\": {\"count\": 10000.0, \"mean\": 0.0051, \"std\": 0.07123550694523746, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c999\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1000\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1001\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1002\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1003\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563316, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1004\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1005\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1006\": {\"count\": 10000.0, \"mean\": 0.0076, \"std\": 0.08685041335209605, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1007\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1008\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1009\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1010\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1011\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1012\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1013\": {\"count\": 10000.0, \"mean\": 0.008, \"std\": 0.08908868435086431, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1014\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1015\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1016\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1017\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1018\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1019\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1020\": {\"count\": 
10000.0, \"mean\": 0.0055, \"std\": 0.07396145637900171, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1021\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.07396145637900171, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1022\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1023\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1024\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1025\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1026\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1027\": {\"count\": 10000.0, \"mean\": 0.0047, \"std\": 0.06839866839189034, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1028\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1029\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1030\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1031\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.0851318667919032, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1032\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1033\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346918, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1034\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1035\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1036\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190318, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1037\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494679, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1038\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1039\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1040\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651424, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1041\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1042\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1043\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, 
\"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1044\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1045\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1046\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1047\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1048\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1049\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1050\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1051\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1052\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1053\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1054\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1055\": {\"count\": 10000.0, \"mean\": 0.0076, \"std\": 0.08685041335209605, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1056\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1057\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1058\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1059\": {\"count\": 10000.0, \"mean\": 0.0075, \"std\": 0.08628148381573503, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1060\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1061\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1062\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1063\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1064\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1065\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651424, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1066\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1067\": {\"count\": 10000.0, \"mean\": 
0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1068\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1069\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1070\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1071\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1072\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1073\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1074\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1075\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1076\": {\"count\": 10000.0, \"mean\": 0.0075, \"std\": 0.08628148381573503, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1077\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1078\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1079\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1080\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1081\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1082\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1083\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1084\": {\"count\": 10000.0, \"mean\": 0.0047, \"std\": 0.06839866839189034, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1085\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1086\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563316, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1087\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1088\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1089\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1090\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 
0.0, \"max\": 1.0}, \"c1091\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.07396145637900171, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1092\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1093\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1094\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563316, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1095\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1096\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1097\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1098\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1099\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1100\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1101\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.07658835797729162, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1102\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1103\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1104\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1105\": {\"count\": 10000.0, \"mean\": 0.0049, \"std\": 0.0698317810725516, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1106\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1107\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1108\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1109\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626908, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1110\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1111\": {\"count\": 10000.0, \"mean\": 0.0049, \"std\": 0.0698317810725516, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1112\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1113\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1114\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 
0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1115\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1116\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1117\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1118\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494679, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1119\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1120\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1121\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1122\": {\"count\": 10000.0, \"mean\": 0.0048, \"std\": 0.06911901144963212, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1123\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1124\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1125\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651424, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1126\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1127\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1128\": {\"count\": 10000.0, \"mean\": 0.0049, \"std\": 0.0698317810725516, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1129\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1130\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1131\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1132\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1133\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1134\": {\"count\": 10000.0, \"mean\": 0.0047, \"std\": 0.06839866839189034, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1135\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1136\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1137\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, 
\"c1138\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1139\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1140\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651424, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1141\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1142\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1143\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1144\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651424, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1145\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.074627052197524, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1146\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1147\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531585, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1148\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1149\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1150\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1151\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626908, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1152\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1153\": {\"count\": 10000.0, \"mean\": 0.0075, \"std\": 0.08628148381573501, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1154\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1155\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1156\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1157\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1158\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1159\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626908, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1160\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1161\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 
0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1162\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1163\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1164\": {\"count\": 10000.0, \"mean\": 0.0051, \"std\": 0.07123550694523746, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1165\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1166\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1167\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1168\": {\"count\": 10000.0, \"mean\": 0.0079, \"std\": 0.08853459119114819, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1169\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1170\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1171\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1172\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1173\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1174\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626908, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1175\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1176\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1177\": {\"count\": 10000.0, \"mean\": 0.0043, \"std\": 0.06543652033703642, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1178\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1179\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.07396145637900171, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1180\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1181\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1182\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1183\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1184\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651424, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1185\": {\"count\": 
10000.0, \"mean\": 0.007, \"std\": 0.08337682633392178, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1186\": {\"count\": 10000.0, \"mean\": 0.0045, \"std\": 0.06693428134970884, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1187\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1188\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1189\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.07396145637900171, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1190\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1191\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651424, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1192\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1193\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1194\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1195\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1196\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1197\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.07658835797729162, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1198\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1199\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346918, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1200\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531585, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1201\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1202\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1203\": {\"count\": 10000.0, \"mean\": 0.0049, \"std\": 0.0698317810725516, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1204\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626908, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1205\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1206\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1207\": {\"count\": 10000.0, \"mean\": 0.008, \"std\": 0.08908868435086431, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1208\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, 
\"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1209\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.07396145637900171, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1210\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1211\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1212\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1213\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1214\": {\"count\": 10000.0, \"mean\": 0.0045, \"std\": 0.06693428134970884, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1215\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651424, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1216\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1217\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1218\": {\"count\": 10000.0, \"mean\": 0.0076, \"std\": 0.08685041335209605, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1219\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1220\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346918, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1221\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1222\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1223\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1224\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1225\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1226\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1227\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1228\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1229\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1230\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1231\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1232\": {\"count\": 10000.0, \"mean\": 0.0067, 
\"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1233\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1234\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346918, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1235\": {\"count\": 10000.0, \"mean\": 0.0076, \"std\": 0.08685041335209605, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1236\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1237\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1238\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1239\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1240\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1241\": {\"count\": 10000.0, \"mean\": 0.0078, \"std\": 0.08797689465649113, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1242\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1243\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1244\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1245\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346918, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1246\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1247\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1248\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1249\": {\"count\": 10000.0, \"mean\": 0.0075, \"std\": 0.08628148381573501, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1250\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1251\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531585, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1252\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1253\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1254\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1255\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, 
\"max\": 1.0}, \"c1256\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1257\": {\"count\": 10000.0, \"mean\": 0.0049, \"std\": 0.06983178107255159, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1258\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1259\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1260\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1261\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1262\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1263\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1264\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1265\": {\"count\": 10000.0, \"mean\": 0.0049, \"std\": 0.06983178107255159, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1266\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1267\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1268\": {\"count\": 10000.0, \"mean\": 0.0044, \"std\": 0.0661897130059557, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1269\": {\"count\": 10000.0, \"mean\": 0.0079, \"std\": 0.08853459119114819, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1270\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.07396145637900171, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1271\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651424, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1272\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651424, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1273\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1274\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1275\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1276\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1277\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1278\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494679, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1279\": {\"count\": 10000.0, \"mean\": 0.0049, \"std\": 
0.06983178107255159, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1280\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1281\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1282\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1283\": {\"count\": 10000.0, \"mean\": 0.0076, \"std\": 0.08685041335209605, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1284\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1285\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1286\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1287\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1288\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563316, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1289\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1290\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1291\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1292\": {\"count\": 10000.0, \"mean\": 0.0051, \"std\": 0.07123550694523746, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1293\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.0827833033137163, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1294\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1295\": {\"count\": 10000.0, \"mean\": 0.0078, \"std\": 0.08797689465649111, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1296\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1297\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1298\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1299\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346918, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1300\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1301\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1302\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 
1.0}, \"c1303\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1304\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1305\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651424, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1306\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1307\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563316, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1308\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651424, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1309\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1310\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1311\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1312\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494679, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1313\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626908, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1314\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190318, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1315\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1316\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1317\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1318\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1319\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1320\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1321\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563316, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1322\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651424, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1323\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1324\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1325\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346918, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1326\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, 
\"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1327\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1328\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1329\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1330\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1331\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1332\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1333\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1334\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1335\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1336\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1337\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547653, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1338\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1339\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1340\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1341\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1342\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346918, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1343\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1344\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1345\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651424, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1346\": {\"count\": 10000.0, \"mean\": 0.0049, \"std\": 0.0698317810725516, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1347\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651424, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1348\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1349\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1350\": {\"count\": 
10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1351\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1352\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.07396145637900171, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1353\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1354\": {\"count\": 10000.0, \"mean\": 0.0075, \"std\": 0.08628148381573503, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1355\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190318, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1356\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1357\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1358\": {\"count\": 10000.0, \"mean\": 0.0079, \"std\": 0.0885345911911482, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1359\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1360\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626908, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1361\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1362\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626908, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1363\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1364\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1365\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1366\": {\"count\": 10000.0, \"mean\": 0.0049, \"std\": 0.0698317810725516, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1367\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.07396145637900171, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1368\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1369\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1370\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346918, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1371\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1372\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1373\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, 
\"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1374\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190318, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1375\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346918, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1376\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1377\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1378\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1379\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1380\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1381\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1382\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1383\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651426, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1384\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1385\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1386\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1387\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1388\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1389\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651424, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1390\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1391\": {\"count\": 10000.0, \"mean\": 0.0075, \"std\": 0.08628148381573501, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1392\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1393\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1394\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1395\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1396\": {\"count\": 10000.0, \"mean\": 0.0046, \"std\": 0.06767051004531426, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1397\": {\"count\": 10000.0, \"mean\": 
0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1398\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.0845510194349468, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1399\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1400\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1401\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531585, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1402\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1403\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1404\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1405\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1406\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1407\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1408\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1409\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1410\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1411\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1412\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1413\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1414\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1415\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1416\": {\"count\": 10000.0, \"mean\": 0.0049, \"std\": 0.06983178107255159, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1417\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1418\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1419\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1420\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 
0.0, \"max\": 1.0}, \"c1421\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1422\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1423\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1424\": {\"count\": 10000.0, \"mean\": 0.0047, \"std\": 0.06839866839189034, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1425\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1426\": {\"count\": 10000.0, \"mean\": 0.0045, \"std\": 0.06693428134970886, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1427\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563316, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1428\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1429\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1430\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1431\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1432\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494679, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1433\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626908, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1434\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1435\": {\"count\": 10000.0, \"mean\": 0.0049, \"std\": 0.0698317810725516, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1436\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1437\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1438\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1439\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1440\": {\"count\": 10000.0, \"mean\": 0.0048, \"std\": 0.06911901144963212, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1441\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1442\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1443\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1444\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 
0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1445\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1446\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1447\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1448\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190318, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1449\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1450\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1451\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1452\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1453\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1454\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1455\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1456\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1457\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1458\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1459\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1460\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.07396145637900171, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1461\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1462\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626908, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1463\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1464\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1465\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494678, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1466\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1467\": {\"count\": 10000.0, \"mean\": 0.0047, \"std\": 0.06839866839189034, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, 
\"c1468\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1469\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1470\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1471\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1472\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1473\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1474\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1475\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1476\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626908, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1477\": {\"count\": 10000.0, \"mean\": 0.0049, \"std\": 0.06983178107255159, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1478\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1479\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1480\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1481\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1482\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.0827833033137163, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1483\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1484\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1485\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.07658835797729162, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1486\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1487\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1488\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1489\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1490\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1491\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 
0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1492\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1493\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.07053720684684771, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1494\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1495\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1496\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1497\": {\"count\": 10000.0, \"mean\": 0.008, \"std\": 0.08908868435086431, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1498\": {\"count\": 10000.0, \"mean\": 0.0077, \"std\": 0.08741552578011953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1499\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1500\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1501\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1502\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1503\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1504\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1505\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1506\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1507\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1508\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1509\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1510\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494679, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1511\": {\"count\": 10000.0, \"mean\": 0.0076, \"std\": 0.08685041335209605, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1512\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1513\": {\"count\": 10000.0, \"mean\": 0.0078, \"std\": 0.08797689465649114, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1514\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1515\": {\"count\": 10000.0, 
\"mean\": 0.0059, \"std\": 0.07658835797729162, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1516\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1517\": {\"count\": 10000.0, \"mean\": 0.0081, \"std\": 0.08963924095702695, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1518\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1519\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1520\": {\"count\": 10000.0, \"mean\": 0.0083, \"std\": 0.09073000161644071, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1521\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1522\": {\"count\": 10000.0, \"mean\": 0.0079, \"std\": 0.0885345911911482, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1523\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1524\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1525\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1526\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1527\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1528\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494678, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1529\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346915, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1530\": {\"count\": 10000.0, \"mean\": 0.0076, \"std\": 0.08685041335209605, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1531\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.07658835797729162, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1532\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1533\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1534\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1535\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1536\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651426, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1537\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1538\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, 
\"75%\": 0.0, \"max\": 1.0}, \"c1539\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1540\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1541\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.07658835797729162, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1542\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1543\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190318, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1544\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1545\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1546\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1547\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1548\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1549\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563316, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1550\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494678, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1551\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651426, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1552\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1553\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.0827833033137163, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1554\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190318, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1555\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563316, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1556\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1557\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1558\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1559\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1560\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1561\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1562\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 
0.08455101943494678, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1563\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190318, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1564\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651424, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1565\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651424, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1566\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1567\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1568\": {\"count\": 10000.0, \"mean\": 0.0076, \"std\": 0.08685041335209605, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1569\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1570\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1571\": {\"count\": 10000.0, \"mean\": 0.0077, \"std\": 0.08741552578011953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1572\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1573\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1574\": {\"count\": 10000.0, \"mean\": 0.0081, \"std\": 0.08963924095702695, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1575\": {\"count\": 10000.0, \"mean\": 0.0045, \"std\": 0.06693428134970884, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1576\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1577\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1578\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1579\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1580\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1581\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494679, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1582\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1583\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1584\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1585\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 
1.0}, \"c1586\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1587\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531585, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1588\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1589\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1590\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1591\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1592\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1593\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494678, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1594\": {\"count\": 10000.0, \"mean\": 0.0051, \"std\": 0.07123550694523746, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1595\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1596\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1597\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1598\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1599\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1600\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1601\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1602\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1603\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1604\": {\"count\": 10000.0, \"mean\": 0.0046, \"std\": 0.06767051004531426, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1605\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1606\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1607\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1608\": {\"count\": 10000.0, \"mean\": 0.0076, \"std\": 0.08685041335209605, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1609\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, 
\"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1610\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1611\": {\"count\": 10000.0, \"mean\": 0.0049, \"std\": 0.0698317810725516, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1612\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1613\": {\"count\": 10000.0, \"mean\": 0.0077, \"std\": 0.08741552578011953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1614\": {\"count\": 10000.0, \"mean\": 0.0077, \"std\": 0.08741552578011953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1615\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1616\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1617\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1618\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1619\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1620\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563316, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1621\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1622\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1623\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1624\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1625\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1626\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1627\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1628\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1629\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1630\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651426, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1631\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1632\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.07396145637900171, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1633\": 
{\"count\": 10000.0, \"mean\": 0.008, \"std\": 0.08908868435086431, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1634\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1635\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1636\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1637\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1638\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1639\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.07396145637900171, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1640\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1641\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651424, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1642\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1643\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1644\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1645\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1646\": {\"count\": 10000.0, \"mean\": 0.0075, \"std\": 0.08628148381573503, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1647\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1648\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1649\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1650\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1651\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.0827833033137163, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1652\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1653\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1654\": {\"count\": 10000.0, \"mean\": 0.0075, \"std\": 0.08628148381573503, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1655\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1656\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 
0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1657\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1658\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1659\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1660\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1661\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1662\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531585, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1663\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1664\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1665\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1666\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1667\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1668\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1669\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1670\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1671\": {\"count\": 10000.0, \"mean\": 0.0049, \"std\": 0.0698317810725516, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1672\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1673\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.07396145637900171, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1674\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651426, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1675\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1676\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1677\": {\"count\": 10000.0, \"mean\": 0.0077, \"std\": 0.08741552578011953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1678\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1679\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626907, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1680\": {\"count\": 10000.0, \"mean\": 
0.0072, \"std\": 0.08455101943494679, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1681\": {\"count\": 10000.0, \"mean\": 0.0078, \"std\": 0.08797689465649113, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1682\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1683\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.07396145637900171, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1684\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1685\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1686\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1687\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1688\": {\"count\": 10000.0, \"mean\": 0.0076, \"std\": 0.08685041335209605, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1689\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1690\": {\"count\": 10000.0, \"mean\": 0.0051, \"std\": 0.07123550694523746, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1691\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1692\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1693\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1694\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1695\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1696\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1697\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1698\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1699\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1700\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1701\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531585, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1702\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1703\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626907, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, 
\"max\": 1.0}, \"c1704\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1705\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1706\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1707\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1708\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1709\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1710\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1711\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1712\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1713\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1714\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1715\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1716\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.0827833033137163, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1717\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1718\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1719\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494678, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1720\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1721\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1722\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1723\": {\"count\": 10000.0, \"mean\": 0.008, \"std\": 0.08908868435086431, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1724\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1725\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1726\": {\"count\": 10000.0, \"mean\": 0.0051, \"std\": 0.07123550694523746, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1727\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 
0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1728\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1729\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1730\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232848, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1731\": {\"count\": 10000.0, \"mean\": 0.0078, \"std\": 0.08797689465649113, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1732\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1733\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651424, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1734\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494678, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1735\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1736\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1737\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1738\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1739\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1740\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1741\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547653, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1742\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494678, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1743\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1744\": {\"count\": 10000.0, \"mean\": 0.0078, \"std\": 0.08797689465649113, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1745\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1746\": {\"count\": 10000.0, \"mean\": 0.0077, \"std\": 0.08741552578011953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1747\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1748\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1749\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1750\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 
1.0}, \"c1751\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1752\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494678, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1753\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1754\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1755\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1756\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1757\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547653, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1758\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1759\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1760\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.07396145637900169, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1761\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1762\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1763\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1764\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1765\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494678, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1766\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531585, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1767\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1768\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1769\": {\"count\": 10000.0, \"mean\": 0.008, \"std\": 0.08908868435086431, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1770\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1771\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1772\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1773\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1774\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, 
\"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1775\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547654, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1776\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1777\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1778\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1779\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1780\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1781\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547653, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1782\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346918, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1783\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494678, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1784\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232848, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1785\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1786\": {\"count\": 10000.0, \"mean\": 0.0046, \"std\": 0.06767051004531426, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1787\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626907, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1788\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1789\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1790\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1791\": {\"count\": 10000.0, \"mean\": 0.0075, \"std\": 0.08628148381573503, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1792\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547653, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1793\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1794\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1795\": {\"count\": 10000.0, \"mean\": 0.0044, \"std\": 0.0661897130059557, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1796\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1797\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531585, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1798\": 
{\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1799\": {\"count\": 10000.0, \"mean\": 0.0083, \"std\": 0.09073000161644071, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1800\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494678, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1801\": {\"count\": 10000.0, \"mean\": 0.0048, \"std\": 0.06911901144963213, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1802\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1803\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190318, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1804\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563316, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1805\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1806\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346918, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1807\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1808\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1809\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1810\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1811\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1812\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1813\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1814\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1815\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1816\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1817\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1818\": {\"count\": 10000.0, \"mean\": 0.0076, \"std\": 0.08685041335209605, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1819\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651424, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1820\": {\"count\": 10000.0, \"mean\": 0.0051, \"std\": 0.07123550694523746, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1821\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190317, \"min\": 0.0, 
\"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1822\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1823\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1824\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1825\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1826\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1827\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1828\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190318, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1829\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1830\": {\"count\": 10000.0, \"mean\": 0.0076, \"std\": 0.08685041335209605, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1831\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1832\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346918, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1833\": {\"count\": 10000.0, \"mean\": 0.0046, \"std\": 0.06767051004531426, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1834\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232848, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1835\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1836\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1837\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626907, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1838\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1839\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190318, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1840\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1841\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1842\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1843\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190318, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1844\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1845\": {\"count\": 10000.0, 
\"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1846\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.0827833033137163, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1847\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1848\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1849\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1850\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1851\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1852\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1853\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.0827833033137163, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1854\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1855\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713951, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1856\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1857\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1858\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1859\": {\"count\": 10000.0, \"mean\": 0.0049, \"std\": 0.0698317810725516, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1860\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1861\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1862\": {\"count\": 10000.0, \"mean\": 0.008, \"std\": 0.08908868435086431, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1863\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.0827833033137163, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1864\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1865\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651424, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1866\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232848, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1867\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1868\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, 
\"75%\": 0.0, \"max\": 1.0}, \"c1869\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1870\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1871\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1872\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1873\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1874\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1875\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1876\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1877\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1878\": {\"count\": 10000.0, \"mean\": 0.0051, \"std\": 0.07123550694523746, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1879\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1880\": {\"count\": 10000.0, \"mean\": 0.0083, \"std\": 0.0907300016164407, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1881\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1882\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1883\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1884\": {\"count\": 10000.0, \"mean\": 0.0073, \"std\": 0.08513186679190317, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1885\": {\"count\": 10000.0, \"mean\": 0.0051, \"std\": 0.07123550694523746, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1886\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1887\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.08278330331371629, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1888\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494678, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1889\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626907, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1890\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1891\": {\"count\": 10000.0, \"mean\": 0.0075, \"std\": 0.08628148381573501, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1892\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 
0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1893\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1894\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531585, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1895\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1896\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651424, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1897\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1898\": {\"count\": 10000.0, \"mean\": 0.0077, \"std\": 0.08741552578011953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1899\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1900\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1901\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1902\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563316, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1903\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.0827833033137163, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1904\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1905\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1906\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1907\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531585, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1908\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1909\": {\"count\": 10000.0, \"mean\": 0.0053, \"std\": 0.07261155034651424, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1910\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1911\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1912\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1913\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1914\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346918, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1915\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494679, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 
1.0}, \"c1916\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1917\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1918\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.0815829368039528, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1919\": {\"count\": 10000.0, \"mean\": 0.0067, \"std\": 0.08158293680395279, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1920\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1921\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1922\": {\"count\": 10000.0, \"mean\": 0.0054, \"std\": 0.07328967961257418, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1923\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1924\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232848, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1925\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1926\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1927\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1928\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1929\": {\"count\": 10000.0, \"mean\": 0.005, \"std\": 0.0705372068468477, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1930\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1931\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232848, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1932\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1933\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.0827833033137163, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1934\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1935\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531585, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1936\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1937\": {\"count\": 10000.0, \"mean\": 0.0049, \"std\": 0.0698317810725516, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1938\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1939\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 
0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1940\": {\"count\": 10000.0, \"mean\": 0.0074, \"std\": 0.08570866115778351, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1941\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1942\": {\"count\": 10000.0, \"mean\": 0.0077, \"std\": 0.08741552578011953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1943\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1944\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626908, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1945\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1946\": {\"count\": 10000.0, \"mean\": 0.0072, \"std\": 0.08455101943494679, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1947\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1948\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.07723079994177172, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1949\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232848, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1950\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1951\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.0827833033137163, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1952\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1953\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1954\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1955\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626907, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1956\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1957\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547653, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1958\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1959\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1960\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1961\": {\"count\": 10000.0, \"mean\": 0.0065, \"std\": 0.0803641452364421, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1962\": {\"count\": 10000.0, \"mean\": 0.0062, \"std\": 0.07849953004713953, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1963\": {\"count\": 
10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1964\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1965\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1966\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.0797475765625311, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1967\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1968\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1969\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626908, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1970\": {\"count\": 10000.0, \"mean\": 0.007, \"std\": 0.08337682633392177, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1971\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563316, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1972\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1973\": {\"count\": 10000.0, \"mean\": 0.0041, \"std\": 0.06390303873710532, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1974\": {\"count\": 10000.0, \"mean\": 0.0048, \"std\": 0.06911901144963213, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1975\": {\"count\": 10000.0, \"mean\": 0.0068, \"std\": 0.08218537244269417, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1976\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1977\": {\"count\": 10000.0, \"mean\": 0.0059, \"std\": 0.0765883579772916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1978\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1979\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1980\": {\"count\": 10000.0, \"mean\": 0.0076, \"std\": 0.08685041335209605, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1981\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1982\": {\"count\": 10000.0, \"mean\": 0.0066, \"std\": 0.08097589585531584, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1983\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232849, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1984\": {\"count\": 10000.0, \"mean\": 0.0057, \"std\": 0.07528663100232848, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1985\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1986\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, 
\"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1987\": {\"count\": 10000.0, \"mean\": 0.0052, \"std\": 0.07192688890626907, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1988\": {\"count\": 10000.0, \"mean\": 0.0046, \"std\": 0.06767051004531426, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1989\": {\"count\": 10000.0, \"mean\": 0.0071, \"std\": 0.08396603497547656, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1990\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1991\": {\"count\": 10000.0, \"mean\": 0.0045, \"std\": 0.06693428134970886, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1992\": {\"count\": 10000.0, \"mean\": 0.0069, \"std\": 0.0827833033137163, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1993\": {\"count\": 10000.0, \"mean\": 0.0064, \"std\": 0.07974757656253109, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1994\": {\"count\": 10000.0, \"mean\": 0.0061, \"std\": 0.0778678132454865, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1995\": {\"count\": 10000.0, \"mean\": 0.0063, \"std\": 0.07912607720346916, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1996\": {\"count\": 10000.0, \"mean\": 0.006, \"std\": 0.0772307999417717, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1997\": {\"count\": 10000.0, \"mean\": 0.0056, \"std\": 0.07462705219752398, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1998\": {\"count\": 10000.0, \"mean\": 0.0051, \"std\": 0.07123550694523746, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c1999\": {\"count\": 10000.0, \"mean\": 0.0055, \"std\": 0.0739614563790017, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}, \"c2000\": {\"count\": 10000.0, \"mean\": 0.0058, \"std\": 0.07594034957563314, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.0, \"max\": 1.0}}", "examples": "{\"Unnamed: 
0\":{\"0\":\"r1\",\"1\":\"r2\",\"2\":\"r3\",\"3\":\"r4\"},\"c1\":{\"0\":0,\"1\":0,\"2\":1,\"3\":0},\"c2\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c3\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c4\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c5\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c6\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c7\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c8\":{\"0\":0,\"1\":0,\"2\":1,\"3\":0},\"c9\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c10\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c11\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c12\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c13\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c14\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c15\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c16\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c17\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c18\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c19\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c20\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c21\":{\"0\":1,\"1\":0,\"2\":0,\"3\":0},\"c22\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c23\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c24\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c25\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c26\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c27\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c28\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c29\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c30\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c31\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c32\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c33\":{\"0\":0,\"1\":0,\"2\":0,\"3\":1},\"c34\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c35\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c36\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c37\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c38\":{\"0\":0,\"1\":1,\"2\":0,\"3\":0},\"c39\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c40\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c41\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c42\":{\"0\":0,\"1\":0,\"2\":0,\"3\":1},\"c43\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c44\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c45\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c46\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c47\":{\"0\":0,\"1\":0,\"2\":0,\"3\":1},\"c48\":{\"0\":0,\"1\":0,\"2\":1,\"3\":0},\"c49\":{\"0\":1,\"1\":0,\"2\":0,\"3\":0},\"c50\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c51\":{\"0\":0,\"1\":1,\"2\":0,\"3\":0},\"c52\":{\"0\":0,\"1\":1,\"2\":0,\"3\":0},\"c53\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c54\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c55\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c56\":{\"0\":0,\"1\":0,\"2\":0,\"3\":1},\"c57\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c58\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c59\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c60\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c61\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c62\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c63\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c64\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c65\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c66\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c67\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c68\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c69\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c70\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c71\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c72\":{\"0\":1,\"1\":0,\"2\":0,\"3\":0},\"c73\":{\"0\":0,\"1\":0,\"2\":0,\"3\":1},\"c74\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c75\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c76\":{\"0\":1,\"1\":0,\"2\":0,\"3\":0},\"c77\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c78\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c79\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c80\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c81\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c82\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c83\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c84\":{\"0\":0,\"1
\":0,\"2\":0,\"3\":0},\"c85\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c86\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c87\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c88\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c89\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c90\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c91\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c92\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c93\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c94\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c95\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c96\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c97\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c98\":{\"0\":0,\"1\":0,\"2\":0,\"3\":1},\"c99\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c100\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c101\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c102\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c103\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c104\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c105\":{\"0\":0,\"1\":1,\"2\":0,\"3\":0},\"c106\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c107\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c108\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c109\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c110\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c111\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c112\":{\"0\":1,\"1\":0,\"2\":0,\"3\":0},\"c113\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c114\":{\"0\":0,\"1\":0,\"2\":0,\"3\":1},\"c115\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c116\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c117\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c118\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c119\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c120\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c121\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c122\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c123\":{\"0\":0,\"1\":0,\"2\":0,\"3\":1},\"c124\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c125\":{\"0\":0,\"1\":0,\"2\":0,\"3\":1},\"c126\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c127\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c128\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c129\":{\"0\":1,\"1\":0,\"2\":0,\"3\":0},\"c130\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c131\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c132\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c133\":{\"0\":1,\"1\":0,\"2\":0,\"3\":0},\"c134\":{\"0\":0,\"1\":1,\"2\":0,\"3\":0},\"c135\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c136\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c137\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c138\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c139\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c140\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c141\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c142\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c143\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c144\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c145\":{\"0\":1,\"1\":0,\"2\":0,\"3\":0},\"c146\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c147\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c148\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c149\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c150\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c151\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c152\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c153\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c154\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c155\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c156\":{\"0\":1,\"1\":0,\"2\":0,\"3\":0},\"c157\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c158\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c159\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c160\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c161\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c162\":{\"0\":0,\"1\":1,\"2\":0,\"3\":0},\"c163\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c164\":{\"0\":0,\"1\":0,\"2\":0,\"3\":1},\"c165\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c166\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c167\":{\"0\":0,\"1\
":0,\"2\":0,\"3\":1},\"c168\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c169\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c170\":{\"0\":0,\"1\":1,\"2\":0,\"3\":0},\"c171\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c172\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c173\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c174\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c175\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c176\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c177\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c178\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c179\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c180\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c181\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c182\":{\"0\":0,\"1\":1,\"2\":0,\"3\":0},\"c183\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c184\":{\"0\":1,\"1\":0,\"2\":0,\"3\":0},\"c185\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c186\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c187\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c188\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c189\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c190\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c191\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c192\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c193\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c194\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c195\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c196\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c197\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c198\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c199\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c200\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c201\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c202\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c203\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c204\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c205\":{\"0\":0,\"1\":0,\"2\":1,\"3\":0},\"c206\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c207\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c208\":{\"0\":1,\"1\":0,\"2\":0,\"3\":0},\"c209\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c210\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c211\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c212\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c213\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c214\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c215\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c216\":{\"0\":1,\"1\":0,\"2\":0,\"3\":0},\"c217\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c218\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c219\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c220\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c221\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c222\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c223\":{\"0\":0,\"1\":1,\"2\":0,\"3\":0},\"c224\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c225\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c226\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c227\":{\"0\":0,\"1\":1,\"2\":0,\"3\":0},\"c228\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c229\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c230\":{\"0\":0,\"1\":0,\"2\":0,\"3\":1},\"c231\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c232\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c233\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c234\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c235\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c236\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c237\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c238\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c239\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c240\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c241\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c242\":{\"0\":0,\"1\":1,\"2\":0,\"3\":0},\"c243\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c244\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c245\":{\"0\":1,\"1\":1,\"2\":0,\"3\":0},\"c246\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c247\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c248\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c249\":{\"0\":1,\"1\":0,\"2\":0,\"3\":0},\"c250\"
:{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c251\":{\"0\":0,\"1\":0,\"2\":0,\"3\":1},\"c252\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c253\":{\"0\":0,\"1\":0,\"2\":1,\"3\":0},\"c254\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c255\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c256\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c257\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c258\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c259\":{\"0\":1,\"1\":0,\"2\":0,\"3\":0},\"c260\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c261\":{\"0\":0,\"1\":0,\"2\":0,\"3\":1},\"c262\":{\"0\":1,\"1\":0,\"2\":0,\"3\":0},\"c263\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c264\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c265\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c266\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c267\":{\"0\":0,\"1\":1,\"2\":0,\"3\":0},\"c268\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c269\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c270\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c271\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c272\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c273\":{\"0\":1,\"1\":0,\"2\":0,\"3\":0},\"c274\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c275\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c276\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c277\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c278\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c279\":{\"0\":0,\"1\":0,\"2\":0,\"3\":1},\"c280\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c281\":{\"0\":0,\"1\":0,\"2\":1,\"3\":0},\"c282\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c283\":{\"0\":0,\"1\":1,\"2\":0,\"3\":0},\"c284\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c285\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c286\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c287\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c288\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c289\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c290\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c291\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c292\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c293\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c294\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c295\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c296\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c297\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c298\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c299\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c300\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c301\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c302\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c303\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c304\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c305\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c306\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c307\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c308\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c309\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c310\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c311\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c312\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c313\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c314\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c315\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c316\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c317\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c318\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c319\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c320\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c321\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c322\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c323\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c324\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c325\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c326\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c327\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c328\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c329\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c330\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c331\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c332\":{\"0\":0,\"1\":0,\"2\":0,\"3
\":0},\"c333\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c334\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c335\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c336\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c337\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c338\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c339\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c340\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c341\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c342\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c343\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c344\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c345\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c346\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c347\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c348\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c349\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c350\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c351\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c352\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c353\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c354\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c355\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c356\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c357\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c358\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c359\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c360\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c361\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c362\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c363\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c364\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c365\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c366\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c367\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c368\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c369\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c370\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c371\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c372\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c373\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c374\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c375\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c376\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c377\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c378\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c379\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c380\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c381\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c382\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c383\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c384\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c385\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c386\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c387\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c388\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c389\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c390\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c391\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c392\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c393\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c394\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c395\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c396\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c397\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c398\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c399\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c400\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c401\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c402\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c403\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c404\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c405\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c406\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c407\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c408\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c409\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c410\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c411\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c412\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c413\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c414\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c415\":{\"0\":0,\"1\"
:0,\"2\":0,\"3\":0},\"c416\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c417\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c418\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c419\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c420\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c421\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c422\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c423\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c424\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c425\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c426\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c427\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c428\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c429\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c430\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c431\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c432\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c433\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c434\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c435\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c436\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c437\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c438\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c439\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c440\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c441\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c442\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c443\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c444\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c445\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c446\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c447\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c448\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c449\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c450\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c451\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c452\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c453\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c454\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c455\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c456\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c457\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c458\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c459\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c460\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c461\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c462\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c463\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c464\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c465\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c466\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c467\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c468\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c469\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c470\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c471\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c472\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c473\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c474\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c475\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c476\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c477\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c478\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c479\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c480\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c481\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c482\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c483\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c484\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c485\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c486\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c487\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c488\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c489\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c490\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c491\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c492\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c493\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c494\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c495\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c496\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c497\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c498\":
{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c499\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c500\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c501\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c502\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c503\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c504\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c505\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c506\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c507\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c508\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c509\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c510\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c511\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c512\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c513\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c514\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c515\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c516\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c517\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c518\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c519\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c520\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c521\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c522\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c523\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c524\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c525\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c526\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c527\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c528\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c529\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c530\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c531\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c532\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c533\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c534\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c535\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c536\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c537\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c538\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c539\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c540\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c541\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c542\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c543\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c544\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c545\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c546\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c547\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c548\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c549\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c550\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c551\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c552\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c553\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c554\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c555\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c556\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c557\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c558\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c559\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c560\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c561\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c562\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c563\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c564\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c565\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c566\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c567\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c568\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c569\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c570\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c571\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c572\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c573\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c574\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c575\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c576\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c577\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c578\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c579\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c580\":{\"0\":0,\"1\":0,\"2\":0,\"3\
":0},\"c581\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c582\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c583\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c584\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c585\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c586\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c587\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c588\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c589\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c590\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c591\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c592\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c593\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c594\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c595\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c596\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c597\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c598\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c599\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c600\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c601\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c602\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c603\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c604\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c605\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c606\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c607\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c608\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c609\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c610\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c611\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c612\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c613\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c614\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c615\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c616\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c617\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c618\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c619\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c620\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c621\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c622\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c623\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c624\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c625\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c626\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c627\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c628\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c629\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c630\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c631\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c632\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c633\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c634\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c635\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c636\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c637\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c638\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c639\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c640\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c641\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c642\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c643\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c644\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c645\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c646\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c647\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c648\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c649\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c650\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c651\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c652\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c653\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c654\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c655\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c656\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c657\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c658\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c659\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c660\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c661\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c662\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c663\":{\"0\":0,\"1\":
0,\"2\":0,\"3\":0},\"c664\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c665\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c666\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c667\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c668\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c669\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c670\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c671\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c672\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c673\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c674\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c675\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c676\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c677\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c678\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c679\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c680\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c681\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c682\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c683\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c684\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c685\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c686\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c687\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c688\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c689\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c690\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c691\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c692\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c693\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c694\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c695\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c696\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c697\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c698\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c699\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c700\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c701\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c702\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c703\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c704\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c705\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c706\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c707\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c708\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c709\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c710\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c711\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c712\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c713\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c714\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c715\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c716\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c717\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c718\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c719\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c720\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c721\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c722\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c723\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c724\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c725\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c726\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c727\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c728\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c729\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c730\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c731\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c732\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c733\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c734\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c735\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c736\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c737\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c738\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c739\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c740\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c741\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c742\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c743\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c744\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c745\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c746\":{
\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c747\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c748\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c749\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c750\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c751\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c752\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c753\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c754\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c755\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c756\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c757\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c758\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c759\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c760\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c761\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c762\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c763\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c764\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c765\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c766\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c767\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c768\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c769\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c770\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c771\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c772\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c773\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c774\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c775\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c776\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c777\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c778\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c779\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c780\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c781\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c782\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c783\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c784\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c785\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c786\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c787\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c788\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c789\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c790\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c791\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c792\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c793\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c794\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c795\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c796\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c797\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c798\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c799\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c800\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c801\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c802\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c803\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c804\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c805\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c806\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c807\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c808\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c809\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c810\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c811\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c812\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c813\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c814\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c815\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c816\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c817\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c818\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c819\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c820\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c821\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c822\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c823\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c824\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c825\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c826\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c827\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c828\":{\"0\":0,\"1\":0,\"2\":0,\"3\"
:0},\"c829\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c830\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c831\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c832\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c833\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c834\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c835\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c836\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c837\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c838\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c839\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c840\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c841\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c842\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c843\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c844\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c845\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c846\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c847\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c848\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c849\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c850\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c851\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c852\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c853\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c854\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c855\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c856\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c857\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c858\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c859\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c860\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c861\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c862\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c863\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c864\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c865\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c866\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c867\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c868\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c869\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c870\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c871\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c872\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c873\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c874\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c875\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c876\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c877\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c878\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c879\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c880\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c881\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c882\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c883\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c884\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c885\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c886\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c887\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c888\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c889\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c890\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c891\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c892\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c893\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c894\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c895\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c896\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c897\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c898\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c899\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c900\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c901\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c902\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c903\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c904\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c905\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c906\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c907\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c908\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c909\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c910\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c911\":{\"0\":0,\"1\":0
,\"2\":0,\"3\":0},\"c912\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c913\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c914\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c915\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c916\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c917\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c918\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c919\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c920\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c921\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c922\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c923\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c924\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c925\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c926\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c927\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c928\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c929\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c930\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c931\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c932\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c933\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c934\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c935\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c936\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c937\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c938\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c939\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c940\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c941\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c942\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c943\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c944\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c945\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c946\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c947\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c948\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c949\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c950\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c951\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c952\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c953\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c954\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c955\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c956\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c957\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c958\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c959\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c960\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c961\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c962\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c963\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c964\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c965\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c966\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c967\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c968\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c969\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c970\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c971\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c972\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c973\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c974\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c975\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c976\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c977\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c978\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c979\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c980\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c981\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c982\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c983\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c984\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c985\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c986\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c987\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c988\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c989\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c990\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c991\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c992\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c993\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c994\":{\
"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c995\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c996\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c997\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c998\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c999\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1000\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1001\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1002\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1003\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1004\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1005\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1006\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1007\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1008\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1009\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1010\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1011\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1012\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1013\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1014\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1015\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1016\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1017\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1018\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1019\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1020\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1021\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1022\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1023\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1024\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1025\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1026\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1027\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1028\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1029\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1030\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1031\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1032\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1033\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1034\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1035\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1036\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1037\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1038\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1039\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1040\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1041\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1042\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1043\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1044\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1045\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1046\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1047\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1048\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1049\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1050\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1051\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1052\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1053\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1054\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1055\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1056\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1057\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1058\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1059\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1060\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1061\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1062\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1063\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1064\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1065\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1066\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1067\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1068\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1069\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1070\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1071\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1072\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1073\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1074\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1075\
":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1076\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1077\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1078\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1079\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1080\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1081\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1082\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1083\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1084\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1085\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1086\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1087\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1088\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1089\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1090\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1091\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1092\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1093\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1094\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1095\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1096\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1097\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1098\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1099\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1100\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1101\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1102\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1103\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1104\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1105\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1106\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1107\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1108\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1109\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1110\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1111\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1112\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1113\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1114\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1115\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1116\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1117\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1118\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1119\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1120\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1121\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1122\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1123\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1124\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1125\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1126\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1127\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1128\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1129\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1130\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1131\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1132\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1133\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1134\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1135\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1136\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1137\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1138\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1139\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1140\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1141\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1142\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1143\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1144\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1145\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1146\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1147\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1148\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1149\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1150\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1151\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1152\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1153\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1154\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1155\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0}
,\"c1156\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1157\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1158\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1159\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1160\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1161\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1162\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1163\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1164\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1165\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1166\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1167\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1168\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1169\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1170\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1171\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1172\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1173\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1174\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1175\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1176\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1177\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1178\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1179\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1180\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1181\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1182\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1183\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1184\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1185\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1186\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1187\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1188\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1189\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1190\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1191\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1192\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1193\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1194\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1195\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1196\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1197\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1198\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1199\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1200\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1201\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1202\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1203\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1204\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1205\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1206\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1207\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1208\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1209\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1210\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1211\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1212\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1213\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1214\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1215\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1216\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1217\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1218\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1219\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1220\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1221\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1222\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1223\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1224\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1225\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1226\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1227\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1228\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1229\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1230\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1231\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1232\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1233\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1234\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1235\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1236\":{\"0\":0,\"1\":0,\"2\":0
,\"3\":0},\"c1237\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1238\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1239\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1240\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1241\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1242\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1243\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1244\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1245\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1246\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1247\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1248\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1249\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1250\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1251\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1252\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1253\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1254\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1255\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1256\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1257\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1258\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1259\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1260\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1261\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1262\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1263\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1264\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1265\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1266\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1267\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1268\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1269\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1270\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1271\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1272\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1273\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1274\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1275\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1276\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1277\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1278\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1279\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1280\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1281\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1282\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1283\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1284\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1285\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1286\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1287\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1288\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1289\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1290\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1291\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1292\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1293\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1294\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1295\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1296\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1297\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1298\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1299\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1300\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1301\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1302\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1303\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1304\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1305\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1306\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1307\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1308\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1309\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1310\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1311\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1312\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1313\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1314\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1315\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1316\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1317\":{\"0\":0,\"1\":
0,\"2\":0,\"3\":0},\"c1318\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1319\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1320\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1321\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1322\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1323\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1324\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1325\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1326\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1327\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1328\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1329\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1330\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1331\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1332\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1333\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1334\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1335\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1336\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1337\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1338\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1339\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1340\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1341\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1342\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1343\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1344\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1345\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1346\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1347\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1348\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1349\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1350\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1351\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1352\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1353\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1354\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1355\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1356\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1357\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1358\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1359\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1360\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1361\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1362\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1363\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1364\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1365\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1366\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1367\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1368\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1369\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1370\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1371\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1372\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1373\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1374\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1375\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1376\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1377\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1378\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1379\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1380\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1381\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1382\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1383\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1384\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1385\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1386\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1387\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1388\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1389\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1390\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1391\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1392\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1393\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1394\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1395\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1396\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1397\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1398\":{\"0\"
:0,\"1\":0,\"2\":0,\"3\":0},\"c1399\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1400\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1401\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1402\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1403\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1404\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1405\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1406\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1407\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1408\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1409\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1410\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1411\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1412\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1413\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1414\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1415\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1416\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1417\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1418\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1419\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1420\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1421\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1422\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1423\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1424\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1425\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1426\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1427\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1428\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1429\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1430\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1431\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1432\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1433\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1434\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1435\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1436\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1437\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1438\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1439\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1440\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1441\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1442\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1443\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1444\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1445\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1446\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1447\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1448\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1449\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1450\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1451\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1452\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1453\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1454\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1455\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1456\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1457\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1458\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1459\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1460\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1461\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1462\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1463\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1464\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1465\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1466\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1467\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1468\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1469\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1470\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1471\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1472\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1473\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1474\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1475\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1476\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1477\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1478\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1479
\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1480\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1481\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1482\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1483\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1484\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1485\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1486\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1487\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1488\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1489\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1490\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1491\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1492\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1493\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1494\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1495\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1496\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1497\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1498\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1499\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1500\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1501\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1502\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1503\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1504\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1505\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1506\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1507\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1508\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1509\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1510\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1511\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1512\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1513\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1514\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1515\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1516\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1517\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1518\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1519\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1520\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1521\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1522\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1523\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1524\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1525\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1526\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1527\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1528\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1529\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1530\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1531\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1532\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1533\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1534\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1535\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1536\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1537\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1538\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1539\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1540\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1541\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1542\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1543\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1544\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1545\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1546\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1547\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1548\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1549\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1550\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1551\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1552\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1553\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1554\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1555\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1556\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1557\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1558\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1559\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0
},\"c1560\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1561\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1562\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1563\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1564\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1565\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1566\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1567\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1568\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1569\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1570\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1571\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1572\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1573\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1574\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1575\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1576\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1577\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1578\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1579\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1580\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1581\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1582\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1583\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1584\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1585\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1586\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1587\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1588\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1589\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1590\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1591\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1592\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1593\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1594\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1595\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1596\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1597\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1598\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1599\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1600\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1601\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1602\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1603\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1604\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1605\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1606\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1607\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1608\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1609\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1610\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1611\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1612\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1613\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1614\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1615\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1616\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1617\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1618\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1619\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1620\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1621\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1622\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1623\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1624\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1625\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1626\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1627\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1628\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1629\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1630\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1631\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1632\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1633\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1634\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1635\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1636\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1637\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1638\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1639\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1640\":{\"0\":0,\"1\":0,\"2\":
0,\"3\":0},\"c1641\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1642\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1643\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1644\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1645\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1646\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1647\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1648\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1649\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1650\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1651\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1652\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1653\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1654\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1655\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1656\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1657\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1658\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1659\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1660\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1661\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1662\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1663\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1664\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1665\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1666\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1667\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1668\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1669\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1670\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1671\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1672\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1673\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1674\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1675\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1676\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1677\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1678\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1679\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1680\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1681\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1682\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1683\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1684\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1685\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1686\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1687\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1688\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1689\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1690\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1691\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1692\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1693\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1694\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1695\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1696\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1697\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1698\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1699\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1700\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1701\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1702\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1703\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1704\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1705\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1706\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1707\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1708\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1709\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1710\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1711\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1712\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1713\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1714\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1715\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1716\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1717\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1718\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1719\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1720\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1721\":{\"0\":0,\"1\"
:0,\"2\":0,\"3\":0},\"c1722\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1723\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1724\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1725\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1726\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1727\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1728\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1729\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1730\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1731\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1732\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1733\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1734\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1735\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1736\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1737\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1738\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1739\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1740\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1741\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1742\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1743\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1744\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1745\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1746\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1747\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1748\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1749\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1750\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1751\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1752\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1753\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1754\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1755\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1756\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1757\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1758\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1759\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1760\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1761\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1762\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1763\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1764\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1765\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1766\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1767\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1768\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1769\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1770\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1771\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1772\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1773\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1774\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1775\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1776\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1777\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1778\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1779\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1780\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1781\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1782\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1783\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1784\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1785\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1786\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1787\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1788\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1789\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1790\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1791\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1792\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1793\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1794\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1795\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1796\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1797\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1798\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1799\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1800\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1801\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1802\":{\"0\
":0,\"1\":0,\"2\":0,\"3\":0},\"c1803\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1804\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1805\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1806\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1807\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1808\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1809\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1810\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1811\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1812\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1813\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1814\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1815\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1816\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1817\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1818\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1819\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1820\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1821\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1822\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1823\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1824\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1825\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1826\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1827\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1828\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1829\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1830\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1831\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1832\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1833\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1834\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1835\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1836\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1837\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1838\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1839\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1840\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1841\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1842\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1843\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1844\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1845\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1846\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1847\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1848\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1849\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1850\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1851\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1852\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1853\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1854\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1855\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1856\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1857\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1858\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1859\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1860\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1861\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1862\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1863\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1864\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1865\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1866\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1867\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1868\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1869\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1870\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1871\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1872\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1873\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1874\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1875\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1876\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1877\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1878\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1879\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1880\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1881\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1882\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c188
3\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1884\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1885\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1886\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1887\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1888\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1889\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1890\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1891\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1892\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1893\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1894\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1895\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1896\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1897\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1898\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1899\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1900\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1901\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1902\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1903\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1904\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1905\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1906\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1907\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1908\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1909\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1910\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1911\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1912\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1913\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1914\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1915\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1916\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1917\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1918\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1919\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1920\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1921\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1922\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1923\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1924\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1925\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1926\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1927\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1928\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1929\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1930\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1931\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1932\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1933\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1934\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1935\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1936\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1937\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1938\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1939\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1940\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1941\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1942\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1943\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1944\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1945\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1946\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1947\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1948\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1949\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1950\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1951\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1952\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1953\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1954\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1955\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1956\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1957\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1958\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1959\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1960\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1961\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1962\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1963\":{\"0\":0,\"1\":0,\"2\":0,\"3\":
0},\"c1964\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1965\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1966\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1967\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1968\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1969\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1970\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1971\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1972\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1973\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1974\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1975\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1976\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1977\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1978\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1979\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1980\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1981\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1982\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1983\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1984\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1985\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1986\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1987\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1988\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1989\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1990\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1991\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1992\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1993\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1994\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1995\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1996\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1997\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1998\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c1999\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0},\"c2000\":{\"0\":0,\"1\":0,\"2\":0,\"3\":0}}"}}]
| true | 1 |
<start_data_description><data_path>wordanddoc2vec-sample-data/wd2v.csv:
<column_names>
An unnamed index column ('Unnamed: 0') followed by 2000 feature columns named 'c1' through 'c2000'.
<column_types>
'Unnamed: 0' is of type object; every feature column 'c1' through 'c2000' is int64.
<dataframe_Summary>
Each feature column has a count of 10000, a minimum of 0, quartiles of 0, and a maximum of 1, with per-column means of roughly 0.005 to 0.008, so the table is a sparse binary (0/1) indicator matrix.
0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c83': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c84': {'count': 10000.0, 'mean': 0.0073, 'std': 0.08513186679190318, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c85': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c86': {'count': 10000.0, 'mean': 0.007, 'std': 0.08337682633392177, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c87': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c88': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c89': {'count': 10000.0, 'mean': 0.0065, 'std': 0.0803641452364421, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c90': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563314, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c91': {'count': 10000.0, 'mean': 0.0071, 'std': 0.08396603497547654, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c92': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c93': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c94': {'count': 10000.0, 'mean': 0.0064, 'std': 0.07974757656253109, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c95': {'count': 10000.0, 'mean': 0.0072, 'std': 0.08455101943494679, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c96': {'count': 10000.0, 'mean': 0.0059, 'std': 0.07658835797729162, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c97': {'count': 10000.0, 'mean': 0.0073, 'std': 0.08513186679190318, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c98': {'count': 10000.0, 'mean': 0.0073, 'std': 0.0851318667919032, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c99': {'count': 10000.0, 'mean': 0.0071, 'std': 0.08396603497547654, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c100': {'count': 10000.0, 'mean': 0.0054, 'std': 0.07328967961257418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c101': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531584, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c102': {'count': 10000.0, 'mean': 0.0054, 'std': 0.07328967961257418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c103': {'count': 10000.0, 'mean': 0.007, 'std': 0.08337682633392178, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c104': {'count': 10000.0, 'mean': 0.0075, 'std': 0.08628148381573503, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c105': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c106': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c107': {'count': 10000.0, 'mean': 0.008, 'std': 0.08908868435086431, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c108': {'count': 10000.0, 'mean': 0.0071, 'std': 0.08396603497547654, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c109': {'count': 10000.0, 'mean': 0.0054, 'std': 0.07328967961257418, 'min': 0.0, '25%': 
0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c110': {'count': 10000.0, 'mean': 0.0055, 'std': 0.07396145637900171, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c111': {'count': 10000.0, 'mean': 0.0064, 'std': 0.0797475765625311, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c112': {'count': 10000.0, 'mean': 0.005, 'std': 0.0705372068468477, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c113': {'count': 10000.0, 'mean': 0.007, 'std': 0.08337682633392177, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c114': {'count': 10000.0, 'mean': 0.005, 'std': 0.0705372068468477, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c115': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c116': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c117': {'count': 10000.0, 'mean': 0.0075, 'std': 0.08628148381573501, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c118': {'count': 10000.0, 'mean': 0.0073, 'std': 0.08513186679190318, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c119': {'count': 10000.0, 'mean': 0.0064, 'std': 0.0797475765625311, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c120': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c121': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531584, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c122': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531584, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c123': {'count': 10000.0, 'mean': 0.0067, 'std': 0.08158293680395279, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c124': {'count': 10000.0, 'mean': 0.0061, 'std': 0.0778678132454865, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c125': {'count': 10000.0, 'mean': 0.0067, 'std': 0.08158293680395279, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c126': {'count': 10000.0, 'mean': 0.0069, 'std': 0.08278330331371629, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c127': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531584, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c128': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c129': {'count': 10000.0, 'mean': 0.0054, 'std': 0.07328967961257418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c130': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c131': {'count': 10000.0, 'mean': 0.0069, 'std': 0.08278330331371629, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c132': {'count': 10000.0, 'mean': 0.006, 'std': 0.07723079994177172, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c133': {'count': 10000.0, 'mean': 0.0064, 'std': 0.07974757656253109, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c134': {'count': 10000.0, 'mean': 0.006, 'std': 0.07723079994177172, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c135': {'count': 10000.0, 'mean': 0.0054, 'std': 0.07328967961257418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c136': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 
0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c137': {'count': 10000.0, 'mean': 0.0054, 'std': 0.07328967961257418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c138': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269417, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c139': {'count': 10000.0, 'mean': 0.0067, 'std': 0.08158293680395279, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c140': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c141': {'count': 10000.0, 'mean': 0.0072, 'std': 0.0845510194349468, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c142': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c143': {'count': 10000.0, 'mean': 0.0074, 'std': 0.08570866115778351, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c144': {'count': 10000.0, 'mean': 0.0067, 'std': 0.08158293680395279, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c145': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c146': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c147': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c148': {'count': 10000.0, 'mean': 0.0052, 'std': 0.07192688890626907, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c149': {'count': 10000.0, 'mean': 0.0056, 'std': 0.07462705219752398, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c150': {'count': 10000.0, 'mean': 0.008, 'std': 0.08908868435086431, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c151': {'count': 10000.0, 'mean': 0.007, 'std': 0.08337682633392177, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c152': {'count': 10000.0, 'mean': 0.0064, 'std': 0.0797475765625311, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c153': {'count': 10000.0, 'mean': 0.005, 'std': 0.0705372068468477, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c154': {'count': 10000.0, 'mean': 0.0065, 'std': 0.0803641452364421, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c155': {'count': 10000.0, 'mean': 0.0069, 'std': 0.08278330331371629, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c156': {'count': 10000.0, 'mean': 0.0067, 'std': 0.08158293680395279, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c157': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531584, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c158': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563314, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c159': {'count': 10000.0, 'mean': 0.0064, 'std': 0.07974757656253109, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c160': {'count': 10000.0, 'mean': 0.0065, 'std': 0.08036414523644209, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c161': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c162': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c163': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 
0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c164': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563314, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c165': {'count': 10000.0, 'mean': 0.005, 'std': 0.0705372068468477, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c166': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269417, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c167': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c168': {'count': 10000.0, 'mean': 0.0077, 'std': 0.08741552578011953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c169': {'count': 10000.0, 'mean': 0.0065, 'std': 0.0803641452364421, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c170': {'count': 10000.0, 'mean': 0.0086, 'std': 0.09234117548130372, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c171': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c172': {'count': 10000.0, 'mean': 0.0071, 'std': 0.08396603497547654, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c173': {'count': 10000.0, 'mean': 0.0067, 'std': 0.08158293680395279, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c174': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346915, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c175': {'count': 10000.0, 'mean': 0.0072, 'std': 0.08455101943494678, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c176': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c177': {'count': 10000.0, 'mean': 0.0067, 'std': 0.08158293680395279, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c178': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c179': {'count': 10000.0, 'mean': 0.0056, 'std': 0.07462705219752398, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c180': {'count': 10000.0, 'mean': 0.0065, 'std': 0.0803641452364421, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c181': {'count': 10000.0, 'mean': 0.0069, 'std': 0.08278330331371629, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c182': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531584, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c183': {'count': 10000.0, 'mean': 0.0064, 'std': 0.07974757656253109, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c184': {'count': 10000.0, 'mean': 0.0071, 'std': 0.08396603497547654, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c185': {'count': 10000.0, 'mean': 0.0061, 'std': 0.0778678132454865, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c186': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c187': {'count': 10000.0, 'mean': 0.0052, 'std': 0.07192688890626908, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c188': {'count': 10000.0, 'mean': 0.0067, 'std': 0.08158293680395279, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c189': {'count': 10000.0, 'mean': 0.0067, 'std': 0.08158293680395279, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c190': {'count': 10000.0, 'mean': 0.0048, 'std': 0.06911901144963212, 'min': 0.0, 
'25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c191': {'count': 10000.0, 'mean': 0.0071, 'std': 0.08396603497547654, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c192': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563314, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c193': {'count': 10000.0, 'mean': 0.0069, 'std': 0.08278330331371629, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c194': {'count': 10000.0, 'mean': 0.0052, 'std': 0.07192688890626908, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c195': {'count': 10000.0, 'mean': 0.006, 'std': 0.0772307999417717, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c196': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c197': {'count': 10000.0, 'mean': 0.0083, 'std': 0.09073000161644071, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c198': {'count': 10000.0, 'mean': 0.006, 'std': 0.07723079994177172, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c199': {'count': 10000.0, 'mean': 0.0061, 'std': 0.0778678132454865, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c200': {'count': 10000.0, 'mean': 0.0065, 'std': 0.0803641452364421, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c201': {'count': 10000.0, 'mean': 0.0064, 'std': 0.0797475765625311, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c202': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c203': {'count': 10000.0, 'mean': 0.006, 'std': 0.0772307999417717, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c204': {'count': 10000.0, 'mean': 0.0056, 'std': 0.07462705219752398, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c205': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c206': {'count': 10000.0, 'mean': 0.0045, 'std': 0.06693428134970884, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c207': {'count': 10000.0, 'mean': 0.007, 'std': 0.08337682633392177, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c208': {'count': 10000.0, 'mean': 0.0069, 'std': 0.08278330331371629, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c209': {'count': 10000.0, 'mean': 0.0069, 'std': 0.08278330331371629, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c210': {'count': 10000.0, 'mean': 0.0076, 'std': 0.08685041335209605, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c211': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c212': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c213': {'count': 10000.0, 'mean': 0.0074, 'std': 0.08570866115778351, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c214': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c215': {'count': 10000.0, 'mean': 0.0073, 'std': 0.0851318667919032, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c216': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c217': {'count': 10000.0, 'mean': 0.0064, 'std': 0.07974757656253109, 'min': 0.0, 
'25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c218': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269417, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c219': {'count': 10000.0, 'mean': 0.0052, 'std': 0.07192688890626908, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c220': {'count': 10000.0, 'mean': 0.005, 'std': 0.0705372068468477, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c221': {'count': 10000.0, 'mean': 0.0055, 'std': 0.0739614563790017, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c222': {'count': 10000.0, 'mean': 0.0064, 'std': 0.07974757656253109, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c223': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c224': {'count': 10000.0, 'mean': 0.0065, 'std': 0.0803641452364421, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c225': {'count': 10000.0, 'mean': 0.0049, 'std': 0.0698317810725516, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c226': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269417, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c227': {'count': 10000.0, 'mean': 0.0052, 'std': 0.07192688890626908, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c228': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c229': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c230': {'count': 10000.0, 'mean': 0.0067, 'std': 0.08158293680395279, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c231': {'count': 10000.0, 'mean': 0.005, 'std': 0.0705372068468477, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c232': {'count': 10000.0, 'mean': 0.0053, 'std': 0.07261155034651424, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c233': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c234': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531584, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c235': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c236': {'count': 10000.0, 'mean': 0.006, 'std': 0.0772307999417717, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c237': {'count': 10000.0, 'mean': 0.0055, 'std': 0.07396145637900171, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c238': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c239': {'count': 10000.0, 'mean': 0.0047, 'std': 0.06839866839189034, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c240': {'count': 10000.0, 'mean': 0.0069, 'std': 0.08278330331371629, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c241': {'count': 10000.0, 'mean': 0.007, 'std': 0.08337682633392177, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c242': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c243': {'count': 10000.0, 'mean': 0.0076, 'std': 0.08685041335209605, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c244': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346915, 'min': 0.0, 
'25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c245': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531584, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c246': {'count': 10000.0, 'mean': 0.0059, 'std': 0.07658835797729162, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c247': {'count': 10000.0, 'mean': 0.006, 'std': 0.0772307999417717, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c248': {'count': 10000.0, 'mean': 0.0056, 'std': 0.07462705219752398, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c249': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531584, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c250': {'count': 10000.0, 'mean': 0.0047, 'std': 0.06839866839189034, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c251': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c252': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c253': {'count': 10000.0, 'mean': 0.0076, 'std': 0.08685041335209605, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c254': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c255': {'count': 10000.0, 'mean': 0.006, 'std': 0.07723079994177172, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c256': {'count': 10000.0, 'mean': 0.0056, 'std': 0.07462705219752398, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c257': {'count': 10000.0, 'mean': 0.0071, 'std': 0.08396603497547654, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c258': {'count': 10000.0, 'mean': 0.006, 'std': 0.07723079994177172, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c259': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c260': {'count': 10000.0, 'mean': 0.007, 'std': 0.08337682633392177, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c261': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c262': {'count': 10000.0, 'mean': 0.0055, 'std': 0.07396145637900171, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c263': {'count': 10000.0, 'mean': 0.0054, 'std': 0.07328967961257418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c264': {'count': 10000.0, 'mean': 0.0052, 'std': 0.07192688890626908, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c265': {'count': 10000.0, 'mean': 0.0065, 'std': 0.0803641452364421, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c266': {'count': 10000.0, 'mean': 0.0056, 'std': 0.07462705219752398, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c267': {'count': 10000.0, 'mean': 0.0051, 'std': 0.07123550694523746, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c268': {'count': 10000.0, 'mean': 0.007, 'std': 0.08337682633392177, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c269': {'count': 10000.0, 'mean': 0.0082, 'std': 0.09018632577800104, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c270': {'count': 10000.0, 'mean': 0.0071, 'std': 0.08396603497547654, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c271': {'count': 10000.0, 'mean': 0.0049, 'std': 0.0698317810725516, 'min': 
0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c272': {'count': 10000.0, 'mean': 0.006, 'std': 0.07723079994177172, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c273': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c274': {'count': 10000.0, 'mean': 0.0067, 'std': 0.08158293680395279, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c275': {'count': 10000.0, 'mean': 0.0056, 'std': 0.07462705219752398, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c276': {'count': 10000.0, 'mean': 0.0054, 'std': 0.07328967961257418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c277': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c278': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563316, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c279': {'count': 10000.0, 'mean': 0.0069, 'std': 0.08278330331371629, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c280': {'count': 10000.0, 'mean': 0.0055, 'std': 0.0739614563790017, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c281': {'count': 10000.0, 'mean': 0.0076, 'std': 0.08685041335209603, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c282': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c283': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c284': {'count': 10000.0, 'mean': 0.006, 'std': 0.07723079994177172, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c285': {'count': 10000.0, 'mean': 0.0056, 'std': 0.07462705219752398, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c286': {'count': 10000.0, 'mean': 0.0051, 'std': 0.07123550694523746, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c287': {'count': 10000.0, 'mean': 0.0055, 'std': 0.0739614563790017, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c288': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c289': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c290': {'count': 10000.0, 'mean': 0.008, 'std': 0.08908868435086431, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c291': {'count': 10000.0, 'mean': 0.0061, 'std': 0.0778678132454865, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c292': {'count': 10000.0, 'mean': 0.008, 'std': 0.08908868435086433, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c293': {'count': 10000.0, 'mean': 0.0049, 'std': 0.0698317810725516, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c294': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531585, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c295': {'count': 10000.0, 'mean': 0.0053, 'std': 0.07261155034651426, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c296': {'count': 10000.0, 'mean': 0.0065, 'std': 0.0803641452364421, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c297': {'count': 10000.0, 'mean': 0.0054, 'std': 0.07328967961257418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c298': {'count': 10000.0, 'mean': 0.0071, 'std': 0.08396603497547654, 'min': 
0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c299': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269417, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c300': {'count': 10000.0, 'mean': 0.0072, 'std': 0.0845510194349468, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c301': {'count': 10000.0, 'mean': 0.0071, 'std': 0.08396603497547654, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c302': {'count': 10000.0, 'mean': 0.0073, 'std': 0.08513186679190318, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c303': {'count': 10000.0, 'mean': 0.0046, 'std': 0.06767051004531426, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c304': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269417, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c305': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531585, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c306': {'count': 10000.0, 'mean': 0.007, 'std': 0.08337682633392177, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c307': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346918, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c308': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c309': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269417, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c310': {'count': 10000.0, 'mean': 0.0052, 'std': 0.07192688890626908, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c311': {'count': 10000.0, 'mean': 0.0077, 'std': 0.08741552578011953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c312': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269417, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c313': {'count': 10000.0, 'mean': 0.0067, 'std': 0.08158293680395279, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c314': {'count': 10000.0, 'mean': 0.0081, 'std': 0.08963924095702695, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c315': {'count': 10000.0, 'mean': 0.0065, 'std': 0.0803641452364421, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c316': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563314, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c317': {'count': 10000.0, 'mean': 0.0076, 'std': 0.08685041335209605, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c318': {'count': 10000.0, 'mean': 0.0051, 'std': 0.07123550694523746, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c319': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c320': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563314, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c321': {'count': 10000.0, 'mean': 0.0076, 'std': 0.08685041335209605, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c322': {'count': 10000.0, 'mean': 0.0064, 'std': 0.07974757656253109, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c323': {'count': 10000.0, 'mean': 0.0053, 'std': 0.07261155034651424, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c324': {'count': 10000.0, 'mean': 0.007, 'std': 0.08337682633392177, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c325': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563314, 
'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c326': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563314, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c327': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c328': {'count': 10000.0, 'mean': 0.0055, 'std': 0.07396145637900171, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c329': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c330': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c331': {'count': 10000.0, 'mean': 0.006, 'std': 0.0772307999417717, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c332': {'count': 10000.0, 'mean': 0.006, 'std': 0.07723079994177172, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c333': {'count': 10000.0, 'mean': 0.0067, 'std': 0.08158293680395279, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c334': {'count': 10000.0, 'mean': 0.0079, 'std': 0.08853459119114819, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c335': {'count': 10000.0, 'mean': 0.0055, 'std': 0.0739614563790017, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c336': {'count': 10000.0, 'mean': 0.0064, 'std': 0.0797475765625311, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c337': {'count': 10000.0, 'mean': 0.0072, 'std': 0.08455101943494679, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c338': {'count': 10000.0, 'mean': 0.0064, 'std': 0.07974757656253109, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c339': {'count': 10000.0, 'mean': 0.0064, 'std': 0.07974757656253109, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c340': {'count': 10000.0, 'mean': 0.0053, 'std': 0.07261155034651424, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c341': {'count': 10000.0, 'mean': 0.0056, 'std': 0.07462705219752398, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c342': {'count': 10000.0, 'mean': 0.0048, 'std': 0.06911901144963212, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c343': {'count': 10000.0, 'mean': 0.007, 'std': 0.08337682633392177, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c344': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c345': {'count': 10000.0, 'mean': 0.0073, 'std': 0.08513186679190318, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c346': {'count': 10000.0, 'mean': 0.0065, 'std': 0.0803641452364421, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c347': {'count': 10000.0, 'mean': 0.0061, 'std': 0.0778678132454865, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c348': {'count': 10000.0, 'mean': 0.0061, 'std': 0.0778678132454865, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c349': {'count': 10000.0, 'mean': 0.0065, 'std': 0.0803641452364421, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c350': {'count': 10000.0, 'mean': 0.0065, 'std': 0.0803641452364421, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c351': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c352': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531584, 
'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c353': {'count': 10000.0, 'mean': 0.006, 'std': 0.0772307999417717, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c354': {'count': 10000.0, 'mean': 0.0069, 'std': 0.08278330331371629, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c355': {'count': 10000.0, 'mean': 0.0084, 'std': 0.09127032939188251, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c356': {'count': 10000.0, 'mean': 0.0072, 'std': 0.08455101943494679, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c357': {'count': 10000.0, 'mean': 0.0064, 'std': 0.07974757656253109, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c358': {'count': 10000.0, 'mean': 0.0065, 'std': 0.0803641452364421, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c359': {'count': 10000.0, 'mean': 0.0056, 'std': 0.07462705219752398, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c360': {'count': 10000.0, 'mean': 0.0071, 'std': 0.08396603497547654, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c361': {'count': 10000.0, 'mean': 0.005, 'std': 0.0705372068468477, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c362': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531584, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c363': {'count': 10000.0, 'mean': 0.0065, 'std': 0.0803641452364421, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c364': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c365': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531584, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c366': {'count': 10000.0, 'mean': 0.0073, 'std': 0.08513186679190318, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c367': {'count': 10000.0, 'mean': 0.0064, 'std': 0.0797475765625311, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c368': {'count': 10000.0, 'mean': 0.0065, 'std': 0.0803641452364421, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c369': {'count': 10000.0, 'mean': 0.0064, 'std': 0.07974757656253109, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c370': {'count': 10000.0, 'mean': 0.0069, 'std': 0.08278330331371629, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c371': {'count': 10000.0, 'mean': 0.0056, 'std': 0.07462705219752398, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c372': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531584, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c373': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c374': {'count': 10000.0, 'mean': 0.0069, 'std': 0.08278330331371629, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c375': {'count': 10000.0, 'mean': 0.0067, 'std': 0.08158293680395279, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c376': {'count': 10000.0, 'mean': 0.0055, 'std': 0.0739614563790017, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c377': {'count': 10000.0, 'mean': 0.0065, 'std': 0.0803641452364421, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c378': {'count': 10000.0, 'mean': 0.0056, 'std': 0.07462705219752398, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c379': {'count': 10000.0, 'mean': 0.0061, 'std': 0.07786781324548651, 
'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c380': {'count': 10000.0, 'mean': 0.0071, 'std': 0.08396603497547654, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c381': {'count': 10000.0, 'mean': 0.0076, 'std': 0.08685041335209605, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c382': {'count': 10000.0, 'mean': 0.0072, 'std': 0.0845510194349468, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c383': {'count': 10000.0, 'mean': 0.0067, 'std': 0.08158293680395279, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c384': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c385': {'count': 10000.0, 'mean': 0.0069, 'std': 0.08278330331371629, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c386': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c387': {'count': 10000.0, 'mean': 0.0054, 'std': 0.07328967961257417, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c388': {'count': 10000.0, 'mean': 0.0064, 'std': 0.0797475765625311, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c389': {'count': 10000.0, 'mean': 0.0065, 'std': 0.0803641452364421, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c390': {'count': 10000.0, 'mean': 0.0065, 'std': 0.0803641452364421, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c391': {'count': 10000.0, 'mean': 0.0054, 'std': 0.07328967961257417, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c392': {'count': 10000.0, 'mean': 0.0067, 'std': 0.08158293680395279, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c393': {'count': 10000.0, 'mean': 0.0072, 'std': 0.0845510194349468, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c394': {'count': 10000.0, 'mean': 0.0056, 'std': 0.07462705219752398, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c395': {'count': 10000.0, 'mean': 0.0067, 'std': 0.08158293680395279, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c396': {'count': 10000.0, 'mean': 0.0072, 'std': 0.08455101943494678, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c397': {'count': 10000.0, 'mean': 0.006, 'std': 0.07723079994177172, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c398': {'count': 10000.0, 'mean': 0.007, 'std': 0.08337682633392177, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c399': {'count': 10000.0, 'mean': 0.0054, 'std': 0.07328967961257418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c400': {'count': 10000.0, 'mean': 0.0076, 'std': 0.08685041335209605, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c401': {'count': 10000.0, 'mean': 0.0069, 'std': 0.08278330331371629, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c402': {'count': 10000.0, 'mean': 0.0065, 'std': 0.0803641452364421, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c403': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c404': {'count': 10000.0, 'mean': 0.007, 'std': 0.08337682633392177, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c405': {'count': 10000.0, 'mean': 0.0048, 'std': 0.06911901144963212, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c406': {'count': 10000.0, 'mean': 0.0079, 'std': 0.08853459119114819, 
'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c407': {'count': 10000.0, 'mean': 0.0069, 'std': 0.08278330331371629, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c408': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c409': {'count': 10000.0, 'mean': 0.0071, 'std': 0.08396603497547654, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c410': {'count': 10000.0, 'mean': 0.0043, 'std': 0.06543652033703642, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c411': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c412': {'count': 10000.0, 'mean': 0.0061, 'std': 0.07786781324548651, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c413': {'count': 10000.0, 'mean': 0.006, 'std': 0.07723079994177172, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c414': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563314, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c415': {'count': 10000.0, 'mean': 0.007, 'std': 0.08337682633392177, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c416': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346918, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c417': {'count': 10000.0, 'mean': 0.0065, 'std': 0.0803641452364421, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c418': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c419': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c420': {'count': 10000.0, 'mean': 0.0071, 'std': 0.08396603497547654, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c421': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c422': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c423': {'count': 10000.0, 'mean': 0.0054, 'std': 0.07328967961257418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c424': {'count': 10000.0, 'mean': 0.0056, 'std': 0.07462705219752398, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c425': {'count': 10000.0, 'mean': 0.006, 'std': 0.07723079994177172, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c426': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c427': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c428': {'count': 10000.0, 'mean': 0.0056, 'std': 0.07462705219752398, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c429': {'count': 10000.0, 'mean': 0.0069, 'std': 0.08278330331371629, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c430': {'count': 10000.0, 'mean': 0.0061, 'std': 0.0778678132454865, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c431': {'count': 10000.0, 'mean': 0.0073, 'std': 0.08513186679190318, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c432': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c433': {'count': 10000.0, 'mean': 0.0053, 'std': 
'max': 1.0}, 'c1000': {'count': 10000.0, 'mean': 0.0061, 'std': 0.0778678132454865, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1001': {'count': 10000.0, 'mean': 0.0069, 'std': 0.08278330331371629, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1002': {'count': 10000.0, 'mean': 0.006, 'std': 0.0772307999417717, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1003': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563316, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1004': {'count': 10000.0, 'mean': 0.005, 'std': 0.0705372068468477, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1005': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1006': {'count': 10000.0, 'mean': 0.0076, 'std': 0.08685041335209605, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1007': {'count': 10000.0, 'mean': 0.006, 'std': 0.0772307999417717, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1008': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1009': {'count': 10000.0, 'mean': 0.0069, 'std': 0.08278330331371629, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1010': {'count': 10000.0, 'mean': 0.006, 'std': 0.07723079994177172, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1011': {'count': 10000.0, 'mean': 0.0054, 'std': 0.07328967961257418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1012': {'count': 10000.0, 'mean': 0.0061, 'std': 0.0778678132454865, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1013': {'count': 10000.0, 'mean': 0.008, 'std': 0.08908868435086431, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1014': {'count': 10000.0, 'mean': 0.007, 'std': 0.08337682633392177, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1015': {'count': 10000.0, 'mean': 0.0071, 'std': 0.08396603497547654, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1016': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1017': {'count': 10000.0, 'mean': 0.006, 'std': 0.07723079994177172, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1018': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1019': {'count': 10000.0, 'mean': 0.0061, 'std': 0.0778678132454865, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1020': {'count': 10000.0, 'mean': 0.0055, 'std': 0.07396145637900171, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1021': {'count': 10000.0, 'mean': 0.0055, 'std': 0.07396145637900171, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1022': {'count': 10000.0, 'mean': 0.0065, 'std': 0.0803641452364421, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1023': {'count': 10000.0, 'mean': 0.0074, 'std': 0.08570866115778351, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1024': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1025': {'count': 10000.0, 'mean': 0.005, 'std': 0.0705372068468477, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1026': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, 
'50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1027': {'count': 10000.0, 'mean': 0.0047, 'std': 0.06839866839189034, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1028': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531584, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1029': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531584, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1030': {'count': 10000.0, 'mean': 0.005, 'std': 0.0705372068468477, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1031': {'count': 10000.0, 'mean': 0.0073, 'std': 0.0851318667919032, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1032': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1033': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346918, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1034': {'count': 10000.0, 'mean': 0.007, 'std': 0.08337682633392177, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1035': {'count': 10000.0, 'mean': 0.0065, 'std': 0.0803641452364421, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1036': {'count': 10000.0, 'mean': 0.0073, 'std': 0.08513186679190318, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1037': {'count': 10000.0, 'mean': 0.0072, 'std': 0.08455101943494679, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1038': {'count': 10000.0, 'mean': 0.0056, 'std': 0.07462705219752398, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1039': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1040': {'count': 10000.0, 'mean': 0.0053, 'std': 0.07261155034651424, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1041': {'count': 10000.0, 'mean': 0.0074, 'std': 0.08570866115778351, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1042': {'count': 10000.0, 'mean': 0.0055, 'std': 0.0739614563790017, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1043': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531584, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1044': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1045': {'count': 10000.0, 'mean': 0.0067, 'std': 0.08158293680395279, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1046': {'count': 10000.0, 'mean': 0.0055, 'std': 0.0739614563790017, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1047': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563314, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1048': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531584, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1049': {'count': 10000.0, 'mean': 0.0065, 'std': 0.0803641452364421, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1050': {'count': 10000.0, 'mean': 0.006, 'std': 0.0772307999417717, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1051': {'count': 10000.0, 'mean': 0.005, 'std': 0.0705372068468477, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1052': {'count': 10000.0, 'mean': 0.0069, 'std': 0.08278330331371629, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1053': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563314, 
'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1054': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1055': {'count': 10000.0, 'mean': 0.0076, 'std': 0.08685041335209605, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1056': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1057': {'count': 10000.0, 'mean': 0.0056, 'std': 0.07462705219752398, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1058': {'count': 10000.0, 'mean': 0.0071, 'std': 0.08396603497547654, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1059': {'count': 10000.0, 'mean': 0.0075, 'std': 0.08628148381573503, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1060': {'count': 10000.0, 'mean': 0.0074, 'std': 0.08570866115778351, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1061': {'count': 10000.0, 'mean': 0.0064, 'std': 0.0797475765625311, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1062': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1063': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1064': {'count': 10000.0, 'mean': 0.0061, 'std': 0.0778678132454865, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1065': {'count': 10000.0, 'mean': 0.0053, 'std': 0.07261155034651424, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1066': {'count': 10000.0, 'mean': 0.0064, 'std': 0.0797475765625311, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1067': {'count': 10000.0, 'mean': 0.0055, 'std': 0.0739614563790017, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1068': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531584, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1069': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1070': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1071': {'count': 10000.0, 'mean': 0.0064, 'std': 0.07974757656253109, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1072': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1073': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1074': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1075': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563314, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1076': {'count': 10000.0, 'mean': 0.0075, 'std': 0.08628148381573503, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1077': {'count': 10000.0, 'mean': 0.006, 'std': 0.07723079994177172, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1078': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1079': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269417, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1080': {'count': 10000.0, 'mean': 
0.006, 'std': 0.0772307999417717, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1081': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1082': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1083': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1084': {'count': 10000.0, 'mean': 0.0047, 'std': 0.06839866839189034, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1085': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1086': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563316, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1087': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1088': {'count': 10000.0, 'mean': 0.0065, 'std': 0.0803641452364421, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1089': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1090': {'count': 10000.0, 'mean': 0.0061, 'std': 0.0778678132454865, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1091': {'count': 10000.0, 'mean': 0.0055, 'std': 0.07396145637900171, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1092': {'count': 10000.0, 'mean': 0.0056, 'std': 0.07462705219752398, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1093': {'count': 10000.0, 'mean': 0.0065, 'std': 0.0803641452364421, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1094': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563316, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1095': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1096': {'count': 10000.0, 'mean': 0.007, 'std': 0.08337682633392177, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1097': {'count': 10000.0, 'mean': 0.0055, 'std': 0.0739614563790017, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1098': {'count': 10000.0, 'mean': 0.0054, 'std': 0.07328967961257418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1099': {'count': 10000.0, 'mean': 0.007, 'std': 0.08337682633392177, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1100': {'count': 10000.0, 'mean': 0.0055, 'std': 0.0739614563790017, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1101': {'count': 10000.0, 'mean': 0.0059, 'std': 0.07658835797729162, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1102': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1103': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1104': {'count': 10000.0, 'mean': 0.0067, 'std': 0.08158293680395279, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1105': {'count': 10000.0, 'mean': 0.0049, 'std': 0.0698317810725516, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1106': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531584, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1107': 
{'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1108': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1109': {'count': 10000.0, 'mean': 0.0052, 'std': 0.07192688890626908, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1110': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1111': {'count': 10000.0, 'mean': 0.0049, 'std': 0.0698317810725516, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1112': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1113': {'count': 10000.0, 'mean': 0.0064, 'std': 0.07974757656253109, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1114': {'count': 10000.0, 'mean': 0.0055, 'std': 0.0739614563790017, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1115': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1116': {'count': 10000.0, 'mean': 0.0061, 'std': 0.0778678132454865, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1117': {'count': 10000.0, 'mean': 0.0054, 'std': 0.07328967961257417, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1118': {'count': 10000.0, 'mean': 0.0072, 'std': 0.08455101943494679, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1119': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1120': {'count': 10000.0, 'mean': 0.0067, 'std': 0.08158293680395279, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1121': {'count': 10000.0, 'mean': 0.006, 'std': 0.0772307999417717, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1122': {'count': 10000.0, 'mean': 0.0048, 'std': 0.06911901144963212, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1123': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1124': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1125': {'count': 10000.0, 'mean': 0.0053, 'std': 0.07261155034651424, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1126': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531584, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1127': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563314, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1128': {'count': 10000.0, 'mean': 0.0049, 'std': 0.0698317810725516, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1129': {'count': 10000.0, 'mean': 0.0061, 'std': 0.0778678132454865, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1130': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1131': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1132': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1133': {'count': 10000.0, 'mean': 0.0074, 'std': 0.08570866115778351, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 
0.0, 'max': 1.0}, 'c1134': {'count': 10000.0, 'mean': 0.0047, 'std': 0.06839866839189034, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1135': {'count': 10000.0, 'mean': 0.0061, 'std': 0.0778678132454865, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1136': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563314, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1137': {'count': 10000.0, 'mean': 0.0069, 'std': 0.08278330331371629, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1138': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1139': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1140': {'count': 10000.0, 'mean': 0.0053, 'std': 0.07261155034651424, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1141': {'count': 10000.0, 'mean': 0.0061, 'std': 0.0778678132454865, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1142': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1143': {'count': 10000.0, 'mean': 0.0069, 'std': 0.08278330331371629, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1144': {'count': 10000.0, 'mean': 0.0053, 'std': 0.07261155034651424, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1145': {'count': 10000.0, 'mean': 0.0056, 'std': 0.074627052197524, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1146': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531584, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1147': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531585, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1148': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1149': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563314, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1150': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1151': {'count': 10000.0, 'mean': 0.0052, 'std': 0.07192688890626908, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1152': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531584, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1153': {'count': 10000.0, 'mean': 0.0075, 'std': 0.08628148381573501, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1154': {'count': 10000.0, 'mean': 0.0061, 'std': 0.0778678132454865, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1155': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1156': {'count': 10000.0, 'mean': 0.0054, 'std': 0.07328967961257418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1157': {'count': 10000.0, 'mean': 0.0055, 'std': 0.0739614563790017, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1158': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563314, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1159': {'count': 10000.0, 'mean': 0.0052, 'std': 0.07192688890626908, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1160': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269418, 'min': 0.0, 
'25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1161': {'count': 10000.0, 'mean': 0.006, 'std': 0.0772307999417717, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1162': {'count': 10000.0, 'mean': 0.0061, 'std': 0.0778678132454865, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1163': {'count': 10000.0, 'mean': 0.0054, 'std': 0.07328967961257417, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1164': {'count': 10000.0, 'mean': 0.0051, 'std': 0.07123550694523746, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1165': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563314, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1166': {'count': 10000.0, 'mean': 0.0061, 'std': 0.0778678132454865, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1167': {'count': 10000.0, 'mean': 0.006, 'std': 0.0772307999417717, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1168': {'count': 10000.0, 'mean': 0.0079, 'std': 0.08853459119114819, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1169': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563314, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1170': {'count': 10000.0, 'mean': 0.0069, 'std': 0.08278330331371629, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1171': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1172': {'count': 10000.0, 'mean': 0.0065, 'std': 0.0803641452364421, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1173': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531584, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1174': {'count': 10000.0, 'mean': 0.0052, 'std': 0.07192688890626908, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1175': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1176': {'count': 10000.0, 'mean': 0.006, 'std': 0.0772307999417717, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1177': {'count': 10000.0, 'mean': 0.0043, 'std': 0.06543652033703642, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1178': {'count': 10000.0, 'mean': 0.0069, 'std': 0.08278330331371629, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1179': {'count': 10000.0, 'mean': 0.0055, 'std': 0.07396145637900171, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1180': {'count': 10000.0, 'mean': 0.005, 'std': 0.0705372068468477, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1181': {'count': 10000.0, 'mean': 0.0067, 'std': 0.08158293680395279, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1182': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1183': {'count': 10000.0, 'mean': 0.0067, 'std': 0.08158293680395279, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1184': {'count': 10000.0, 'mean': 0.0053, 'std': 0.07261155034651424, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1185': {'count': 10000.0, 'mean': 0.007, 'std': 0.08337682633392178, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1186': {'count': 10000.0, 'mean': 0.0045, 'std': 0.06693428134970884, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1187': {'count': 10000.0, 'mean': 0.0074, 'std': 
0.08570866115778351, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1188': {'count': 10000.0, 'mean': 0.006, 'std': 0.07723079994177172, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1189': {'count': 10000.0, 'mean': 0.0055, 'std': 0.07396145637900171, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1190': {'count': 10000.0, 'mean': 0.0061, 'std': 0.0778678132454865, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1191': {'count': 10000.0, 'mean': 0.0053, 'std': 0.07261155034651424, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1192': {'count': 10000.0, 'mean': 0.0065, 'std': 0.0803641452364421, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1193': {'count': 10000.0, 'mean': 0.0064, 'std': 0.0797475765625311, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1194': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563314, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1195': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1196': {'count': 10000.0, 'mean': 0.0064, 'std': 0.0797475765625311, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1197': {'count': 10000.0, 'mean': 0.0059, 'std': 0.07658835797729162, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1198': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1199': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346918, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1200': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531585, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1201': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269417, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1202': {'count': 10000.0, 'mean': 0.006, 'std': 0.07723079994177172, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1203': {'count': 10000.0, 'mean': 0.0049, 'std': 0.0698317810725516, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1204': {'count': 10000.0, 'mean': 0.0052, 'std': 0.07192688890626908, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1205': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1206': {'count': 10000.0, 'mean': 0.0064, 'std': 0.07974757656253109, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1207': {'count': 10000.0, 'mean': 0.008, 'std': 0.08908868435086431, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1208': {'count': 10000.0, 'mean': 0.0056, 'std': 0.07462705219752398, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1209': {'count': 10000.0, 'mean': 0.0055, 'std': 0.07396145637900171, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1210': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1211': {'count': 10000.0, 'mean': 0.0054, 'std': 0.07328967961257418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1212': {'count': 10000.0, 'mean': 0.006, 'std': 0.0772307999417717, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1213': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1214': {'count': 
10000.0, 'mean': 0.0045, 'std': 0.06693428134970884, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1215': {'count': 10000.0, 'mean': 0.0053, 'std': 0.07261155034651424, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1216': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1217': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1218': {'count': 10000.0, 'mean': 0.0076, 'std': 0.08685041335209605, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1219': {'count': 10000.0, 'mean': 0.006, 'std': 0.0772307999417717, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1220': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346918, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1221': {'count': 10000.0, 'mean': 0.0054, 'std': 0.07328967961257418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1222': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1223': {'count': 10000.0, 'mean': 0.0064, 'std': 0.0797475765625311, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1224': {'count': 10000.0, 'mean': 0.0054, 'std': 0.07328967961257418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1225': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1226': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1227': {'count': 10000.0, 'mean': 0.006, 'std': 0.07723079994177172, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1228': {'count': 10000.0, 'mean': 0.006, 'std': 0.0772307999417717, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1229': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531584, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1230': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1231': {'count': 10000.0, 'mean': 0.006, 'std': 0.0772307999417717, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1232': {'count': 10000.0, 'mean': 0.0067, 'std': 0.08158293680395279, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1233': {'count': 10000.0, 'mean': 0.0067, 'std': 0.08158293680395279, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1234': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346918, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1235': {'count': 10000.0, 'mean': 0.0076, 'std': 0.08685041335209605, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1236': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1237': {'count': 10000.0, 'mean': 0.0056, 'std': 0.07462705219752398, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1238': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269417, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1239': {'count': 10000.0, 'mean': 0.0065, 'std': 0.0803641452364421, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1240': {'count': 10000.0, 'mean': 0.005, 'std': 0.0705372068468477, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 
1.0}, 'c1241': {'count': 10000.0, 'mean': 0.0078, 'std': 0.08797689465649113, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1242': {'count': 10000.0, 'mean': 0.0061, 'std': 0.0778678132454865, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1243': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531584, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1244': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1245': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346918, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1246': {'count': 10000.0, 'mean': 0.0056, 'std': 0.07462705219752398, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1247': {'count': 10000.0, 'mean': 0.0067, 'std': 0.08158293680395279, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1248': {'count': 10000.0, 'mean': 0.0067, 'std': 0.08158293680395279, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1249': {'count': 10000.0, 'mean': 0.0075, 'std': 0.08628148381573501, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1250': {'count': 10000.0, 'mean': 0.0064, 'std': 0.0797475765625311, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1251': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531585, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1252': {'count': 10000.0, 'mean': 0.0069, 'std': 0.08278330331371629, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1253': {'count': 10000.0, 'mean': 0.005, 'std': 0.0705372068468477, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1254': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1255': {'count': 10000.0, 'mean': 0.006, 'std': 0.07723079994177172, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1256': {'count': 10000.0, 'mean': 0.0055, 'std': 0.0739614563790017, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1257': {'count': 10000.0, 'mean': 0.0049, 'std': 0.06983178107255159, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1258': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269417, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1259': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1260': {'count': 10000.0, 'mean': 0.0071, 'std': 0.08396603497547654, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1261': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563314, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1262': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269417, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1263': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563314, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1264': {'count': 10000.0, 'mean': 0.0065, 'std': 0.0803641452364421, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1265': {'count': 10000.0, 'mean': 0.0049, 'std': 0.06983178107255159, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1266': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563314, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1267': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, 
'50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1268': {'count': 10000.0, 'mean': 0.0044, 'std': 0.0661897130059557, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1269': {'count': 10000.0, 'mean': 0.0079, 'std': 0.08853459119114819, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1270': {'count': 10000.0, 'mean': 0.0055, 'std': 0.07396145637900171, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1271': {'count': 10000.0, 'mean': 0.0053, 'std': 0.07261155034651424, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1272': {'count': 10000.0, 'mean': 0.0053, 'std': 0.07261155034651424, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1273': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1274': {'count': 10000.0, 'mean': 0.0067, 'std': 0.08158293680395279, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1275': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1276': {'count': 10000.0, 'mean': 0.0061, 'std': 0.0778678132454865, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1277': {'count': 10000.0, 'mean': 0.0056, 'std': 0.07462705219752398, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1278': {'count': 10000.0, 'mean': 0.0072, 'std': 0.08455101943494679, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1279': {'count': 10000.0, 'mean': 0.0049, 'std': 0.06983178107255159, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1280': {'count': 10000.0, 'mean': 0.0064, 'std': 0.07974757656253109, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1281': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1282': {'count': 10000.0, 'mean': 0.0054, 'std': 0.07328967961257418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1283': {'count': 10000.0, 'mean': 0.0076, 'std': 0.08685041335209605, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1284': {'count': 10000.0, 'mean': 0.0074, 'std': 0.08570866115778351, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1285': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1286': {'count': 10000.0, 'mean': 0.0071, 'std': 0.08396603497547654, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1287': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269417, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1288': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563316, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1289': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1290': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1291': {'count': 10000.0, 'mean': 0.005, 'std': 0.0705372068468477, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1292': {'count': 10000.0, 'mean': 0.0051, 'std': 0.07123550694523746, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1293': {'count': 10000.0, 'mean': 0.0069, 'std': 0.0827833033137163, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1294': {'count': 10000.0, 'mean': 0.0067, 'std': 
0.08158293680395279, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1295': {'count': 10000.0, 'mean': 0.0078, 'std': 0.08797689465649111, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1296': {'count': 10000.0, 'mean': 0.0061, 'std': 0.0778678132454865, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1297': {'count': 10000.0, 'mean': 0.0056, 'std': 0.07462705219752398, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1298': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1299': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346918, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1300': {'count': 10000.0, 'mean': 0.0056, 'std': 0.07462705219752398, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1301': {'count': 10000.0, 'mean': 0.0067, 'std': 0.08158293680395279, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1302': {'count': 10000.0, 'mean': 0.006, 'std': 0.07723079994177172, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1303': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1304': {'count': 10000.0, 'mean': 0.0056, 'std': 0.07462705219752398, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1305': {'count': 10000.0, 'mean': 0.0053, 'std': 0.07261155034651424, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1306': {'count': 10000.0, 'mean': 0.0061, 'std': 0.0778678132454865, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1307': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563316, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1308': {'count': 10000.0, 'mean': 0.0053, 'std': 0.07261155034651424, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1309': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269417, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1310': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1311': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269417, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1312': {'count': 10000.0, 'mean': 0.0072, 'std': 0.08455101943494679, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1313': {'count': 10000.0, 'mean': 0.0052, 'std': 0.07192688890626908, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1314': {'count': 10000.0, 'mean': 0.0073, 'std': 0.08513186679190318, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1315': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1316': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1317': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1318': {'count': 10000.0, 'mean': 0.0064, 'std': 0.0797475765625311, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1319': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1320': {'count': 10000.0, 'mean': 0.0056, 'std': 0.07462705219752398, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1321': {'count': 
10000.0, 'mean': 0.0058, 'std': 0.07594034957563316, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1322': {'count': 10000.0, 'mean': 0.0053, 'std': 0.07261155034651424, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1323': {'count': 10000.0, 'mean': 0.0054, 'std': 0.07328967961257418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1324': {'count': 10000.0, 'mean': 0.0065, 'std': 0.0803641452364421, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1325': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346918, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1326': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563314, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1327': {'count': 10000.0, 'mean': 0.0064, 'std': 0.0797475765625311, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1328': {'count': 10000.0, 'mean': 0.0061, 'std': 0.0778678132454865, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1329': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1330': {'count': 10000.0, 'mean': 0.006, 'std': 0.0772307999417717, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1331': {'count': 10000.0, 'mean': 0.0064, 'std': 0.0797475765625311, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1332': {'count': 10000.0, 'mean': 0.0071, 'std': 0.08396603497547654, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1333': {'count': 10000.0, 'mean': 0.0065, 'std': 0.0803641452364421, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1334': {'count': 10000.0, 'mean': 0.0065, 'std': 0.0803641452364421, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1335': {'count': 10000.0, 'mean': 0.0064, 'std': 0.0797475765625311, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1336': {'count': 10000.0, 'mean': 0.007, 'std': 0.08337682633392177, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1337': {'count': 10000.0, 'mean': 0.0071, 'std': 0.08396603497547653, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1338': {'count': 10000.0, 'mean': 0.0061, 'std': 0.0778678132454865, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1339': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1340': {'count': 10000.0, 'mean': 0.0061, 'std': 0.0778678132454865, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1341': {'count': 10000.0, 'mean': 0.006, 'std': 0.0772307999417717, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1342': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346918, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1343': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1344': {'count': 10000.0, 'mean': 0.006, 'std': 0.07723079994177172, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1345': {'count': 10000.0, 'mean': 0.0053, 'std': 0.07261155034651424, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1346': {'count': 10000.0, 'mean': 0.0049, 'std': 0.0698317810725516, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1347': {'count': 10000.0, 'mean': 0.0053, 'std': 0.07261155034651424, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 
{'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1911': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1912': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1913': {'count': 10000.0, 'mean': 0.007, 'std': 0.08337682633392177, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1914': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346918, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1915': {'count': 10000.0, 'mean': 0.0072, 'std': 0.08455101943494679, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1916': {'count': 10000.0, 'mean': 0.0064, 'std': 0.07974757656253109, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1917': {'count': 10000.0, 'mean': 0.0074, 'std': 0.08570866115778351, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1918': {'count': 10000.0, 'mean': 0.0067, 'std': 0.0815829368039528, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1919': {'count': 10000.0, 'mean': 0.0067, 'std': 0.08158293680395279, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1920': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1921': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1922': {'count': 10000.0, 'mean': 0.0054, 'std': 0.07328967961257418, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1923': {'count': 10000.0, 'mean': 0.0055, 'std': 0.0739614563790017, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1924': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232848, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1925': {'count': 10000.0, 'mean': 0.0061, 'std': 0.0778678132454865, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1926': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1927': {'count': 10000.0, 'mean': 0.005, 'std': 0.0705372068468477, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1928': {'count': 10000.0, 'mean': 0.006, 'std': 0.0772307999417717, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1929': {'count': 10000.0, 'mean': 0.005, 'std': 0.0705372068468477, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1930': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1931': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232848, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1932': {'count': 10000.0, 'mean': 0.0064, 'std': 0.07974757656253109, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1933': {'count': 10000.0, 'mean': 0.0069, 'std': 0.0827833033137163, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1934': {'count': 10000.0, 'mean': 0.006, 'std': 0.07723079994177172, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1935': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531585, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1936': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 
'max': 1.0}, 'c1937': {'count': 10000.0, 'mean': 0.0049, 'std': 0.0698317810725516, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1938': {'count': 10000.0, 'mean': 0.0064, 'std': 0.07974757656253109, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1939': {'count': 10000.0, 'mean': 0.0064, 'std': 0.07974757656253109, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1940': {'count': 10000.0, 'mean': 0.0074, 'std': 0.08570866115778351, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1941': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269417, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1942': {'count': 10000.0, 'mean': 0.0077, 'std': 0.08741552578011953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1943': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531584, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1944': {'count': 10000.0, 'mean': 0.0052, 'std': 0.07192688890626908, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1945': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563314, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1946': {'count': 10000.0, 'mean': 0.0072, 'std': 0.08455101943494679, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1947': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531584, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1948': {'count': 10000.0, 'mean': 0.006, 'std': 0.07723079994177172, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1949': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232848, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1950': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269417, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1951': {'count': 10000.0, 'mean': 0.0069, 'std': 0.0827833033137163, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1952': {'count': 10000.0, 'mean': 0.0055, 'std': 0.0739614563790017, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1953': {'count': 10000.0, 'mean': 0.0064, 'std': 0.07974757656253109, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1954': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563314, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1955': {'count': 10000.0, 'mean': 0.0052, 'std': 0.07192688890626907, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1956': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1957': {'count': 10000.0, 'mean': 0.0071, 'std': 0.08396603497547653, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1958': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1959': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1960': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1961': {'count': 10000.0, 'mean': 0.0065, 'std': 0.0803641452364421, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1962': {'count': 10000.0, 'mean': 0.0062, 'std': 0.07849953004713953, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1963': {'count': 10000.0, 'mean': 0.0061, 'std': 0.0778678132454865, 'min': 0.0, '25%': 
0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1964': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531584, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1965': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1966': {'count': 10000.0, 'mean': 0.0064, 'std': 0.0797475765625311, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1967': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531584, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1968': {'count': 10000.0, 'mean': 0.007, 'std': 0.08337682633392177, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1969': {'count': 10000.0, 'mean': 0.0052, 'std': 0.07192688890626908, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1970': {'count': 10000.0, 'mean': 0.007, 'std': 0.08337682633392177, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1971': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563316, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1972': {'count': 10000.0, 'mean': 0.0055, 'std': 0.0739614563790017, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1973': {'count': 10000.0, 'mean': 0.0041, 'std': 0.06390303873710532, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1974': {'count': 10000.0, 'mean': 0.0048, 'std': 0.06911901144963213, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1975': {'count': 10000.0, 'mean': 0.0068, 'std': 0.08218537244269417, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1976': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1977': {'count': 10000.0, 'mean': 0.0059, 'std': 0.0765883579772916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1978': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1979': {'count': 10000.0, 'mean': 0.006, 'std': 0.0772307999417717, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1980': {'count': 10000.0, 'mean': 0.0076, 'std': 0.08685041335209605, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1981': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563314, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1982': {'count': 10000.0, 'mean': 0.0066, 'std': 0.08097589585531584, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1983': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232849, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1984': {'count': 10000.0, 'mean': 0.0057, 'std': 0.07528663100232848, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1985': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1986': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1987': {'count': 10000.0, 'mean': 0.0052, 'std': 0.07192688890626907, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1988': {'count': 10000.0, 'mean': 0.0046, 'std': 0.06767051004531426, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1989': {'count': 10000.0, 'mean': 0.0071, 'std': 0.08396603497547656, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1990': {'count': 10000.0, 'mean': 0.0061, 'std': 
0.0778678132454865, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1991': {'count': 10000.0, 'mean': 0.0045, 'std': 0.06693428134970886, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1992': {'count': 10000.0, 'mean': 0.0069, 'std': 0.0827833033137163, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1993': {'count': 10000.0, 'mean': 0.0064, 'std': 0.07974757656253109, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1994': {'count': 10000.0, 'mean': 0.0061, 'std': 0.0778678132454865, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1995': {'count': 10000.0, 'mean': 0.0063, 'std': 0.07912607720346916, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1996': {'count': 10000.0, 'mean': 0.006, 'std': 0.0772307999417717, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1997': {'count': 10000.0, 'mean': 0.0056, 'std': 0.07462705219752398, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1998': {'count': 10000.0, 'mean': 0.0051, 'std': 0.07123550694523746, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c1999': {'count': 10000.0, 'mean': 0.0055, 'std': 0.0739614563790017, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}, 'c2000': {'count': 10000.0, 'mean': 0.0058, 'std': 0.07594034957563314, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.0, 'max': 1.0}}
<dataframe_info>
RangeIndex: 10000 entries, 0 to 9999
Columns: 2001 entries, Unnamed: 0 to c2000
dtypes: int64(2000), object(1)
memory usage: 152.7+ MB
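# A minimal sketch (an assumption, not part of the original notebook) of how a summary
# like the one above could be regenerated with pandas. "indicator_table.csv" is a
# hypothetical path standing in for the 10000 x 2001 table described in <dataframe_info>.
import pandas as pd

df = pd.read_csv("indicator_table.csv")     # hypothetical file name for the wide 0/1 table
df.info()                                    # prints RangeIndex, column count, dtypes, memory usage
per_column_stats = df.describe().to_dict()   # count/mean/std/min/25%/50%/75%/max per numeric column
first_rows = df.head(4).to_dict()            # first four rows, the same information shown under <some_examples>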
<some_examples>
{'Unnamed: 0': {'0': 'r1', '1': 'r2', '2': 'r3', '3': 'r4'}, … (sample values for the first four rows of the indicator columns omitted; almost every entry is 0, with an occasional 1, e.g. c1 is 1 in row r3 and c21 is 1 in row r1) … 'c1099': {'0': 0,
'1': 0, '2': 0, '3': 0}, 'c1100': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1101': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1102': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1103': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1104': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1105': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1106': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1107': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1108': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1109': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1110': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1111': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1112': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1113': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1114': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1115': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1116': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1117': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1118': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1119': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1120': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1121': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1122': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1123': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1124': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1125': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1126': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1127': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1128': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1129': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1130': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1131': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1132': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1133': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1134': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1135': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1136': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1137': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1138': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1139': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1140': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1141': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1142': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1143': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1144': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1145': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1146': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1147': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1148': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1149': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1150': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1151': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1152': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1153': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1154': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1155': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1156': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1157': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1158': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1159': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1160': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1161': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1162': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1163': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1164': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1165': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1166': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1167': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1168': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1169': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1170': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1171': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1172': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1173': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1174': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1175': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1176': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1177': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1178': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1179': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1180': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1181': {'0': 0, '1': 0, '2': 0, '3': 0}, 
'c1182': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1183': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1184': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1185': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1186': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1187': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1188': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1189': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1190': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1191': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1192': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1193': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1194': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1195': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1196': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1197': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1198': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1199': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1200': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1201': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1202': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1203': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1204': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1205': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1206': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1207': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1208': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1209': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1210': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1211': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1212': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1213': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1214': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1215': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1216': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1217': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1218': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1219': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1220': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1221': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1222': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1223': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1224': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1225': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1226': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1227': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1228': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1229': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1230': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1231': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1232': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1233': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1234': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1235': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1236': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1237': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1238': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1239': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1240': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1241': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1242': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1243': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1244': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1245': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1246': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1247': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1248': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1249': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1250': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1251': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1252': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1253': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1254': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1255': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1256': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1257': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1258': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1259': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1260': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1261': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1262': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1263': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1264': {'0': 0, '1': 0, 
'2': 0, '3': 0}, 'c1265': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1266': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1267': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1268': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1269': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1270': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1271': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1272': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1273': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1274': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1275': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1276': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1277': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1278': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1279': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1280': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1281': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1282': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1283': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1284': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1285': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1286': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1287': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1288': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1289': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1290': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1291': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1292': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1293': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1294': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1295': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1296': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1297': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1298': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1299': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1300': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1301': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1302': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1303': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1304': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1305': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1306': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1307': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1308': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1309': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1310': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1311': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1312': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1313': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1314': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1315': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1316': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1317': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1318': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1319': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1320': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1321': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1322': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1323': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1324': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1325': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1326': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1327': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1328': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1329': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1330': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1331': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1332': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1333': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1334': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1335': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1336': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1337': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1338': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1339': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1340': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1341': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1342': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1343': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1344': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1345': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1346': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1347': 
{'0': 0, '1': 0, '2': 0, '3': 0}, 'c1348': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1349': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1350': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1351': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1352': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1353': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1354': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1355': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1356': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1357': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1358': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1359': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1360': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1361': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1362': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1363': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1364': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1365': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1366': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1367': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1368': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1369': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1370': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1371': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1372': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1373': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1374': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1375': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1376': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1377': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1378': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1379': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1380': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1381': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1382': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1383': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1384': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1385': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1386': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1387': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1388': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1389': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1390': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1391': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1392': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1393': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1394': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1395': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1396': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1397': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1398': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1399': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1400': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1401': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1402': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1403': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1404': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1405': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1406': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1407': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1408': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1409': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1410': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1411': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1412': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1413': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1414': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1415': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1416': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1417': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1418': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1419': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1420': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1421': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1422': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1423': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1424': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1425': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1426': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1427': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1428': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1429': {'0': 0, '1': 0, '2': 0, 
'3': 0}, 'c1430': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1431': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1432': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1433': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1434': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1435': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1436': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1437': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1438': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1439': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1440': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1441': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1442': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1443': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1444': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1445': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1446': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1447': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1448': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1449': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1450': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1451': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1452': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1453': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1454': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1455': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1456': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1457': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1458': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1459': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1460': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1461': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1462': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1463': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1464': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1465': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1466': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1467': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1468': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1469': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1470': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1471': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1472': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1473': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1474': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1475': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1476': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1477': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1478': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1479': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1480': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1481': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1482': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1483': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1484': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1485': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1486': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1487': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1488': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1489': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1490': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1491': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1492': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1493': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1494': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1495': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1496': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1497': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1498': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1499': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1500': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1501': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1502': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1503': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1504': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1505': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1506': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1507': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1508': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1509': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1510': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1511': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1512': {'0': 0, 
'1': 0, '2': 0, '3': 0}, 'c1513': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1514': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1515': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1516': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1517': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1518': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1519': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1520': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1521': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1522': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1523': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1524': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1525': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1526': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1527': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1528': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1529': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1530': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1531': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1532': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1533': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1534': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1535': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1536': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1537': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1538': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1539': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1540': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1541': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1542': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1543': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1544': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1545': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1546': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1547': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1548': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1549': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1550': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1551': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1552': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1553': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1554': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1555': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1556': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1557': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1558': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1559': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1560': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1561': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1562': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1563': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1564': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1565': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1566': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1567': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1568': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1569': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1570': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1571': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1572': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1573': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1574': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1575': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1576': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1577': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1578': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1579': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1580': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1581': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1582': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1583': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1584': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1585': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1586': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1587': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1588': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1589': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1590': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1591': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1592': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1593': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1594': {'0': 0, '1': 0, '2': 0, '3': 0}, 
'c1595': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1596': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1597': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1598': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1599': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1600': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1601': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1602': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1603': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1604': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1605': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1606': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1607': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1608': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1609': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1610': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1611': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1612': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1613': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1614': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1615': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1616': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1617': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1618': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1619': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1620': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1621': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1622': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1623': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1624': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1625': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1626': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1627': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1628': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1629': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1630': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1631': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1632': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1633': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1634': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1635': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1636': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1637': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1638': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1639': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1640': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1641': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1642': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1643': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1644': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1645': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1646': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1647': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1648': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1649': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1650': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1651': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1652': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1653': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1654': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1655': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1656': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1657': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1658': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1659': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1660': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1661': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1662': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1663': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1664': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1665': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1666': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1667': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1668': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1669': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1670': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1671': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1672': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1673': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1674': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1675': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1676': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1677': {'0': 0, '1': 0, 
'2': 0, '3': 0}, 'c1678': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1679': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1680': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1681': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1682': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1683': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1684': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1685': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1686': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1687': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1688': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1689': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1690': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1691': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1692': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1693': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1694': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1695': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1696': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1697': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1698': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1699': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1700': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1701': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1702': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1703': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1704': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1705': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1706': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1707': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1708': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1709': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1710': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1711': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1712': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1713': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1714': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1715': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1716': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1717': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1718': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1719': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1720': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1721': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1722': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1723': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1724': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1725': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1726': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1727': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1728': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1729': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1730': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1731': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1732': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1733': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1734': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1735': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1736': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1737': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1738': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1739': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1740': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1741': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1742': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1743': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1744': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1745': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1746': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1747': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1748': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1749': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1750': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1751': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1752': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1753': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1754': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1755': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1756': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1757': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1758': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1759': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1760': 
{'0': 0, '1': 0, '2': 0, '3': 0}, 'c1761': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1762': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1763': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1764': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1765': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1766': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1767': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1768': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1769': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1770': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1771': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1772': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1773': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1774': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1775': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1776': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1777': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1778': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1779': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1780': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1781': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1782': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1783': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1784': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1785': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1786': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1787': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1788': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1789': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1790': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1791': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1792': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1793': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1794': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1795': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1796': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1797': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1798': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1799': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1800': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1801': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1802': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1803': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1804': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1805': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1806': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1807': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1808': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1809': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1810': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1811': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1812': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1813': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1814': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1815': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1816': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1817': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1818': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1819': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1820': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1821': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1822': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1823': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1824': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1825': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1826': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1827': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1828': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1829': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1830': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1831': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1832': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1833': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1834': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1835': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1836': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1837': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1838': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1839': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1840': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1841': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1842': {'0': 0, '1': 0, '2': 0, 
'3': 0}, 'c1843': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1844': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1845': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1846': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1847': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1848': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1849': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1850': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1851': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1852': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1853': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1854': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1855': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1856': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1857': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1858': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1859': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1860': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1861': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1862': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1863': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1864': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1865': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1866': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1867': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1868': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1869': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1870': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1871': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1872': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1873': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1874': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1875': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1876': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1877': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1878': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1879': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1880': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1881': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1882': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1883': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1884': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1885': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1886': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1887': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1888': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1889': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1890': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1891': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1892': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1893': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1894': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1895': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1896': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1897': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1898': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1899': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1900': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1901': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1902': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1903': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1904': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1905': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1906': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1907': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1908': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1909': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1910': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1911': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1912': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1913': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1914': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1915': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1916': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1917': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1918': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1919': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1920': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1921': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1922': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1923': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1924': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1925': {'0': 0, 
'1': 0, '2': 0, '3': 0}, 'c1926': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1927': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1928': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1929': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1930': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1931': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1932': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1933': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1934': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1935': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1936': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1937': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1938': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1939': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1940': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1941': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1942': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1943': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1944': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1945': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1946': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1947': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1948': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1949': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1950': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1951': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1952': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1953': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1954': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1955': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1956': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1957': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1958': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1959': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1960': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1961': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1962': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1963': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1964': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1965': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1966': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1967': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1968': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1969': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1970': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1971': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1972': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1973': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1974': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1975': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1976': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1977': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1978': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1979': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1980': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1981': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1982': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1983': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1984': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1985': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1986': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1987': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1988': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1989': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1990': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1991': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1992': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1993': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1994': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1995': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1996': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1997': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1998': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c1999': {'0': 0, '1': 0, '2': 0, '3': 0}, 'c2000': {'0': 0, '1': 0, '2': 0, '3': 0}}
<end_description>
| 2,351 | 0 | 3,263 | 2,351 |
69637726
|
# # import
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
# Load the handy utility for splitting the dataset:
from sklearn.model_selection import train_test_split
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Any results you write to the current directory are saved as output.
import re
from datetime import datetime
from datetime import timedelta
# always fix RANDOM_SEED so that your experiments are reproducible!
RANDOM_SEED = 35
# pin the package versions so that the experiments are reproducible:
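# A minimal sketch (not in the original) of recording the library versions this run
# actually used, so results can be reproduced later; only the packages imported in
# this notebook are assumed to matter:
import sklearn
print("numpy:", np.__version__, "| pandas:", pd.__version__, "| sklearn:", sklearn.__version__)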
# # DATA
DATA_DIR = "/kaggle/input/sf-dst-restaurant-rating/"
df_train = pd.read_csv(DATA_DIR + "main_task.csv")
df_test = pd.read_csv(DATA_DIR + "kaggle_task.csv")
sample_submission = pd.read_csv(DATA_DIR + "sample_submission.csv")
df_train.info()
df_train.head(5)
df_test.info()
df_test.head(5)
sample_submission.head(5)
sample_submission.info()
# IMPORTANT! To process the features consistently, combine train and test into a single dataset
df_train["sample"] = 1  # mark the train rows
df_test["sample"] = 0  # mark the test rows
df_test["Rating"] = 0  # the test set has no Rating value; we must predict it, so fill it with zeros for now
data = pd.concat([df_test, df_train], sort=False).reset_index(drop=True)  # combine
data.info()
# Feature descriptions:
# * City: city
# * Cuisine Style: cuisine style
# * Ranking: the restaurant's rank relative to other restaurants in the same city
# * Price Range: restaurant prices in 3 categories
# * Number of Reviews: number of reviews
# * Reviews: the 2 most recent reviews and their dates
# * URL_TA: the restaurant's page on 'www.tripadvisor.com'
# * ID_TA: the restaurant's ID on TripAdvisor
# * Rating: the restaurant's rating (the target)
data.sample(5)
data.Reviews[1]
# Convert the column names to lowercase with underscores instead of spaces.
data.columns = ["_".join(col.split()).lower() for col in data.columns]
# ## Preprocessing
pd.set_option("display.max_columns", None)
pd.set_option("display.max_rows", None)
# Helper for building dummy variables; it relies on the global `item` set in the loops below.
def find_item(cell):
    if item in cell:
        return 1
    return 0
# Round predictions to the nearest 0.5 (TripAdvisor ratings come in half-point steps) before computing MAE.
def round_of_rating(number):
    return np.round(number * 2) / 2
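# Quick sanity check of the rounding helper (illustrative values): predictions are
# snapped to the nearest half point, matching the rating scale.
assert round_of_rating(4.3) == 4.5
assert round_of_rating(4.1) == 4.0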
# ## Process the prices
# The Price Range column has 13886 missing values; the number of $ signs indicates the price level.
data["price_range"].value_counts()
# Add a flag feature for unknown prices.
data["price_NaN"] = data["price_range"].isna().astype("int32")
# Replace the symbolic price labels in the Price Range column with numeric values.
price = {"$": 0, "$$ - $$$": 1, "$$$$": 2}
data["price_range"] = data["price_range"].replace(price)
# Fill the gaps with the middle price category (1).
data["price_range"].fillna(1, inplace=True)
# ## Process the number of reviews
# Create a flag feature for missing review counts.
data["number_of_reviews_NaN"] = data["number_of_reviews"].isna().astype("int32")
# Where there are no reviews, fill the gaps with zero.
data["number_of_reviews"].fillna(0, inplace=True)
data["number_of_reviews"].count()
# ## Process the ranking
# Create new per-city ranking features: minimum, maximum, and a normalized rank.
rank_city_min = data.groupby(["city"])["ranking"].min().to_dict()
rank_city_max = data.groupby(["city"])["ranking"].max().to_dict()
data["rank_city_min"] = data.city.map(rank_city_min)
data["rank_city_max"] = data.city.map(rank_city_max)
data["rank_norm"] = data.apply(
lambda x: (x["ranking"] - x["rank_city_min"])
/ (x["rank_city_max"] - x["rank_city_min"]),
axis=1,
)
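# A vectorized sketch of the same per-city min-max normalization, using
# groupby().transform() instead of the row-wise apply above (kept in throwaway
# variables so the feature set used below is unchanged):
_rank_min = data.groupby("city")["ranking"].transform("min")
_rank_max = data.groupby("city")["ranking"].transform("max")
_rank_norm_check = (data["ranking"] - _rank_min) / (_rank_max - _rank_min)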
# ## Process the cuisine
# The column has 9283 missing values; create a new flag feature for them.
data["NaN_cuisine_style"] = pd.isna(data["cuisine_style"]).astype("float64")
# Remove extraneous characters from the Cuisine Style column; fill empty rows with a placeholder style for later checking.
data["cuisine_style"].fillna("Any", inplace=True)
data["cuisine_style"] = data["cuisine_style"].replace("\[", "", regex=True)
data["cuisine_style"] = data["cuisine_style"].replace("\]", "", regex=True)
data["cuisine_style"] = data["cuisine_style"].replace("'", "", regex=True)
# Replace the placeholder style with the most popular one.
data["cuisine_style"] = data["cuisine_style"].replace(
"Any", "Vegetarian Friendly", regex=True
)
# Create a new feature: the number of listed cuisines.
data["count_cuisine"] = data["cuisine_style"].apply(lambda x: len(str(x).split(",")))
# Create a new feature for vegetarian/vegan cuisines.
def find_item_1(style):
if "Vegan Options" in style or "Vegetarian Friendly" in style:
return 1
return 0
data["vegetarian"] = data["cuisine_style"].apply(find_item_1)
# Create a new feature for Asian cuisines.
asian_cuisines = [
    "Japanese", "Asian", "Chinese", "Sushi", "Indian", "Thai",
    "Vietnamese", "Korean", "Nepali", "Bangladeshi", "Indonesian",
    "Yunnan", "Malaysian", "Sri Lankan", "Tibetan", "Taiwanese",
    "Cambodian", "Singaporean", "Mongolian", "Xinjiang", "Burmese",
    "Central Asian", "Filipino", "Minority Chinese", "Fujian",
]
def find_item_2(style):
    return int(any(cuisine in style for cuisine in asian_cuisines))
data["asian_cuisine"] = data["cuisine_style"].apply(find_item_2)
# ## Process the cities
# Create dummy features for the cities.
city = set(data["city"].tolist())
for item in city:
data[item] = data["city"].apply(find_item)
# Add a feature: capital city.
capitals = [
"London",
"Paris",
"Madrid",
"Berlin",
"Rome",
"Prague",
"Lisbon",
"Vienna",
"Amsterdam",
"Brussels",
"Stockholm",
"Budapest",
"Warsaw",
"Dublin",
"Copenhagen",
"Athens",
"Oslo",
"Helsinki",
"Bratislava",
"Luxembourg",
"Ljubljana",
"Edinburgh",
]
data["capital"] = data["city"].apply(lambda x: 1 if x in capitals else 0)
# Add a feature: the number of restaurants in the city.
data["numb_of_rest_city"] = data["city"].map(
data.groupby(["city"])["restaurant_id"].count().to_dict()
)
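# An equivalent one-liner with groupby().transform() (sketch, not attached to `data`):
_numb_of_rest_city_check = data.groupby("city")["restaurant_id"].transform("count")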
# Add a feature: city population (divided by 1,000,000).
population = {
"London": 8908081,
"Paris": 2148327,
"Madrid": 3221824,
"Barcelona": 1628552,
"Berlin": 3644826,
"Milan": 1372434,
"Rome": 2864466,
"Prague": 1301132,
"Lisbon": 506654,
"Vienna": 1888776,
"Amsterdam": 872757,
"Brussels": 185103,
"Hamburg": 1841179,
"Munich": 1471508,
"Lyon": 506615,
"Stockholm": 961609,
"Budapest": 1752286,
"Warsaw": 1790658,
"Dublin": 1173179,
"Copenhagen": 615993,
"Athens": 664046,
"Edinburgh": 488100,
"Zurich": 428737,
"Oporto": 237591,
"Geneva": 200548,
"Krakow": 779115,
"Oslo": 673469,
"Helsinki": 655281,
"Bratislava": 437725,
"Luxembourg": 124509,
"Ljubljana": 284355,
}
data["population"] = data["city"].replace(population) / 1000000
# Add a per-city population density feature (divided by 10,000)
population_density = {
"London": 5667,
"Paris": 20781,
"Madrid": 5345,
"Barcelona": 16285,
"Berlin": 4463,
"Milan": 7582,
"Rome": 2225,
"Prague": 2506,
"Lisbon": 6658,
"Vienna": 4502,
"Amsterdam": 4768,
"Brussels": 5497,
"Hamburg": 2438,
"Munich": 4746,
"Lyon": 10041,
"Stockholm": 5139,
"Budapest": 3330,
"Warsaw": 3449,
"Dublin": 3689,
"Copenhagen": 6214,
"Athens": 7500,
"Edinburgh": 4140,
"Zurich": 4666,
"Oporto": 5703,
"Geneva": 12589,
"Krakow": 2384,
"Oslo": 1483,
"Helsinki": 3058,
"Bratislava": 1189,
"Luxembourg": 2240,
"Ljubljana": 1736,
}
data["population_density"] = data["city"].replace(population_density) / 10000
# ## Process the dates.
# Remove extraneous characters from the reviews.
data["reviews"] = data["reviews"].replace("\[", "", regex=True)
data["reviews"] = data["reviews"].replace("\]", "", regex=True)
data["reviews"] = data["reviews"].replace(",", "", regex=True)
# Fill missing values with a technical placeholder date.
data["reviews"].fillna("'01/01/2021' '01/01/2021'", inplace=True)
# Define the regex pattern for a quoted date.
pattern = re.compile(r"'\d+/\d+/\d+'?")
# Collect the dates into a list.
dates = []
for date in data["reviews"]:
if len(date) >= 2:
dat = pattern.findall(date)
dates.append(dat)
else:
dat = [
"'01/01/2021'",
"'01/01/2021'",
        ]  # Fill empty strings with an arbitrary placeholder date.
dates.append(dat)
# Duplicate single dates so every entry has two, for easier processing later.
dates_full = []
for date in dates:
if len(date) < 2:
date = date * 2
dates_full.append(date)
else:
dates_full.append(date)
# Convert the dates to datetime format.
datetime_list = []
for dtm in dates_full:
if len(dtm) >= 2:
temp_date = []
for date in dtm:
date = date[1:-1]
dt = datetime.strptime(date, "%m/%d/%Y")
temp_date.append(dt)
datetime_list.append(temp_date)
# Find the difference in days between each pair of review dates. A missing review or a single date gives zero.
delta_list = []
for i in range(0, len(datetime_list)):
delta = datetime_list[i][0] - datetime_list[i][1]
delta_list.append(abs(delta))
# Create a new feature with the difference in days.
data["diff_of_days"] = delta_list
data["diff_of_days"] = data["diff_of_days"].dt.days.astype("int16")
# Determine the outlier boundary (Tukey's rule: 75th percentile + 1.5 * IQR).
perc25 = data.diff_of_days.quantile(0.25)
perc75 = data.diff_of_days.quantile(0.75)
IQR = perc75 - perc25
print(
    "75th percentile: {},".format(perc75),
    "IQR: {}, ".format(IQR),
    "Outlier boundaries: [{f}, {l}].".format(f=0, l=perc75 + 1.5 * IQR),
)
data.diff_of_days.loc[data.diff_of_days.between(0, perc75 + 1.5 * IQR)].hist(
bins=16, label="IQR"
)
plt.legend()
# Create a new feature capped at the outlier boundary.
temp_list = data["diff_of_days"].tolist()
temp_days = []
for numb in temp_list:
if numb in range(301, 3300):
temp_days.append(300)
else:
temp_days.append(numb)
data["diff_clear"] = temp_days
# Roughly bucket the time between reviews: 1-100 days -> 2, 101-200 -> 1, 0 and > 200 -> 0.
diff_list = data["diff_of_days"].tolist()
norm_list = []
for numb in diff_list:
if numb in range(1, 101):
norm_list.append(2)
elif numb in range(101, 201):
norm_list.append(1)
else:
norm_list.append(0)
data["norm_of_days"] = norm_list
data["norm_of_days"].value_counts()
# Build columns with the dates.
date_one = []  # List of the first dates.
for date in dates_full:
date = date[:1]
date_one.append(date)
date_str_one = str(date_one)  # Clean up the clutter while working with it as a string.
date_str_one = date_str_one.replace("[", "")
date_str_one = date_str_one.replace("]", "")
date_str_one = date_str_one.replace("'", "")
date_str_one = date_str_one.replace(" ", "")
date_str_one = date_str_one.replace('"', "")
delimiter = ","
date_one_list = date_str_one.split(delimiter)  # Turn it back into a list.
data["date_feedback_one"] = date_one_list
data["date_feedback_one"] = [
datetime.strptime(x, "%m/%d/%Y") for x in date_one_list
] # Столбец в формате datetime.
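# The same column can be built directly from datetime_list, skipping the string
# round-trip above (sketch, not used to overwrite the column):
_date_feedback_one_check = pd.Series([d[0] for d in datetime_list], index=data.index)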
# Признак по второй дате отдельно не будет использоваться, улучшения не дало.
# Remove the arbitrarily filled placeholder dates from the date columns.
data.loc[data["date_feedback_one"] == "2021-01-01", "date_feedback_one"] = np.nan
# Create a feature for the month of the first review.
data["month_one"] = data["date_feedback_one"].dt.strftime("%B")
# Create a feature for the day of the week of the first review.
data["dayofweek_one"] = data["date_feedback_one"].dt.strftime("%A")
# Fill missing months with the most common review month.
data["month_one"].fillna("December", inplace=True)
# Fill missing days of the week with the most common value.
data["dayofweek_one"].fillna("Sunday", inplace=True)
# Create dummy features for the months of the first date.
month_one = set(data["month_one"].tolist())
for item in month_one:
data[item] = data["month_one"].apply(find_item)
# Create dummy features for the days of the week of the first date.
dayofweek_one = set(data["dayofweek_one"].tolist())
for item in dayofweek_one:
data[item] = data["dayofweek_one"].apply(find_item)
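# The loops above build the dummy columns by hand; a minimal sketch of the pandas one-liner
# equivalent (not used here, shown for reference only):
# data = data.join(pd.get_dummies(data["month_one"])).join(pd.get_dummies(data["dayofweek_one"]))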
# ### Look at the distribution of the feature
plt.rcParams["figure.figsize"] = (10, 7)
df_train["Ranking"].hist(bins=100)
df_train["City"].value_counts(ascending=True).plot(kind="barh")
df_train["Ranking"][df_train["City"] == "London"].hist(bins=100)
# look at the top 10 cities
for x in (df_train["City"].value_counts())[0:10].index:
df_train["Ranking"][df_train["City"] == x].hist(bins=100)
plt.show()
# ### Look at the distribution of the target variable
df_train["Rating"].value_counts(ascending=True).plot(kind="barh")
# ### Look at the distribution of the target variable relative to the feature
df_train["Ranking"][df_train["Rating"] == 5].hist(bins=100)
df_train["Ranking"][df_train["Rating"] < 4].hist(bins=100)
# ### [Feature correlation](https://ru.wikipedia.org/wiki/Корреляция)
plt.rcParams["figure.figsize"] = (15, 10)
sns.heatmap(
data.drop(["sample"], axis=1).corr(),
)
# #### Run it and check what we got
data.info()
# Drop the unnecessary columns.
data.drop(data.columns[[0, 1, 2, 6, 7, 8, 11, 55, 58, 59, 60]], axis=1, inplace=True)
data.sample()
# Now split off the test part
train_data = data.query("sample == 1").drop(["sample"], axis=1)
test_data = data.query("sample == 0").drop(["sample"], axis=1)
y = train_data.rating.values  # our target
X = train_data.drop(["rating"], axis=1)
# **Before sending the data to training, split it into one more train/test pair for validation.
# This lets us check how well the model works before sending a submission to Kaggle.**
# Use the dedicated train_test_split function to split the data
# hold out 20% of the data for validation (the test_size parameter)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=RANDOM_SEED
)
# check the shapes
test_data.shape, train_data.shape, X.shape, X_train.shape, X_test.shape
# # Model
# The ML itself
# Import the necessary libraries:
from sklearn.ensemble import (
RandomForestRegressor,
)  # tool for building and training the model
from sklearn import metrics  # tools for evaluating model accuracy
# Create the model (DO NOT TOUCH THE SETTINGS)
model = RandomForestRegressor(
n_estimators=100, verbose=1, n_jobs=-1, random_state=RANDOM_SEED
)
# Train the model on the training set
model.fit(X_train, y_train)
# Use the trained model to predict restaurant ratings for the test sample.
# Store the predicted values in y_pred
y_pred = round_of_rating(model.predict(X_test))
# Compare the predicted values (y_pred) with the actual values (y_test) and see how far off they are on average
# The metric is called Mean Absolute Error (MAE) and shows the mean deviation of the predictions from the actual values.
print("MAE:", metrics.mean_absolute_error(y_test, y_pred))
# RandomForestRegressor can report the most important features for the model
plt.rcParams["figure.figsize"] = (10, 10)
feat_importances = pd.Series(model.feature_importances_, index=X.columns)
feat_importances.nlargest(15).plot(kind="barh")
# # Submission
# If everything looks good, prepare the submission for Kaggle
test_data.sample(10)
test_data = test_data.drop(["rating"], axis=1)
sample_submission.head()
predict_submission = model.predict(test_data)
predict_submission
sample_submission["Rating"] = predict_submission
sample_submission.to_csv("submission.csv", index=False)
sample_submission.head(10)
# NIH Chest X-ray Dataset
---
### National Institutes of Health Chest X-Ray Dataset
Chest X-ray exams are one of the most frequent and cost-effective medical imaging examinations available. However, clinical diagnosis of a chest X-ray can be challenging and sometimes more difficult than diagnosis via chest CT imaging. The lack of large publicly available datasets with annotations means it is still very difficult, if not impossible, to achieve clinically relevant computer-aided detection and diagnosis (CAD) in real-world medical sites with chest X-rays. One major hurdle in creating large X-ray image datasets is the lack of resources for labeling so many images. Prior to the release of this dataset, [Openi][1] was the largest publicly available source of chest X-ray images with 4,143 images available.
This NIH Chest X-ray Dataset is comprised of 112,120 X-ray images with disease labels from 30,805 unique patients. To create these labels, the authors used Natural Language Processing to text-mine disease classifications from the associated radiological reports. The labels are expected to be >90% accurate and suitable for weakly-supervised learning. The original radiology reports are not publicly available but you can find more details on the labeling process in this Open Access paper: "ChestX-ray8: Hospital-scale Chest X-ray Database and Benchmarks on Weakly-Supervised Classification and Localization of Common Thorax Diseases." (*Wang et al.*)
[Link to paper][30]
[1]: https://openi.nlm.nih.gov/
<br>
### Data limitations:
1. The image labels are NLP extracted so there could be some erroneous labels but the NLP labeling accuracy is estimated to be >90%.
2. Very limited numbers of disease region bounding boxes (See BBox_list_2017.csv)
3. Chest x-ray radiology reports are not anticipated to be publicly shared. Parties who use this public dataset are encouraged to share their “updated” image labels and/or new bounding boxes in their own studies later, perhaps through manual annotation.
<br>
### File contents
- **Image format**: 112,120 total images with size 1024 x 1024
- **images_001.zip**: Contains 4999 images
- **images_002.zip**: Contains 10,000 images
- **images_003.zip**: Contains 10,000 images
- **images_004.zip**: Contains 10,000 images
- **images_005.zip**: Contains 10,000 images
- **images_006.zip**: Contains 10,000 images
- **images_007.zip**: Contains 10,000 images
- **images_008.zip**: Contains 10,000 images
- **images_009.zip**: Contains 10,000 images
- **images_010.zip**: Contains 10,000 images
- **images_011.zip**: Contains 10,000 images
- **images_012.zip**: Contains 7,121 images
- **README_ChestXray.pdf**: Original README file
- **BBox_list_2017.csv**: Bounding box coordinates. *Note: Start at x,y, extend horizontally w pixels, and vertically h pixels*
- Image Index: File name
- Finding Label: Disease type (Class label)
- Bbox x
- Bbox y
- Bbox w
- Bbox h
- **Data_entry_2017.csv**: Class labels and patient data for the entire dataset
- Image Index: File name
- Finding Labels: Disease type (Class label)
- Follow-up #
- Patient ID
- Patient Age
- Patient Gender
- View Position: X-ray orientation
- OriginalImageWidth
- OriginalImageHeight
- OriginalImagePixelSpacing_x
- OriginalImagePixelSpacing_y
<br>
### Class descriptions
There are 15 classes (14 diseases, and one for "No findings"). Images can be classified as "No findings" or one or more disease classes:
- Atelectasis
- Consolidation
- Infiltration
- Pneumothorax
- Edema
- Emphysema
- Fibrosis
- Effusion
- Pneumonia
- Pleural_thickening
- Cardiomegaly
- Nodule
- Mass
- Hernia
<br>
### Full Dataset Content
There are 12 zip files in total and range from ~2 gb to 4 gb in size. Additionally, we randomly sampled 5% of these images and created a smaller dataset for use in Kernels. The random sample contains 5606 X-ray images and class labels.
- [Sample][9]: sample.zip
[9]: https://www.kaggle.com/nih-chest-xrays/sample
<br>
### Modifications to original data
- Original TAR archives were converted to ZIP archives to be compatible with the Kaggle platform
- CSV headers slightly modified to be more explicit in comma separation and also to allow fields to be self-explanatory
<br>
### Citations
- Wang X, Peng Y, Lu L, Lu Z, Bagheri M, Summers RM. ChestX-ray8: Hospital-scale Chest X-ray Database and Benchmarks on Weakly-Supervised Classification and Localization of Common Thorax Diseases. IEEE CVPR 2017, [ChestX-ray8_Hospital-Scale_Chest_CVPR_2017_paper.pdf][30]
- NIH News release: [NIH Clinical Center provides one of the largest publicly available chest x-ray datasets to scientific community][30]
- Original source files and documents: [https://nihcc.app.box.com/v/ChestXray-NIHCC/folder/36938765345][31]
<br>
Kaggle dataset identifier: data
import numpy as np  # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
from glob import glob
import matplotlib.pyplot as plt
import tensorflow as tf
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
# import os
# for dirname, _, filenames in os.walk('/kaggle/input'):
# for filename in filenames:
# print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
all_xray_df = pd.read_csv("../input/data/Data_Entry_2017.csv")
all_image_paths = {
os.path.basename(x): x
for x in glob(os.path.join("..", "input", "data", "images*", "*", "*.png"))
}
print("Scans found:", len(all_image_paths), ", Total Headers", all_xray_df.shape[0])
all_xray_df["path"] = all_xray_df["Image Index"].map(all_image_paths.get)
# all_xray_df['Patient Age'] = all_xray_df['Patient Age'].map(lambda x: int(x[:-1]))
all_xray_df.sample(3)
all_xray_df["Finding Labels"] = all_xray_df["Finding Labels"].map(
lambda x: x.replace("No Finding", "")
)
from itertools import chain
all_labels = np.unique(
list(chain(*all_xray_df["Finding Labels"].map(lambda x: x.split("|")).tolist()))
)
all_labels = [x for x in all_labels if len(x) > 0]
print("All Labels ({}): {}".format(len(all_labels), all_labels))
for c_label in all_labels:
if len(c_label) > 1: # leave out empty labels
all_xray_df[c_label] = all_xray_df["Finding Labels"].map(
lambda finding: 1.0 if c_label in finding else 0
)
all_xray_df.sample(3)
all_xray_df["labels"] = all_xray_df.apply(
lambda x: x["Finding Labels"].split("|"), axis=1
)
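# For illustration: a multi-label entry such as "Cardiomegaly|Effusion" becomes the list
# ["Cardiomegaly", "Effusion"], and the matching one-hot columns created above are set to 1.0.
# Optional sanity check (a minimal sketch, safe to skip):
# all_xray_df[["Finding Labels", "labels"]].sample(3)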
from sklearn.model_selection import train_test_split
train_df, valid_df = train_test_split(
all_xray_df,
test_size=0.25,
random_state=2018,
stratify=all_xray_df["Finding Labels"].map(lambda x: x[:4]),
)
print("train", train_df.shape[0], "validation", valid_df.shape[0])
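# Note: stratifying on the first four characters of "Finding Labels" is only a coarse proxy,
# but it keeps the most common leading label groups balanced between train and validation.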
from tensorflow.keras.preprocessing.image import ImageDataGenerator
IMG_SIZE = (256, 256)
core_idg = ImageDataGenerator(
samplewise_center=True,
samplewise_std_normalization=True,
horizontal_flip=True,
vertical_flip=False,
height_shift_range=0.05,
width_shift_range=0.1,
rotation_range=5,
shear_range=0.1,
fill_mode="reflect",
zoom_range=0.15,
)
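# samplewise_center / samplewise_std_normalization rescale every image to zero mean and unit
# variance on its own; the remaining arguments add light geometric augmentation (flips, small
# shifts, rotations, shear and zoom) to reduce overfitting.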
train_gen = core_idg.flow_from_dataframe(
dataframe=train_df,
directory=None,
x_col="path",
y_col="labels",
class_mode="categorical",
batch_size=32,
classes=all_labels,
color_mode="grayscale",
target_size=IMG_SIZE,
)
valid_gen = core_idg.flow_from_dataframe(
dataframe=valid_df,
directory=None,
x_col="path",
y_col="labels",
class_mode="categorical",
batch_size=256,
classes=all_labels,
color_mode="grayscale",
target_size=IMG_SIZE,
) # we can use much larger batches for evaluation
# used a fixed dataset for evaluating the algorithm
test_X, test_Y = next(
core_idg.flow_from_dataframe(
dataframe=valid_df,
directory=None,
x_col="path",
y_col="labels",
class_mode="categorical",
batch_size=1024,
classes=all_labels,
color_mode="grayscale",
target_size=IMG_SIZE,
)
)
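# next() draws a single batch of up to 1024 images, which is then reused as a fixed
# validation set in model.fit() below, instead of re-generating augmented batches each epoch.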
from tensorflow.keras.applications.mobilenet import MobileNet
from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Dropout, Flatten
from tensorflow.keras.models import Sequential
from tensorflow.keras.callbacks import (
ModelCheckpoint,
LearningRateScheduler,
EarlyStopping,
ReduceLROnPlateau,
)
weight_path = "{}_nih_pretrained.h5".format("xray_class")
checkpoint = ModelCheckpoint(
weight_path, monitor="val_loss", verbose=1, save_best_only=True, mode="min"
)
early = EarlyStopping(monitor="val_loss", mode="min", patience=3)
callbacks_list = [checkpoint, early]
with tf.device("/gpu:0"):
base_mobilenet_model = MobileNet(
input_shape=(256, 256, 1), include_top=False, weights=None
)
multi_disease_model = Sequential()
multi_disease_model.add(base_mobilenet_model)
multi_disease_model.add(GlobalAveragePooling2D())
multi_disease_model.add(Dropout(0.5))
multi_disease_model.add(Dense(512))
multi_disease_model.add(Dropout(0.5))
multi_disease_model.add(Dense(len(all_labels), activation="sigmoid"))
multi_disease_model.compile(
optimizer="adam", loss="binary_crossentropy", metrics=["binary_accuracy", "mae"]
)
multi_disease_model.summary()
multi_disease_model.fit(
train_gen,
steps_per_epoch=100,
validation_data=(test_X, test_Y),
epochs=15,
callbacks=callbacks_list,
)
# > * Train: 0.8854
# > * Val: 0.8720
# > * loss_train: 0.3102
# > * loss_val: 0.4242
y_preds = multi_disease_model.predict(test_X)
print(np.around(y_preds[4], 2))
print(test_Y[4])
# # Fine tune on CheXpert
path = "../input/chexpert-dataset/"
train_df = pd.read_csv("../input/modified-chexpert/modifiedv2_train.csv")
valid_df = pd.read_csv("../input/modified-chexpert/modifiedv2_valid.csv")
train_df["path"] = path + train_df["Path"]
valid_df["path"] = path + valid_df["Path"]
dfs = [train_df, valid_df]
all_xray_df = pd.concat(dfs)
all_xray_df.sample(3)
# all_xray_df.drop("No Finding", axis=1, inplace=True)
all_xray_df.columns
all_xray_df = all_xray_df[all_xray_df["Finding Labels"].notnull()]
all_xray_df["Finding Labels"] = all_xray_df["Finding Labels"].map(
lambda x: x.replace("No Finding", "")
)
all_labels = [
"Atelectasis",
"Consolidation",
"Infiltration",
"Pneumothorax",
"Edema",
"Emphysema",
"Fibrosis",
"Pleural Effusion",
"Mass",
"Pneumonia",
"Pleural_thickening",
"Cardiomegaly",
"Nodule Mass",
"Hernia",
"Enlarged Cardiom",
"Lung Lesion",
"Lung Opacity",
"Pleural Other",
"Fracture",
]
print("All Labels ({}): {}".format(len(all_labels), all_labels))
for c_label in all_labels:
if len(c_label) > 1: # leave out empty labels
all_xray_df[c_label] = all_xray_df["Finding Labels"].map(
lambda finding: 1.0 if c_label in finding else 0
)
all_xray_df.sample(3)
all_xray_df.columns
# all_xray_df.drop("Enlarged Cardiomediastinum", axis=1, inplace=True)
all_xray_df["labels"] = all_xray_df.apply(
lambda x: x["Finding Labels"].split("|"), axis=1
)
all_xray_df["labels"]
from sklearn.model_selection import train_test_split
train_df, valid_df = train_test_split(
all_xray_df,
test_size=0.25,
random_state=2018,
stratify=all_xray_df["Finding Labels"].map(lambda x: x[:4]),
)
print("train", train_df.shape[0], "validation", valid_df.shape[0])
from tensorflow.keras.preprocessing.image import ImageDataGenerator
IMG_SIZE = (256, 256)
core_idg = ImageDataGenerator(
samplewise_center=True,
samplewise_std_normalization=True,
horizontal_flip=True,
vertical_flip=False,
height_shift_range=0.05,
width_shift_range=0.1,
rotation_range=5,
shear_range=0.1,
fill_mode="reflect",
zoom_range=0.15,
)
train_df.head()
train_gen = core_idg.flow_from_dataframe(
dataframe=train_df,
directory=None,
x_col="path",
y_col="labels",
class_mode="categorical",
batch_size=32,
classes=all_labels,
color_mode="grayscale",
target_size=IMG_SIZE,
)
valid_gen = core_idg.flow_from_dataframe(
dataframe=valid_df,
directory=None,
x_col="path",
y_col="labels",
class_mode="categorical",
batch_size=256,
classes=all_labels,
color_mode="grayscale",
target_size=IMG_SIZE,
) # we can use much larger batches for evaluation
# used a fixed dataset for evaluating the algorithm
test_X, test_Y = next(
core_idg.flow_from_dataframe(
dataframe=valid_df,
directory=None,
x_col="path",
y_col="labels",
class_mode="categorical",
batch_size=1024,
classes=all_labels,
color_mode="grayscale",
target_size=IMG_SIZE,
)
)
from tensorflow.keras.callbacks import (
ModelCheckpoint,
LearningRateScheduler,
EarlyStopping,
ReduceLROnPlateau,
)
weight_path = "{}_chexpert_finetuned.h5".format("xray_class")
checkpoint = ModelCheckpoint(
weight_path, monitor="val_loss", verbose=1, save_best_only=True, mode="min"
)
early = EarlyStopping(monitor="val_loss", mode="min", patience=3)
callbacks_list = [checkpoint, early]
for x, y in train_gen:
print(x.shape)
break
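# Transfer learning: reuse everything up to (but not including) the final Dropout + Dense head
# of the NIH model, then attach a new head sized for the 19 CheXpert labels.
# One common option (not done here) would be to freeze the reused layers first, e.g.:
# for layer in new_model.layers[:-2]:
#     layer.trainable = False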
new_model = tf.keras.Sequential()
for layer in multi_disease_model.layers[0:-2]:
new_model.add(layer)
new_model.add(tf.keras.layers.Dense(200, activation="relu"))
new_model.add(tf.keras.layers.Dense(19, activation="sigmoid"))
new_model.compile(
optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
loss=tf.keras.losses.BinaryCrossentropy(),
metrics=["binary_accuracy", "mae"],
)
new_model.summary()
new_model.fit(
train_gen,
steps_per_epoch=100,
validation_data=(test_X, test_Y),
epochs=15,
callbacks=callbacks_list,
)
# # CommonLit Readability Prize
# 1. Problem statement: Develop an **algorithm to rate the complexity of reading passages** for grade 3-12 classroom use.
# 2. Submissions are scored on the root mean squared error.
# Competition link: https://www.kaggle.com/c/commonlitreadabilityprize/discussion/241029
# Solution:
# #### Steps implemented:
# 1. Preparing input data
# 2. EDA
# 3. Model(s) Development stage one
# 4. Submit Final Output
# 5. Save final Model
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import warnings
warnings.filterwarnings("ignore")
from sklearn.model_selection import train_test_split
from sklearn import svm, linear_model, metrics
import math
import numpy as np
import re
from pandas import read_csv, set_option, DataFrame, concat
set_option("display.max_rows", 4)
set_option("display.max_colWidth", 20)
import gc
gc.enable()
import seaborn as sns
sns.set_theme()
import matplotlib.pyplot as plt
def seed_everything(seed=10):
# random.seed(seed)
# os.environ['PYTHONHASHSEED'] = str(seed)
np.random.seed(seed)
# torch.manual_seed(seed)
# torch.cuda.manual_seed(seed)
# torch.backends.cudnn.deterministic = True
seed_everything()
pdInputTrainData = read_csv("/kaggle/input/commonlitreadabilityprize/train.csv")
pdInputTrainData
#
# # 1. Preparing input data
# 1. Calculation of existing standard readability indexes
# 2. Normalization
# 3. Outlier treatment
# 4. Derive two new parameters
dictCorrValues = {} # updated during training
def return_all_index_framework(pdInputData, strType):
global dictCorrValues
pdFeatures = pdInputData.copy()
# 1. derive readability indexes
pdFeatures = readability_indexes(pdFeatures[["excerpt"]], "excerpt")
pdFeatures.TextStd = pdFeatures.TextStd.apply(lambda x: get_grade_number(x))
# 2. Normalization
lstInputFeatures = [
"Flesch",
"SMOG",
"FleschKincaid",
"ColemanLiau",
"Automated",
"DaleChall",
"DifficultWords",
"LinsearWrite",
"GunningFog",
"TextStd",
"FernandezHuerta",
"SzigrisztPazos",
"GutierrezPolini",
"CrawFord",
]
X = pdFeatures[lstInputFeatures].copy()
X = z_score(X)
    # 3. Treat outliers (clip values outside the IQR fences)
X_treated = outlier_treatment(X[lstInputFeatures], lstInputFeatures)
    # 4. Derive two new parameters from the existing ones
    # Derive correlation values for each parameter during training to use as weights and calculate the mean of the selected ones
    if strType == "training":  # Find correlation values, calculate means, check RMSE
X_treated = concat([X_treated, pdInputData[["target"]]], axis=1)
dictCorrValues = get_feature_corr_index(X_treated, lstInputFeatures)
lstFeaturesPos = [
"Flesch",
"SzigrisztPazos",
"FernandezHuerta",
"GutierrezPolini",
]
X_treated["PosMean"] = calculate_index_mean(
X_treated, dictCorrValues, lstFeaturesPos
)
print("\nPosMean:")
calculate_rmse(pdInputData["target"], X_treated["PosMean"])
lstFeaturesNeg = ["DifficultWords", "DaleChall", "CrawFord", "SMOG"]
X_treated["NegMean"] = calculate_index_mean(
X_treated, dictCorrValues, lstFeaturesNeg
)
print("\nNegMean:")
calculate_rmse(pdInputData["target"], X_treated["NegMean"])
sns.pairplot(data=X_treated, y_vars="PosMean", x_vars="target", kind="reg")
sns.pairplot(data=X_treated, y_vars="NegMean", x_vars="target", kind="reg")
    else:  # Use the correlation values saved during training for each parameter
lstFeaturesPos = [
"Flesch",
"SzigrisztPazos",
"FernandezHuerta",
"GutierrezPolini",
]
X_treated["PosMean"] = calculate_index_mean(
X_treated, dictCorrValues, lstFeaturesPos
)
lstFeaturesNeg = ["DifficultWords", "DaleChall", "CrawFord", "SMOG"]
X_treated["NegMean"] = calculate_index_mean(
X_treated, dictCorrValues, lstFeaturesNeg
)
return X_treated
def readability_indexes(pdInput, strExcerptColName):
from textstat import (
flesch_reading_ease,
smog_index,
flesch_kincaid_grade,
coleman_liau_index,
automated_readability_index,
dale_chall_readability_score,
difficult_words,
linsear_write_formula,
gunning_fog,
text_standard,
fernandez_huerta,
szigriszt_pazos,
gutierrez_polini,
crawford,
)
lstAllIndexes = [
"Flesch",
"SMOG",
"FleschKincaid",
"ColemanLiau",
"Automated",
"DaleChall",
"DifficultWords",
"LinsearWrite",
"GunningFog",
"TextStd",
"FernandezHuerta",
"SzigrisztPazos",
"GutierrezPolini",
"CrawFord",
]
for Formula in lstAllIndexes:
pdInput[Formula] = ""
def compute_all_indexes(row, strExcerptColName):
strExcerpt = row[strExcerptColName]
row["Flesch"] = flesch_reading_ease(strExcerpt)
row["SMOG"] = smog_index(strExcerpt)
row["FleschKincaid"] = flesch_kincaid_grade(strExcerpt)
row["ColemanLiau"] = coleman_liau_index(strExcerpt)
row["Automated"] = automated_readability_index(strExcerpt)
row["DaleChall"] = dale_chall_readability_score(strExcerpt)
row["DifficultWords"] = difficult_words(strExcerpt)
row["LinsearWrite"] = linsear_write_formula(strExcerpt)
row["GunningFog"] = gunning_fog(strExcerpt)
row["TextStd"] = text_standard(strExcerpt)
row["FernandezHuerta"] = fernandez_huerta(strExcerpt)
row["SzigrisztPazos"] = szigriszt_pazos(strExcerpt)
row["GutierrezPolini"] = gutierrez_polini(strExcerpt)
row["CrawFord"] = crawford(strExcerpt)
return row
pdFeatures = pdInput.apply(
lambda row: compute_all_indexes(row, strExcerptColName), axis=1
)
return pdFeatures
def get_grade_number(num_string):
import re
pattern = (
"\\W|\\d+" # num_string can be '8th and 9th grade' or '-8th and -9th grade'
)
str_list = re.findall(
pattern, num_string
) # ['8', '', '9'] or ['-', '8', ' ', ' -', '9', ' ']
# combine -ve sign and number
if str_list[0] == "-":
str_list = [str_list[0] + str_list[1]]
# print(num_string, str_list)
return int(str_list[0]) # take lower grade number in the range e.g. 8
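# Quick self-checks for the parsing above, using the example strings from the comment
# (a minimal sketch; remove if you prefer a cleaner notebook):
assert get_grade_number("8th and 9th grade") == 8
assert get_grade_number("-8th and -9th grade") == -8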
def calculate_index_mean(pdFeatures, dictCorrValues, lstFeatures):
pdInput = pdFeatures.copy()
for item in dictCorrValues:
# print("\n", item, dictCorrValues[item])
pdInput[item] = dictCorrValues[item] * pdInput[item]
# calculate_rmse(pdInput['target'], pdInput[item])
return pdInput[lstFeatures].apply(lambda row: np.mean(row), axis=1)
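# In words: each selected readability index is first scaled by its training-time Pearson
# correlation with the target, and the new feature is the row-wise mean of those scaled indexes.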
def get_feature_corr_index(pdInput, lstFeatures):
dictCorrValues = {}
for item in lstFeatures:
dictCorrValues[item] = pdInput.corr(method="pearson")[["target"]].loc[item][
"target"
]
    print("\nTraining: Calculated correlation values: ", dictCorrValues)
return dictCorrValues
# Normalise data: the z-score method in pandas; the same can be done with the sklearn library in one line of code (see the sketch after this function).
def z_score(df):
lstColumns = df.columns
for column in lstColumns:
df[column] = (df[column] - df[column].mean()) / df[column].std()
return df
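# A one-line sklearn equivalent of z_score, as mentioned in the comment above - a minimal
# sketch, not used below (note: StandardScaler divides by the population std, so values
# differ marginally from pandas' sample std):
def z_score_sklearn(df):
    from sklearn.preprocessing import StandardScaler

    return DataFrame(
        StandardScaler().fit_transform(df), columns=df.columns, index=df.index
    )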
# Compute Outliers
def get_summary_statistics(dataset):
mean = np.round(np.mean(dataset), 2)
median = np.round(np.median(dataset), 2)
min_value = np.round(dataset.min(), 2)
max_value = np.round(dataset.max(), 2)
quartile_1 = np.round(dataset.quantile(0.25), 2)
quartile_3 = np.round(dataset.quantile(0.75), 2)
pdHoldValues = DataFrame(
columns={"Min", "Max", "Mean", "Q1_25", "Median", "Q3_75th", "IQR"}
)
# Interquartile range
iqr = np.round(quartile_3 - quartile_1, 2)
pdHoldValues["Min"] = min_value
pdHoldValues["Mean"] = mean
pdHoldValues["Max"] = max_value
pdHoldValues["Q1_25"] = quartile_1
pdHoldValues["Q3_75th"] = quartile_3
pdHoldValues["Median"] = median
pdHoldValues["IQR"] = iqr
return pdHoldValues
# Compute values for the outliers
def treate_outliers(X, lstInputFeatures, pdIQRValues, strTreatmentType):
if strTreatmentType == "LowerBound":
for colName in lstInputFeatures:
floatLowerLim = pdIQRValues[pdIQRValues["index"] == colName][
"floatLowerLim"
].values[0]
# print(colName, "imputing lowest value as: ", floatLowerLim)
# print("Before imputing: min:",round(X[colName].min(), 5), "mean:",round(X[colName].mean(), 5), "max:",round(X[colName].max(),5))
X.loc[X[colName] < floatLowerLim, colName] = floatLowerLim
# print("After imputing: min:", X[colName].min(), "mean:",X[colName].mean(), "max:",X[colName].max())
elif strTreatmentType == "UpperBound":
for colName in lstInputFeatures:
floatUpperLim = pdIQRValues[pdIQRValues["index"] == colName][
"floatUpperLim"
].values[0]
# print("\n",colName, "imputing highest value as: ", floatUpperLim)
# print("Before: ",X[colName].min(), X[colName].mean(), X[colName].max())
X.loc[X[colName] >= floatUpperLim, colName] = floatUpperLim
# print("After: ", X[colName].min(), X[colName].mean(), X[colName].max())
return X
def outlier_treatment(X, lstInputFeatures):
pdIQRValues = get_summary_statistics(X)
pdIQRValues["floatLowerLim"] = pdIQRValues["Q1_25"] - pdIQRValues["IQR"] * 1.5
pdIQRValues["floatUpperLim"] = pdIQRValues["Q3_75th"] + pdIQRValues["IQR"] * 1.5
pdIQRValues = pdIQRValues[["floatLowerLim", "floatUpperLim"]].reset_index()
plt.figure(figsize=(17, 7))
plt.title(f"Before computing for the outlier values for each para:")
sns.boxplot(data=X, orient="h", palette="Set1")
plt.show()
X = treate_outliers(X, lstInputFeatures, pdIQRValues, "LowerBound")
X = treate_outliers(X, lstInputFeatures, pdIQRValues, "UpperBound")
plt.figure(figsize=(17, 7))
plt.title(f"After computing for the outlier values for each para:")
sns.boxplot(data=X, orient="h", palette="Set1")
plt.show()
return X
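# Toy illustration (assumed values) of the 1.5*IQR rule applied above: for [1, 2, 3, 4, 100],
# Q1 = 2, Q3 = 4, IQR = 2, so the upper bound is 4 + 1.5 * 2 = 7 and the outlier 100 is clipped to 7.
serToy = pd.Series([1, 2, 3, 4, 100], dtype=float)
q1, q3 = serToy.quantile(0.25), serToy.quantile(0.75)
print(serToy.clip(lower=q1 - 1.5 * (q3 - q1), upper=q3 + 1.5 * (q3 - q1)).tolist())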
def calculate_rmse(serActual, serPredicted):
MSE = metrics.mean_squared_error(serActual, serPredicted)
print("MSE: ", MSE, " RMSE: ", round(math.sqrt(MSE), 5))
pdFeatures = return_all_index_framework(pdInputTrainData, "training")
#
# # 2. EDA
# 1. Find the correlation, if any, between the derived parameters and the target.
plt.figure(figsize=(17, 8))
sns.heatmap(
pdFeatures.corr(method="pearson").sort_values("target"), annot=True, fmt="f"
)
plt.xticks(rotation=45)
plt.show()
lstInputFeatures = [
"Flesch",
"SMOG",
"FleschKincaid",
"ColemanLiau",
"Automated",
"DaleChall",
"DifficultWords",
"LinsearWrite",
"GunningFog",
"TextStd",
"FernandezHuerta",
"SzigrisztPazos",
"GutierrezPolini",
"CrawFord",
"PosMean",
"NegMean",
]
pdFeatures = pdFeatures[lstInputFeatures + ["target"]]
sns.pairplot(data=pdFeatures, y_vars="target", kind="reg")
pdFeatures.plot(figsize=(17, 6))
pdFeatures[["target", "PosMean", "NegMean"]].plot(figsize=(17, 6))
#
# # 3. Train and test multiple linear models to finalise the model(s)
def linear_models(X_train, y_train, X_test, y_test):
lstClassifiers = [
svm.SVR(
kernel="poly", degree=2, max_iter=-1
), # SVR: Epsilon-Support Vector Regression.
linear_model.SGDRegressor(penalty="l1", learning_rate="adaptive"),
linear_model.BayesianRidge(),
# linear_model.LassoLars(),
linear_model.ARDRegression(),
linear_model.PassiveAggressiveRegressor(),
linear_model.TheilSenRegressor(),
linear_model.LinearRegression(),
]
pdModelsOp = DataFrame()
# Train models
for clf in lstClassifiers:
clf.fit(X_train.values, y_train.values)
strModelName = str(clf)
        pdModelsOp[strModelName] = clf.predict(X_test.values)
print(strModelName)
calculate_rmse(y_test, pdModelsOp[strModelName])
pdModelsOp["Actual"] = y_test.values
pdModelsOp = pdModelsOp.rename(
columns={
"""SVR(degree=2, kernel='poly')""": "SVR",
"""SGDRegressor(learning_rate='adaptive', penalty='l1')""": "SGDR",
"BayesianRidge()": "BR",
"ARDRegression()": "ARDR",
"PassiveAggressiveRegressor()": "PAR",
"TheilSenRegressor(max_subpopulation=10000)": "TSR",
"LinearRegression()": "LR",
#'LassoLars()':'LL',
"Actual": "target",
}
)
return pdModelsOp
# #### OBSERVATION:
# - After trying out different combinations of parameters and comparing their RMSE, the parameters used above were finalised (a short sketch of this kind of comparison follows the model run below).
X, Y = pdFeatures[lstInputFeatures], pdFeatures["target"]
X_train, X_test, y_train, y_test = train_test_split(
X, Y, test_size=0.2, random_state=333
)
pdModelsOp = linear_models(X_train, y_train, X_test, y_test)
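# A sketch of the kind of parameter comparison referred to in the observation above: loop over a
# few (hypothetical) SGDRegressor penalties and compare RMSE on the same train/test split.
for strPenalty in ["l1", "l2", "elasticnet"]:
    clfTmp = linear_model.SGDRegressor(penalty=strPenalty, learning_rate="adaptive")
    clfTmp.fit(X_train.values, y_train.values)
    print("SGDRegressor penalty =", strPenalty)
    calculate_rmse(y_test, clfTmp.predict(X_test.values))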
plt.figure(figsize=(17, 8))
sns.heatmap(
pdModelsOp.corr(method="pearson").sort_values("target"), annot=True, fmt="f"
)
plt.xticks(rotation=45)
plt.show()
pdModelsOp[["SGDR", "BR", "ARDR", "TSR", "LR", "target"]].plot(figsize=(17, 7))
# #### OBSERVATION: Model selected
# - After comparing the performance of all the linear models, SGDRegressor is selected for the final prediction (a cross-validation sanity check is sketched after the fit below).
# Train the selected SGDRegressor on the full training data for prediction on the test data
SGDRegressor = linear_model.SGDRegressor(penalty="l1", learning_rate="adaptive")
X, Y = pdFeatures[lstInputFeatures], pdFeatures["target"]
SGDRegressor.fit(X.values, Y.values)
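# Optional sanity check (a sketch, not part of the original selection procedure): 5-fold
# cross-validated RMSE of the selected model on the full training feature matrix.
from sklearn.model_selection import cross_val_score
arrCvScores = cross_val_score(
    linear_model.SGDRegressor(penalty="l1", learning_rate="adaptive"),
    X.values,
    Y.values,
    scoring="neg_root_mean_squared_error",
    cv=5,
)
print("CV RMSE (mean over folds):", round(-arrCvScores.mean(), 5))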
#
# # 4. Submit Final Output
# 1. Derive the same features for the test data and predict the readability target
pdTest = read_csv("/kaggle/input/commonlitreadabilityprize/test.csv")
pdSample = read_csv("/kaggle/input/commonlitreadabilityprize/sample_submission.csv")
# The model was trained on the derived readability features, so the same feature
# framework must be applied to the raw test excerpts before predicting
pdTestFeatures = return_all_index_framework(pdTest, "testing")
y_pred = SGDRegressor.predict(pdTestFeatures[lstInputFeatures].values)
pdSample["target"] = y_pred
pdSample
pdSample.to_csv("submission.csv", index=False)
#
# # 5. Save Final Model
# To save and reuse the model, use the following helper functions
def save_model(model, strModelName):
import pickle
# save the model to disk
# filename = 'finalized_model.sav'
filename = strModelName + ".sav"
pickle.dump(model, open(filename, "wb"))
return
# save_model(clf, strModelName)
def load_model(strModelName):
    import pickle  # imported here as well because the import in save_model is local to that function
    filename = strModelName + ".sav"
    # load the model from disk
    loaded_model = pickle.load(open(filename, "rb"))
    return loaded_model
# loaded_model = load_model(strModelName)
# result = loaded_model.score(X_test, y_test)
# print(result)
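# Example usage (a sketch; the file name below is an arbitrary choice):
save_model(SGDRegressor, "SGDRegressor_readability")
# loaded_model = load_model("SGDRegressor_readability")
# y_pred_reloaded = loaded_model.predict(pdTestFeatures[lstInputFeatures].values)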