Column schema of this dataset preview (each row below is one Kaggle notebook with its metadata):

| column | type | lengths / value range | contains nulls (⌀) |
| --- | --- | --- | --- |
| file_id | string | 5–9 chars | |
| content | string | 100–5.25M chars | |
| local_path | string | 66–70 chars | |
| kaggle_dataset_name | string | 3–50 chars | ⌀ |
| kaggle_dataset_owner | string | 3–20 chars | ⌀ |
| kversion | string | 497–763 chars | ⌀ |
| kversion_datasetsources | string | 71–5.46k chars | ⌀ |
| dataset_versions | string | 338–235k chars | ⌀ |
| datasets | string | 334–371 chars | ⌀ |
| users | string | 111–264 chars | ⌀ |
| script | string | 100–5.25M chars; identical to `content` in the rows previewed below | |
| df_info | string | 0–4.87M chars | |
| has_data_info | bool | 2 classes | |
| nb_filenames | int64 | 0–370 | |
| retreived_data_description | string | 0–4.44M chars | |
| script_nb_tokens | int64 | 25–663k | |
| upvotes | int64 | 0–1.65k | |
| tokens_description | int64 | 25–663k | |
| tokens_script | int64 | 25–663k | |
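Assuming the preview rows are exported locally (the file name below is hypothetical), a minimal pandas sketch for loading and filtering this table could look like:

```python
import pandas as pd

# Hypothetical local export of the table above; adjust the path and format to your copy.
df = pd.read_parquet("kaggle_notebooks_preview.parquet")

# Example: keep moderately sized notebooks that received at least one upvote.
subset = df[df["script_nb_tokens"].between(100, 20_000) & (df["upvotes"] > 0)]
print(subset[["file_id", "kaggle_dataset_name", "upvotes", "script_nb_tokens"]].head())
```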
file_id: 129401136
content:
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# import visualization libraries
import plotly.express as px
import matplotlib.pyplot as plt
import seaborn as sns
# Load the handy train/test split helper:
from sklearn.model_selection import train_test_split
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
import category_encoders as ce
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn import metrics
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# always fix RANDOM_SEED so that your experiments are reproducible!
RANDOM_SEED = 42
# pin the package versions so that the experiments are reproducible:
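# (illustrative sketch, not in the original notebook) the simplest way to "pin" the
# environment is to at least record the library versions this run actually used:
import sklearn
print("numpy", np.__version__, "| pandas", pd.__version__, "| sklearn", sklearn.__version__)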
# Load the competition data
DATA_DIR = "/kaggle/input/sf-booking/"
df_train = pd.read_csv(DATA_DIR + "hotels_train.csv")  # training set
df_test = pd.read_csv(DATA_DIR + "hotels_test.csv")  # test set we will predict on
sample_submission = pd.read_csv(DATA_DIR + "submission.csv")  # sample submission
df_train.info()
df_train.head(2)
df_test.info()
df_test.head(2)
sample_submission.head(2)
sample_submission.info()
# IMPORTANT! merge train and test into a single dataset so the features are processed consistently
df_train["sample"] = 1  # mark the train rows
df_test["sample"] = 0  # mark the test rows
df_test[
    "reviewer_score"
] = 0  # the test set has no reviewer_score (that is what we predict), so fill it with zeros for now
data = pd.concat([df_test, df_train], sort=False).reset_index(drop=True)  # merge
data.info()
# Analyse the "review date" feature
# convert "review_date" to a datetime format
data["review_date"] = pd.to_datetime(data["review_date"])
# Extract the review year and month into separate columns
data["review_year"] = data["review_date"].dt.year
data["review_month"] = data["review_date"].dt.month
# drop the date column from the data
data.drop("review_date", axis=1, inplace=True)
# Look at the number of reviews per month
data_month = (
data["review_month"]
.value_counts()
.reset_index()
.rename(columns={"index": "month number", "review_month": "count"})
)
display(data_month)
# Plot a bar chart for the 'review_month' feature
fig = px.bar(data_month, x="month number", y="count", text_auto=True, log_y=True)
fig.update_traces(textposition="outside")
fig.show()
# Create a season feature
# The season function returns the season number
def season(month):
    if month in [3, 4, 5]:
        return 1  # Spring
    elif month in [6, 7, 8]:
        return 2  # Summer
    elif month in [12, 1, 2]:
        return 3  # Winter
    else:
        return 4  # Autumn
data["season"] = data["review_month"].apply(season)
# Look at the number of reviews per season
data_season = (
data["season"]
.value_counts()
.reset_index()
.rename(columns={"index": "season_number", "season": "count"})
)
display(data_season)
# Plot a bar chart for the feature
fig = px.bar(data_season, x="season_number", y="count", text_auto=True, log_y=True)
fig.show()
# Analyse the user reviews
# Look at the negative reviews, "negative_review"
data["negative_review"].value_counts()
data["neg_neutral"] = data["negative_review"].apply(
lambda x: "0"
if x == "No Negative"
or x == " Nothing"
or x == " nothing"
or x == " N A"
or x == " Nothing "
else "neg"
)
data["neg_neutral"].value_counts()
# Analyse the user reviews
# Look at the positive reviews, "positive_review"
data["positive_review"].value_counts()
data["pos_neutral"] = data["positive_review"].apply(
lambda x: "1" if x == "No Positive" or x == " Nothing" else "pos"
)
data["pos_neutral"].value_counts()
# Drop the raw negative and positive review features
data.drop(["negative_review", "positive_review"], axis=1, inplace=True)
# Analyse the "tags" feature
# Create a feature with the number of tags
data["cnt_words_tag"] = data["tags"].apply(lambda x: len(list(x.split(","))))
data["cnt_words_tag"].head(10)
# Build a dictionary of unique tags and count how often each one occurs
# The loop below fills a dictionary whose keys are the unique tags
# and whose values are the number of times each tag appears
count_dict = {}
for tags_list in data["tags"]:
tags = "".join(tags_list)
tags = tags.replace("'", "")
tags = tags.replace("[", "")
tags = tags.replace("]", "")
tags = tags.split(",")
for tag in tags:
tag = tag.strip()
if tag in count_dict:
count_dict[tag] += 1
else:
count_dict[tag] = 1
print(len(count_dict))
# Create a "trip type" feature; if the trip type is not specified, it is 0
def type_trip(tags):
if "Leisure trip" in tags:
return "1"
elif "Business trip" in tags:
return "2"
return 0
data["type_trip"] = data["tags"].apply(type_trip)
# Create a "traveler group" feature (solo traveler, couple, family, ...)
def reviewer_count(tags):
reviewer_list = [
"Solo traveler",
"Travelers with friends",
"Couple",
"Group",
"Family with older children",
"Family with young children",
]
for tag in reviewer_list:
if tag in tags:
return tag
return 0
data["reviewer_count"] = data["tags"].apply(reviewer_count)
# Look at the number of reviews per traveler group
data["reviewer_count"].value_counts()
# From the tags, extract how many nights guests stayed in the hotel; define a helper function for this
def number_of_nights(arg):
tags_list = arg.split(",")
for tag in tags_list:
idx = tag.find("night")
if idx > 0:
word_list = tag.split()
for word in word_list:
if word.isnumeric():
return int(word)
# create the "number of nights in the hotel" feature
data["number_of_nights"] = data["tags"].apply(number_of_nights)
# Replace missing values of the "number of nights" feature with the median
data["number_of_nights"] = data["number_of_nights"].fillna(
    data["number_of_nights"].median()
)
# Look at the distribution of the number of nights
data["number_of_nights"].value_counts()
# Analyse the hotel address feature
data["hotel_address"].value_counts()
# Extract the country and city into separate features: define a helper and apply it to the address column
def city_and_country(address):
    words_list = address.split(" ")
    if "United Kingdom" in address:
        return ("United Kingdom", words_list[-5])
    else:
        return (words_list[-1], words_list[-2])
data[["country_hotel", "city_hotel"]] = (
    data["hotel_address"].apply(city_and_country).apply(pd.Series)
)
# Show the result
data.country_hotel.value_counts()
data.city_hotel.value_counts()
# Drop the address and country features, since each country is represented by a single city
data.drop(["hotel_address", "country_hotel"], axis=1, inplace=True)
data.info()
# Analyse the reviewer nationality feature
count_dict = {}
for country in data.reviewer_nationality:
country = country.strip()
if country in count_dict:
count_dict[country] += 1
else:
count_dict[country] = 1
print(len(count_dict))
print(count_dict)
# There are 227 countries.
# Reduce this by keeping the 20 most frequent countries and labelling the rest as "other"
countries = data.reviewer_nationality.value_counts()
countries_list = countries[:20].index
def country_freq(country):
if country in countries_list:
return country
else:
return "other"
data["country_reviewer"] = data["reviewer_nationality"].apply(country_freq)
data.country_reviewer.value_counts()
# Look at the distribution across categories
data_nationality = (
    data["country_reviewer"]
    .value_counts()
    .reset_index()
    .rename(columns={"index": "nationality", "country_reviewer": "count"})
)
data_nationality
# Plot a horizontal bar chart to look at the distribution of the feature
fig = px.bar(
    data_nationality,
    x="count",
    y="nationality",
    text_auto=True,
    title="Top-20 guest nationalities by number of reviews",
)
fig.show()
# Drop the reviewer_nationality feature
data.drop("reviewer_nationality", axis=1, inplace=True)
# Check the data for missing values
data.isnull().sum()
data = data.fillna(0)
# The latitude and longitude columns contain many missing values; since the location was already extracted from the address, drop lat and lng
data.drop(["lat", "lng"], axis=1, inplace=True)
data["hotel_name"].nunique()
# The days_since_review feature is not useful, so drop it
# The hotel name is an important feature, but there are too many unique values, so we have to drop it
# drop the tags feature, since we have already extracted the information we need from it
data.drop(["days_since_review", "hotel_name", "tags"], axis=1, inplace=True)
# Show summary information about the non-numeric features
data.describe(include="object")
# Convert the "trip type" feature to a numeric type
data["type_trip"] = data["type_trip"].astype("int64")
# Build a list of columns with categorical features
category_list = [x for x in data.columns if data[x].dtypes == "object"]
category_list
# Collect the categorical features into a separate dataframe
data_cat = data[category_list]
data_cat
# To choose an encoding method, look at the number of categories in each feature
for col in data_cat.columns:
    print(f"Feature {col} has {data_cat[col].nunique()} categories")
# the 'country_reviewer' feature has 21 categories, so we use binary encoding for it
bin_encoder = ce.BinaryEncoder(cols=["country_reviewer"])
type_bin = bin_encoder.fit_transform(data["country_reviewer"])
data = pd.concat([data, type_bin], axis=1)
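# (illustrative check, not in the original notebook) binary encoding is compact:
# 21 categories need only about ceil(log2(number of categories)) ≈ 5 binary columns
# instead of 21 one-hot columns; print the columns it actually produced:
print("binary-encoded columns:", list(type_bin.columns))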
# Drop the "country_reviewer" feature
data = data.drop("country_reviewer", axis=1)
# Use OneHotEncoder for the remaining categorical features
# Rebuild the list of categorical features to encode
cat_list = [x for x in data.columns if data[x].dtypes == "object"]
print(cat_list)
# Encode the features with OneHotEncoder
onehot = ce.OneHotEncoder(cols=cat_list, use_cat_names=True)
type_bin = onehot.fit_transform(data[cat_list])
data = pd.concat([data, type_bin], axis=1)
# Drop the original categorical features
data = data.drop(cat_list, axis=1)
data.info()
# Plot a heatmap to visualise the feature correlations
plt.rcParams["figure.figsize"] = (15, 10)
sns.heatmap(data.drop(["sample"], axis=1).corr(), annot=True, cmap="Spectral")
# pick and drop one feature from each highly correlated pair
drop_list = ["neg_neutral_0", "pos_neutral_1", "additional_number_of_scoring"]
data.drop(drop_list, axis=1, inplace=True)
data.head(3)
plt.rcParams["figure.figsize"] = (15, 10)
sns.heatmap(data.drop(["sample"], axis=1).corr(), annot=True, cmap="Spectral")
# Now split the combined dataset back into train and test parts
train_data = data.query("sample == 1").drop(["sample"], axis=1)
test_data = data.query("sample == 0").drop(["sample"], axis=1)
y = train_data.reviewer_score.values  # our target
X = train_data.drop(["reviewer_score"], axis=1)
# Use the train_test_split helper to split the training data
# hold out 20% of the data for validation (the test_size parameter)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=RANDOM_SEED
)
# sanity check
test_data.shape, train_data.shape, X.shape, X_train.shape, X_test.shape
# Import the required libraries:
from sklearn.ensemble import (
    RandomForestRegressor,
)  # tool for creating and training the model
from sklearn import metrics  # tools for evaluating model accuracy
# Create the model (KEEP THE DEFAULT SETTINGS)
model = RandomForestRegressor(
n_estimators=100, verbose=1, n_jobs=-1, random_state=RANDOM_SEED
)
# Train the model on the training data
model.fit(X_train, y_train)
# Use the trained model to predict reviewer scores on the validation sample.
# The predicted values are stored in y_pred
y_pred = model.predict(X_test)
# Compare the predicted values (y_pred) with the actual ones (y_test) to see how far off they are on average.
# The metric is Mean Absolute Error (MAE): the average deviation of the predictions from the actual values.
print("MAE:", metrics.mean_absolute_error(y_test, y_pred))
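# (illustrative addition, not in the original notebook) MAE is reported in score units;
# the related MAPE metric expresses the average error as a percentage of the true score
# (safe here because reviewer scores are never zero):
print("MAPE, %:", np.mean(np.abs((y_test - y_pred) / y_test)) * 100)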
# RandomForestRegressor can report the most important features for the model
plt.rcParams["figure.figsize"] = (10, 10)
feat_importances = pd.Series(model.feature_importances_, index=X.columns)
feat_importances.nlargest(15).plot(kind="barh")
test_data.sample(10)
test_data = test_data.drop(["reviewer_score"], axis=1)
sample_submission
predict_submission = model.predict(test_data)
predict_submission
list(sample_submission)
sample_submission["reviewer_score"] = predict_submission
sample_submission.to_csv("submission.csv", index=False)
sample_submission.head(10)
local_path:
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/401/129401136.ipynb
kaggle_dataset_name: null | kaggle_dataset_owner: null
kversion:
[{"Id": 129401136, "ScriptId": 38257076, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14370563, "CreationDate": "05/13/2023 13:23:00", "VersionNumber": 1.0, "Title": "BaseLine Saveleva v1", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 389.0, "LinesInsertedFromPrevious": 278.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 111.0, "LinesInsertedFromFork": 278.0, "LinesDeletedFromFork": 22.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 111.0, "TotalVotes": 1}]
kversion_datasetsources: null | dataset_versions: null | datasets: null | users: null
has_data_info: false | nb_filenames: 0 | script_nb_tokens: 4952 | upvotes: 1 | tokens_description: 4952 | tokens_script: 4952
---
file_id: 129921129
content:
<jupyter_start><jupyter_text>Retinal Vessel Segmentation
Kaggle dataset identifier: retinal-vessel-segmentation
<jupyter_script>from PIL import Image
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
os.environ["SM_FRAMEWORK"] = "tf.keras"
import segmentation_models as sm
import matplotlib.pyplot as plt
import tensorflow_io as tfio
import tensorflow as tf
root = "../input/retinal-vessel-segmentation"
exts = ("jpg", "JPG", "png", "PNG", "tif", "gif", "ppm")
print(tf.__version__)
print(tfio.__version__)
# # DRIVE
# The dataset comes with pairs of input retina images and target masks. Among all the retina datasets, we use only this one for a quick baseline; however, the others can easily be plugged into this pipeline.
#
input_data = os.path.join(root, "DRIVE/training/images")
images = sorted(
[
os.path.join(input_data, fname)
for fname in os.listdir(input_data)
if fname.endswith(exts) and not fname.startswith(".")
]
)
images
target_data = os.path.join(root, "DRIVE/training/1st_manual")
masks = sorted(
[
os.path.join(target_data, fname)
for fname in os.listdir(target_data)
if fname.endswith(exts) and not fname.startswith(".")
]
)
masks
print("Number of samples:", len(images), len(masks))
for input_path, target_path in zip(images[:10], masks[:10]):
print(input_path[-31:], "|", target_path[-34:])
IMAGE_SIZE = 512
BATCH_SIZE = 12
def read_files(image_path, mask=False):
image = tf.io.read_file(image_path)
if mask:
image = tf.io.decode_gif(image) # out: (1, h, w, 3)
image = tf.squeeze(image) # out: (h, w, 3)
image = tf.image.rgb_to_grayscale(image) # out: (h, w, 1)
image = tf.divide(image, 128)
image.set_shape([None, None, 1])
image = tf.image.resize(images=image, size=[IMAGE_SIZE, IMAGE_SIZE])
image = tf.cast(image, tf.int32)
else:
image = tfio.experimental.image.decode_tiff(image) # out: (h, w, 4)
image = image[:, :, :3] # out: (h, w, 3)
image.set_shape([None, None, 3])
image = tf.image.resize(images=image, size=[IMAGE_SIZE, IMAGE_SIZE])
image = image / 255.0
return image
def load_data(image_list, mask_list):
image = read_files(image_list)
mask = read_files(mask_list, mask=True)
return image, mask
def data_generator(image_list, mask_list):
dataset = tf.data.Dataset.from_tensor_slices((image_list, mask_list))
dataset = dataset.map(load_data, num_parallel_calls=tf.data.AUTOTUNE)
dataset = dataset.batch(BATCH_SIZE, drop_remainder=False)
return dataset
train_dataset = data_generator(images, masks)
train_dataset
def visualize(**images):
    """Plot images in one row."""
n = len(images)
plt.figure(figsize=(20, 20))
for i, (name, image) in enumerate(images.items()):
plt.subplot(1, n, i + 1)
plt.xticks([])
plt.yticks([])
plt.title(" ".join(name.split("_")).title())
plt.imshow(image, cmap="gray")
plt.show()
image, mask = next(iter(train_dataset.take(1)))
print(image.shape, mask.shape)
for img, msk in zip(image[:2], mask[:2]):
print(mask.numpy().min(), mask.numpy().max())
print(np.unique(mask.numpy()))
visualize(
image=img.numpy(),
gt_mask=msk.numpy(),
)
# # Model
from tensorflow import keras
# Free up RAM in case the model definition cells were run multiple times
keras.backend.clear_session()
# improvement 1
BACKBONE = "vgg19"
n_classes = 1
activation = "sigmoid"
model = sm.Unet(BACKBONE, classes=n_classes, activation=activation)
model.summary(line_length=110)
# # Callback : Monitoring Training Progress
class DisplayCallback(keras.callbacks.Callback):
def __init__(self, dataset, epoch_interval=5):
self.dataset = dataset
self.epoch_interval = epoch_interval
def display(self, display_list, extra_title=""):
plt.figure(figsize=(15, 15))
title = ["Input Image", "True Mask", "Predicted Mask"]
if len(display_list) > len(title):
title.append(extra_title)
for i in range(len(display_list)):
plt.subplot(1, len(display_list), i + 1)
plt.title(title[i])
plt.imshow(display_list[i], cmap="gray")
plt.axis("off")
plt.show()
def create_mask(self, pred_mask):
pred_mask = (pred_mask > 0.5).astype("int32")
return pred_mask[0]
def show_predictions(self, dataset, num=1):
for image, mask in dataset.take(num):
pred_mask = model.predict(image)
self.display([image[0], mask[0], self.create_mask(pred_mask)])
def on_epoch_end(self, epoch, logs=None):
if epoch and epoch % self.epoch_interval == 0:
self.show_predictions(self.dataset)
print("\nSample Prediction after epoch {}\n".format(epoch + 1))
# # Compile and Fit
# define the optimizer
optim = keras.optimizers.Adam(0.0001)
bce = keras.losses.BinaryCrossentropy()
metrics = ["accuracy"]
# compile the keras model with the defined optimizer, loss and metrics
model.compile(optim, bce, metrics)
history = model.fit(
train_dataset, callbacks=[DisplayCallback(train_dataset)], epochs=400
)
# NOTE: the evaluation below reuses the training set (there is no separate validation split), so the scores are in-sample
valid_dataset = train_dataset
test_masks = np.concatenate([y for x, y in valid_dataset], axis=0)
masks = test_masks.ravel()
test_imgs = np.concatenate([x for x, y in valid_dataset], axis=0)
predictions = model.predict(test_imgs)
predictions.shape
y_pred = predictions.ravel()
y_pred = (y_pred > 0.5).astype("int32")
y_pred
from sklearn.metrics import classification_report
print(classification_report(masks, y_pred))
def jaccard(target, prediction):
intersection = np.logical_and(target, prediction)
union = np.logical_or(target, prediction)
iou_score = np.sum(intersection) / np.sum(union)
return iou_score
print("The Jaccard Score is: ", jaccard(y_pred, masks))
def my_dice(target, prediction):
intersection = np.logical_and(target, prediction)
union = np.logical_or(target, prediction)
dice = (2 * np.sum(intersection)) / (np.sum(union) + np.sum(intersection))
return dice
print("The Dice Score is: ", my_dice(y_pred, masks))
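# (illustrative check, not part of the original notebook) Dice and IoU are directly related:
# dice = 2 * iou / (1 + iou), so either score can be derived from the other
iou = jaccard(y_pred, masks)
print("Dice recovered from IoU:", 2 * iou / (1 + iou))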
image = test_imgs[4]
mask = test_masks[4]
image = np.expand_dims(image, axis=0)
pred_mask = model.predict(image)
pred_mask = (pred_mask > 0.5).astype("int32")
plt.figure(figsize=(10, 5))
plt.subplot(121)
plt.title("Original Mask")
plt.imshow(mask, cmap="gray")
plt.axis("off")
plt.subplot(122)
plt.title("Predicted Mask")
plt.imshow(pred_mask[0], cmap="gray")
plt.axis("off")
image = test_imgs[3]
mask = test_masks[3]
image = np.expand_dims(image, axis=0)
pred_mask = model.predict(image)
pred_mask = (pred_mask > 0.5).astype("int32")
plt.figure(figsize=(10, 5))
plt.subplot(121)
plt.title("Original Mask")
plt.imshow(mask, cmap="gray")
plt.axis("off")
plt.subplot(122)
plt.title("Predicted Mask")
plt.imshow(pred_mask[0], cmap="gray")
plt.axis("off")
image = test_imgs[11]
mask = test_masks[11]
image = np.expand_dims(image, axis=0)
pred_mask = model.predict(image)
pred_mask = (pred_mask > 0.5).astype("int32")
plt.figure(figsize=(10, 5))
plt.subplot(121)
plt.title("Original Mask")
plt.imshow(mask, cmap="gray")
plt.axis("off")
plt.subplot(122)
plt.title("Predicted Mask")
plt.imshow(pred_mask[0], cmap="gray")
plt.axis("off")
image = test_imgs[1]
mask = test_masks[1]
image = np.expand_dims(image, axis=0)
pred_mask = model.predict(image)
pred_mask = (pred_mask > 0.5).astype("int32")
plt.figure(figsize=(10, 5))
plt.subplot(121)
plt.title("Original Mask")
plt.imshow(mask, cmap="gray")
plt.axis("off")
plt.subplot(122)
plt.title("Predicted Mask")
plt.imshow(pred_mask[0], cmap="gray")
plt.axis("off")
local_path:
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/921/129921129.ipynb
kaggle_dataset_name:
retinal-vessel-segmentation
kaggle_dataset_owner:
ipythonx
kversion:
[{"Id": 129921129, "ScriptId": 38075491, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14286518, "CreationDate": "05/17/2023 12:48:33", "VersionNumber": 2.0, "Title": "Teamproject model tuning", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 266.0, "LinesInsertedFromPrevious": 88.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 178.0, "LinesInsertedFromFork": 104.0, "LinesDeletedFromFork": 150.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 162.0, "TotalVotes": 0}]
kversion_datasetsources:
[{"Id": 186341474, "KernelVersionId": 129921129, "SourceDatasetVersionId": 3043853}]
dataset_versions:
[{"Id": 3043853, "DatasetId": 1864103, "DatasourceVersionId": 3092339, "CreatorUserId": 1984321, "LicenseName": "Data files \u00a9 Original Authors", "CreationDate": "01/14/2022 15:22:31", "VersionNumber": 1.0, "Title": "Retinal Vessel Segmentation", "Slug": "retinal-vessel-segmentation", "Subtitle": "a collection of retinal vessel images", "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
datasets:
[{"Id": 1864103, "CreatorUserId": 1984321, "OwnerUserId": 1984321.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 3043853.0, "CurrentDatasourceVersionId": 3092339.0, "ForumId": 1887162, "Type": 2, "CreationDate": "01/14/2022 15:22:31", "LastActivityDate": "01/14/2022", "TotalViews": 5792, "TotalDownloads": 859, "TotalVotes": 13, "TotalKernels": 11}]
users:
[{"Id": 1984321, "UserName": "ipythonx", "DisplayName": "Innat", "RegisterDate": "06/11/2018", "PerformanceTier": 3}]
has_data_info: false | nb_filenames: 0 | script_nb_tokens: 2532 | upvotes: 0 | tokens_description: 2559 | tokens_script: 2532
---
file_id: 129921388
content:
# ### Imports
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
from autogluon.tabular import TabularPredictor
from autogluon.core.metrics import make_scorer
from sklearn.preprocessing import MinMaxScaler, StandardScaler
warnings.filterwarnings("ignore")
# ### Data Prep
COMP_PATH = "/kaggle/input/icr-identify-age-related-conditions"
train = pd.read_csv(f"{COMP_PATH}/train.csv")
test = pd.read_csv(f"{COMP_PATH}/test.csv")
sample_submission = pd.read_csv(f"{COMP_PATH}/sample_submission.csv")
greeks = pd.read_csv(f"{COMP_PATH}/greeks.csv")
train.drop("Id", axis=1, inplace=True)
train.head()
# ### Metric
def competition_log_loss(y_true, y_pred):
N_0 = np.sum(1 - y_true)
N_1 = np.sum(y_true)
p_1 = np.clip(y_pred, 1e-15, 1 - 1e-15)
p_0 = 1 - p_1
log_loss_0 = -np.sum((1 - y_true) * np.log(p_0)) / N_0
log_loss_1 = -np.sum(y_true * np.log(p_1)) / N_1
return (log_loss_0 + log_loss_1) / 2
def balanced_log_loss(y_true, y_pred):
# y_true: correct labels 0, 1
# y_pred: predicted probabilities of class=1
# calculate the number of observations for each class
N_0 = np.sum(1 - y_true)
N_1 = np.sum(y_true)
# calculate the weights for each class to balance classes
w_0 = 1 / N_0
w_1 = 1 / N_1
# calculate the predicted probabilities for each class
p_1 = np.clip(y_pred, 1e-15, 1 - 1e-15)
p_0 = 1 - p_1
# calculate the summed log loss for each class
log_loss_0 = -np.sum((1 - y_true) * np.log(p_0))
log_loss_1 = -np.sum(y_true * np.log(p_1))
    # calculate the weighted summed logarithmic loss
    # (factor of 2 included to give the same result as log loss with balanced input)
    balanced_log_loss = 2 * (w_0 * log_loss_0 + w_1 * log_loss_1) / (w_0 + w_1)
# return the average log loss
return balanced_log_loss / (N_0 + N_1)
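# (illustrative usage, not part of the original notebook) sanity-check the metric on a tiny
# hand-made example; confident correct predictions should give a loss close to zero
_toy_true = np.array([0, 0, 1, 1])
_toy_pred = np.array([0.05, 0.1, 0.9, 0.95])
print("balanced_log_loss on toy data:", balanced_log_loss(_toy_true, _toy_pred))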
cols = train.columns
print("Length:", len(cols))
corr = train.corr()["Class"].apply(abs).sort_values(ascending=False)
threshold = 0.1
corr_cols = list(corr[corr > threshold].index)
print("Corr Cols:", corr_cols)
print("Length:", len(corr_cols))
scaler = StandardScaler()
num_cols = [x for x in train.columns if train[x].dtype not in ["object", "int64"]]
df = train.copy()
df[num_cols] = scaler.fit_transform(train[num_cols])
df
sns.heatmap(train.corr())
plt.show()
log_loss_scorer = make_scorer(
name="bal_log_loss",
score_func=balanced_log_loss,
optimum=0,
greater_is_better=False,
)
clf = TabularPredictor(label="Class", path="model", eval_metric=log_loss_scorer)
# fit on the scaled frame so the training features are preprocessed the same way as the scaled test features below
clf.fit(df[corr_cols])
test_df = test.copy()
test_df[num_cols] = scaler.transform(test[num_cols])
preds = clf.predict_proba(test_df)
preds.head()
test["class_0"] = preds.iloc[:, 0].round(4)
test["class_1"] = preds.iloc[:, 1].round(4)
test[["Id", "class_0", "class_1"]].to_csv("submission.csv", index=False)
local_path:
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/921/129921388.ipynb
kaggle_dataset_name: null | kaggle_dataset_owner: null
kversion:
[{"Id": 129921388, "ScriptId": 38643276, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7210035, "CreationDate": "05/17/2023 12:50:25", "VersionNumber": 1.0, "Title": "ICR 2023: Baseline", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 97.0, "LinesInsertedFromPrevious": 97.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
kversion_datasetsources: null | dataset_versions: null | datasets: null | users: null
has_data_info: false | nb_filenames: 0 | script_nb_tokens: 1063 | upvotes: 0 | tokens_description: 1063 | tokens_script: 1063
---
file_id: 129498680
content:
# # 🧮 **Blueberry?**
# ---
# via GIPHY
# <span style="padding: 10px;
# color:white;font-size:200%;
# font-family:Time News Roman; color: DarkGreen">
# 🪔 Please Upvote my kernel if you liked it ⬆️
# ---
# <div style="color:white;
# display:fill;
# text-align:center;
# border-radius:5px;
# background-color:lightgreen;
# letter-spacing:0.5px">
# <span style="padding: 10px;
# color:white;font-size:310%;
# font-family:Time News Roman; color: DarkGreen">
# 📋 Table of Contents
#
# ## **1. [Introduction](#introduction)**
# ## **2. [Body](#body)**
# > **2.1. [Change the theme of the notebook](#appearance)**
# > **2.2. [Import important libraries, Read the data](#import)**
# > **2.3. [Overview](#overview)**
# > **2.4. [EDA](#eda)**
# > **2.5. [Feature Engineering](#feature-engineering)**
# > **2.6. [Model Selection](#model-selection)**
# > **2.7. [Final Evaluation](#final-evaluation)**
# > **2.8. [Result](#result)**
# ## **3. [Conclusion](#conclusion)**
# * * *
# # 📖 **INTRODUCTION**
# ---
# asd
# **[⬆️Back to Table of Contents ⬆️](#toc)**
# * * *
# # 🦴 **BODY**
# ---
# ## 🖌️ Change the theme of the notebook
# ---
#
from IPython.core.display import HTML
with open("./CSS.css", "r") as file:
custom_css = file.read()
HTML(custom_css)
# **[⬆️Back to Table of Contents ⬆️](#toc)**
# * * *
# ## 🛠️ Import important libraries, Read the data
# ---
#
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from xgboost import XGBRegressor
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_absolute_error
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split, GridSearchCV
train = pd.read_csv("/kaggle/input/playground-series-s3e14/train.csv", index_col="id")
test = pd.read_csv("/kaggle/input/playground-series-s3e14/test.csv", index_col="id")
# **[⬆️Back to Table of Contents ⬆️](#toc)**
# * * *
# ## 👁️ Overview
# ---
#
train.shape
train.head()
train.tail()
train.info()
train.describe()
train.dtypes
# **[⬆️Back to Table of Contents ⬆️](#toc)**
# * * *
# ## 🔎 EDA
# ---
#
train.corr()
sns.heatmap(train.corr())
plt.show()
train.corr()["yield"].sort_values(ascending=False)
# **[⬆️Back to Table of Contents ⬆️](#toc)**
# * * *
# ## ⚙️ Feature Engineering
# ---
#
# train.drop(["osmia","bumbles","andrena","honeybee",
# "AverageOfUpperTRange","AverageOfLowerTRange",
# "MinOfLowerTRange","MinOfUpperTRange",
# "MaxOfUpperTRange","MaxOfLowerTRange"],axis = 1, inplace = True)
# **[⬆️Back to Table of Contents ⬆️](#toc)**
# * * *
# ## 🧑🔬 Model Selection
# ---
#
def scaling(feature):
    global X_train, X_test
    # fit the scaler on the training split only, then apply it to both splits
    scaler = MinMaxScaler()
    scaler.fit(X_train[feature].to_numpy().reshape(-1, 1))
    X_train[feature] = scaler.transform(X_train[feature].to_numpy().reshape(-1, 1))
    X_test[feature] = scaler.transform(X_test[feature].to_numpy().reshape(-1, 1))
RS = 13
X = train.drop(["yield"], axis=1)
y = train[["yield"]]
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=RS
)
scale_needed_features = [
"MaxOfUpperTRange",
"MinOfUpperTRange",
"AverageOfUpperTRange",
"MaxOfLowerTRange",
"MinOfLowerTRange",
"AverageOfLowerTRange",
"RainingDays",
"seeds",
]
for feature in scale_needed_features:
scaling(feature)
xgb = XGBRegressor(random_state=RS, max_depth=3, n_estimators=100, eval_metric="mae")
xgb.fit(X_train, y_train)
xgb_prediction = xgb.predict(X_test)
mae_xgb = mean_absolute_error(y_test, xgb_prediction)
mae_xgb
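# (illustrative addition, not in the original notebook) a naive baseline for context:
# predicting the mean training yield for every row, compared with the model MAE above
baseline_pred = np.full(len(y_test), float(y_train["yield"].mean()))
print("XGB MAE     :", mae_xgb)
print("Baseline MAE:", mean_absolute_error(y_test, baseline_pred))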
# Why XGBoost was chosen
# **[⬆️Back to Table of Contents ⬆️](#toc)**
# * * *
# ## 🧫 Final Evaluation
# ---
#
X_train = train.drop(["yield"], axis=1)
y_train = train[["yield"]]
X_test = test.copy()
xgb_final = XGBRegressor(
random_state=RS, max_depth=3, n_estimators=100, eval_metric="mae"
)
xgb_final.fit(X_train, y_train)
xgb_final_prediction = xgb_final.predict(X_test)
# **[⬆️Back to Table of Contents ⬆️](#toc)**
# * * *
# ## 📋 Result
# ---
#
result = pd.DataFrame({"yield": xgb_final_prediction}).set_index(X_test.index)
result
result.to_csv("second_sub.csv")
# Author: amyrmahdy
# Date: 12 May 2023
local_path:
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/498/129498680.ipynb
kaggle_dataset_name: null | kaggle_dataset_owner: null
kversion:
[{"Id": 129498680, "ScriptId": 38412702, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7867890, "CreationDate": "05/14/2023 10:17:08", "VersionNumber": 16.0, "Title": "playground-series-s3e14-wild-blueberry", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 231.0, "LinesInsertedFromPrevious": 138.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 93.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
kversion_datasetsources: null | dataset_versions: null | datasets: null | users: null
| false | 0 | 1,567 | 0 | 1,567 | 1,567 |
||
129498814
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
Bike_T = pd.read_csv("../input/bike-sharing-demand/train.csv")
type(Bike_T)
Bike_T.head()
Bike_T.index
Bike_T.info()
Bike_T.shape
Bike_T.columns
Bike_T.dtypes
Bike_T.isnull().sum()
Bike_T.nunique()
Bike_T.describe()
# Trouver la corrélation entre les caractéristiques
corr = Bike_T.corr()
corr.shape
plt.figure(figsize=(25, 25))
sns.heatmap(
corr,
cbar=True,
square=True,
fmt=".1f",
annot=True,
annot_kws={"size": 15},
cmap="YlGnBu",
)
corr
pd.crosstab(Bike_T.temp, Bike_T.season, normalize="index")
pd.crosstab(Bike_T.workingday, Bike_T.season, normalize="index")
pd.crosstab(Bike_T.weather, Bike_T.season, normalize="index")
# # Visualization
sns.displot(Bike_T, x="count", kind="kde")
sns.catplot(x="season", y="count", hue="weather", kind="violin", data=Bike_T)
# # regression models
sns.regplot(
x="count",
y="registered",
data=Bike_T,
marker="+",
scatter_kws={"color": "g"},
line_kws={"color": "red"},
)
sns.regplot(
x="count",
y="casual",
data=Bike_T,
marker="*",
scatter_kws={"color": "g"},
line_kws={"color": "red"},
)
sns.regplot(
x="count",
y="atemp",
data=Bike_T,
marker="*",
scatter_kws={"color": "g"},
line_kws={"color": "red"},
)
sns.regplot(
x="count",
y="windspeed",
data=Bike_T,
marker="*",
scatter_kws={"color": "b"},
line_kws={"color": "red"},
)
sns.regplot(
x="count",
y="season",
data=Bike_T,
marker="*",
scatter_kws={"color": "y"},
line_kws={"color": "red"},
)
# # LinearRegression
X = Bike_T[["registered"]]
y = Bike_T["count"]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=4
)
# Import library for Linear Regression
from sklearn.linear_model import LinearRegression
# Create a Linear regressor
Simple_LR = LinearRegression()
# Train the model using the training sets
Simple_LR.fit(X_train, y_train)
y_pred = Simple_LR.predict(X_test)
X_test
y_test
y_pred
sns.histplot(y_test - y_pred)
# # Model linear_ regression by atemp(this good relationship)
X = Bike_T[["atemp"]]
y = Bike_T["count"]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=4
)
# Import library for Linear Regression
from sklearn.linear_model import LinearRegression
# Create a Linear regressor
Simple_LR_atemp = LinearRegression()
# Train the model using the training sets
Simple_LR_atemp.fit(X_train, y_train)
y_pred = Simple_LR_atemp.predict(X_test)
sns.histplot(y_test - y_pred)
plt.scatter(y_test, y_pred, marker="*", color="r")
plt.xlabel("count")
plt.ylabel("Predicted count")
plt.title("Prices vs Predicted count")
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/498/129498814.ipynb
| null | null |
[{"Id": 129498814, "ScriptId": 38473628, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14297831, "CreationDate": "05/14/2023 10:18:37", "VersionNumber": 1.0, "Title": "Basile - Projet personnel - Bike Sharing Demand", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 124.0, "LinesInsertedFromPrevious": 124.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
Bike_T = pd.read_csv("../input/bike-sharing-demand/train.csv")
type(Bike_T)
Bike_T.head()
Bike_T.index
Bike_T.info()
Bike_T.shape
Bike_T.columns
Bike_T.dtypes
Bike_T.isnull().sum()
Bike_T.nunique()
Bike_T.describe()
# Trouver la corrélation entre les caractéristiques
corr = Bike_T.corr()
corr.shape
plt.figure(figsize=(25, 25))
sns.heatmap(
corr,
cbar=True,
square=True,
fmt=".1f",
annot=True,
annot_kws={"size": 15},
cmap="YlGnBu",
)
corr
pd.crosstab(Bike_T.temp, Bike_T.season, normalize="index")
pd.crosstab(Bike_T.workingday, Bike_T.season, normalize="index")
pd.crosstab(Bike_T.weather, Bike_T.season, normalize="index")
# # Visualization
sns.displot(Bike_T, x="count", kind="kde")
sns.catplot(x="season", y="count", hue="weather", kind="violin", data=Bike_T)
# # regression models
sns.regplot(
x="count",
y="registered",
data=Bike_T,
marker="+",
scatter_kws={"color": "g"},
line_kws={"color": "red"},
)
sns.regplot(
x="count",
y="casual",
data=Bike_T,
marker="*",
scatter_kws={"color": "g"},
line_kws={"color": "red"},
)
sns.regplot(
x="count",
y="atemp",
data=Bike_T,
marker="*",
scatter_kws={"color": "g"},
line_kws={"color": "red"},
)
sns.regplot(
x="count",
y="windspeed",
data=Bike_T,
marker="*",
scatter_kws={"color": "b"},
line_kws={"color": "red"},
)
sns.regplot(
x="count",
y="season",
data=Bike_T,
marker="*",
scatter_kws={"color": "y"},
line_kws={"color": "red"},
)
# # LinearRegression
X = Bike_T[["registered"]]
y = Bike_T["count"]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=4
)
# Import library for Linear Regression
from sklearn.linear_model import LinearRegression
# Create a Linear regressor
Simple_LR = LinearRegression()
# Train the model using the training sets
Simple_LR.fit(X_train, y_train)
y_pred = Simple_LR.predict(X_test)
X_test
y_test
y_pred
sns.histplot(y_test - y_pred)
# # Model linear_ regression by atemp(this good relationship)
X = Bike_T[["atemp"]]
y = Bike_T["count"]
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.25, random_state=4
)
# Import library for Linear Regression
from sklearn.linear_model import LinearRegression
# Create a Linear regressor
Simple_LR_atemp = LinearRegression()
# Train the model using the training sets
Simple_LR_atemp.fit(X_train, y_train)
y_pred = Simple_LR_atemp.predict(X_test)
sns.histplot(y_test - y_pred)
plt.scatter(y_test, y_pred, marker="*", color="r")
plt.xlabel("count")
plt.ylabel("Predicted count")
plt.title("Prices vs Predicted count")
plt.show()
| false | 0 | 1,059 | 0 | 1,059 | 1,059 |
||
129498643
|
<jupyter_start><jupyter_text>FER-2013
The data consists of 48x48 pixel grayscale images of faces. The faces have been automatically registered so that the face is more or less centred and occupies about the same amount of space in each image.
The task is to categorize each face based on the emotion shown in the facial expression into one of seven categories (0=Angry, 1=Disgust, 2=Fear, 3=Happy, 4=Sad, 5=Surprise, 6=Neutral). The training set consists of 28,709 examples and the public test set consists of 3,589 examples.
Kaggle dataset identifier: fer2013
<jupyter_script>from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import Dense, Input, Dropout, Flatten, Conv2D
from tensorflow.keras.layers import BatchNormalization, Activation, MaxPooling2D
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.utils import plot_model
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import os
import os
from tensorflow.keras.preprocessing.image import load_img, img_to_array
def plot_example_images(plt):
img_size = 48
plt.figure(0, figsize=(12, 20))
ctr = 0
for expression in os.listdir("train/"):
for i in range(1, 6):
ctr += 1
plt.subplot(7, 5, ctr)
img = load_img(
"train/" + expression + "/" + os.listdir("train/" + expression)[i],
target_size=(img_size, img_size),
)
plt.imshow(img, cmap="gray")
plt.tight_layout()
return plt
for expression in os.listdir("train/"):
print(str(len(os.listdir("train/" + expression))) + " " + expression + " images.")
img_size = 48
batch_size = 64
# Data generator to augment data for training
datagen_train = ImageDataGenerator(horizontal_flip=True)
train_generator = datagen_train.flow_from_directory(
"train/",
target_size=(img_size, img_size),
color_mode="grayscale",
batch_size=batch_size,
class_mode="categorical",
shuffle=True,
)
# Data generator to augment data for validation
datagen_validation = ImageDataGenerator(horizontal_flip=True)
validation_generator = datagen_train.flow_from_directory(
"test/",
target_size=(img_size, img_size),
color_mode="grayscale",
batch_size=batch_size,
class_mode="categorical",
shuffle=False,
)
model = Sequential()
# Conv Block 1
model.add(Conv2D(64, (3, 3), padding="same", input_shape=(48, 48, 1)))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Conv Block 2
model.add(Conv2D(128, (5, 5), padding="same"))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Conv Block 3
model.add(Conv2D(512, (3, 3), padding="same"))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# # Conv Block 3
# model.add(Conv2D(512,(3,3), padding='same'))
# model.add(BatchNormalization())
# model.add(Activation('relu'))
# model.add(MaxPooling2D(pool_size=(2,2)))
# model.add(Dropout(0.25))
model.add(Flatten())
# Fully connected Block 1
model.add(Dense(256))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(0.25))
# Fully connected Block 2
model.add(Dense(512))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(0.25))
model.add(Dense(7, activation="softmax"))
opt = Adam(lr=0.0005)
model.compile(optimizer=opt, loss="categorical_crossentropy", metrics=["accuracy"])
model.summary()
epochs = 50
steps_per_epoch = train_generator.n // train_generator.batch_size
validation_steps = validation_generator.n // validation_generator.batch_size
checkpoint = ModelCheckpoint(
"model_weights.h5",
monitor="val_accuracy",
save_weights_only=True,
mode="max",
verbose=1,
)
reduce_lr = ReduceLROnPlateau(
monitor="val_loss", factor=0.1, patience=2, min_lr=0.00001, model="auto"
)
callbacks = [checkpoint, reduce_lr]
history = model.fit(
x=train_generator,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
validation_data=validation_generator,
validation_steps=validation_steps,
callbacks=callbacks,
)
# file_path = os.path.join(
# os.getcwd(), 'AI_Folder', 'Models/model_architecture.json')
# json_file = open(file_path, 'r')
# loaded_model_json = json_file.read()
# json_file.close()
# emotion_model = model_from_json(loaded_model_json)
model_file_path = os.path.join(os.getcwd(), "model_weights.h5")
model.load_weights(model_file_path)
model.compile(
loss="categorical_crossentropy", optimizer=Adam(lr=0.0005), metrics=["accuracy"]
)
test_dataset_file_path = os.path.join(os.getcwd(), "test")
scores = model.evaluate_generator(validation_generator)
print("Test accuracy:", scores[1])
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/498/129498643.ipynb
|
fer2013
|
msambare
|
[{"Id": 129498643, "ScriptId": 38506439, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6822455, "CreationDate": "05/14/2023 10:16:40", "VersionNumber": 1.0, "Title": "CNN+Batch Normalization", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 143.0, "LinesInsertedFromPrevious": 143.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
|
[{"Id": 185617431, "KernelVersionId": 129498643, "SourceDatasetVersionId": 1351797}]
|
[{"Id": 1351797, "DatasetId": 786787, "DatasourceVersionId": 1384195, "CreatorUserId": 3187350, "LicenseName": "Database: Open Database, Contents: Database Contents", "CreationDate": "07/19/2020 12:24:26", "VersionNumber": 1.0, "Title": "FER-2013", "Slug": "fer2013", "Subtitle": "Learn facial expressions from an image", "Description": "The data consists of 48x48 pixel grayscale images of faces. The faces have been automatically registered so that the face is more or less centred and occupies about the same amount of space in each image. \n\nThe task is to categorize each face based on the emotion shown in the facial expression into one of seven categories (0=Angry, 1=Disgust, 2=Fear, 3=Happy, 4=Sad, 5=Surprise, 6=Neutral). The training set consists of 28,709 examples and the public test set consists of 3,589 examples.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 786787, "CreatorUserId": 3187350, "OwnerUserId": 3187350.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1351797.0, "CurrentDatasourceVersionId": 1384195.0, "ForumId": 801807, "Type": 2, "CreationDate": "07/19/2020 12:24:26", "LastActivityDate": "07/19/2020", "TotalViews": 404940, "TotalDownloads": 72694, "TotalVotes": 864, "TotalKernels": 237}]
|
[{"Id": 3187350, "UserName": "msambare", "DisplayName": "Manas Sambare", "RegisterDate": "05/06/2019", "PerformanceTier": 1}]
|
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.layers import Dense, Input, Dropout, Flatten, Conv2D
from tensorflow.keras.layers import BatchNormalization, Activation, MaxPooling2D
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.utils import plot_model
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import os
import os
from tensorflow.keras.preprocessing.image import load_img, img_to_array
def plot_example_images(plt):
img_size = 48
plt.figure(0, figsize=(12, 20))
ctr = 0
for expression in os.listdir("train/"):
for i in range(1, 6):
ctr += 1
plt.subplot(7, 5, ctr)
img = load_img(
"train/" + expression + "/" + os.listdir("train/" + expression)[i],
target_size=(img_size, img_size),
)
plt.imshow(img, cmap="gray")
plt.tight_layout()
return plt
for expression in os.listdir("train/"):
print(str(len(os.listdir("train/" + expression))) + " " + expression + " images.")
img_size = 48
batch_size = 64
# Data generator to augment data for training
datagen_train = ImageDataGenerator(horizontal_flip=True)
train_generator = datagen_train.flow_from_directory(
"train/",
target_size=(img_size, img_size),
color_mode="grayscale",
batch_size=batch_size,
class_mode="categorical",
shuffle=True,
)
# Data generator to augment data for validation
datagen_validation = ImageDataGenerator(horizontal_flip=True)
validation_generator = datagen_train.flow_from_directory(
"test/",
target_size=(img_size, img_size),
color_mode="grayscale",
batch_size=batch_size,
class_mode="categorical",
shuffle=False,
)
model = Sequential()
# Conv Block 1
model.add(Conv2D(64, (3, 3), padding="same", input_shape=(48, 48, 1)))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Conv Block 2
model.add(Conv2D(128, (5, 5), padding="same"))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# Conv Block 3
model.add(Conv2D(512, (3, 3), padding="same"))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# # Conv Block 3
# model.add(Conv2D(512,(3,3), padding='same'))
# model.add(BatchNormalization())
# model.add(Activation('relu'))
# model.add(MaxPooling2D(pool_size=(2,2)))
# model.add(Dropout(0.25))
model.add(Flatten())
# Fully connected Block 1
model.add(Dense(256))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(0.25))
# Fully connected Block 2
model.add(Dense(512))
model.add(BatchNormalization())
model.add(Activation("relu"))
model.add(Dropout(0.25))
model.add(Dense(7, activation="softmax"))
opt = Adam(lr=0.0005)
model.compile(optimizer=opt, loss="categorical_crossentropy", metrics=["accuracy"])
model.summary()
epochs = 50
steps_per_epoch = train_generator.n // train_generator.batch_size
validation_steps = validation_generator.n // validation_generator.batch_size
checkpoint = ModelCheckpoint(
"model_weights.h5",
monitor="val_accuracy",
save_weights_only=True,
mode="max",
verbose=1,
)
reduce_lr = ReduceLROnPlateau(
monitor="val_loss", factor=0.1, patience=2, min_lr=0.00001, model="auto"
)
callbacks = [checkpoint, reduce_lr]
history = model.fit(
x=train_generator,
steps_per_epoch=steps_per_epoch,
epochs=epochs,
validation_data=validation_generator,
validation_steps=validation_steps,
callbacks=callbacks,
)
# file_path = os.path.join(
# os.getcwd(), 'AI_Folder', 'Models/model_architecture.json')
# json_file = open(file_path, 'r')
# loaded_model_json = json_file.read()
# json_file.close()
# emotion_model = model_from_json(loaded_model_json)
model_file_path = os.path.join(os.getcwd(), "model_weights.h5")
model.load_weights(model_file_path)
model.compile(
loss="categorical_crossentropy", optimizer=Adam(lr=0.0005), metrics=["accuracy"]
)
test_dataset_file_path = os.path.join(os.getcwd(), "test")
scores = model.evaluate_generator(validation_generator)
print("Test accuracy:", scores[1])
| false | 0 | 1,404 | 1 | 1,570 | 1,404 |
||
129851720
|
# **Google Data Analytics Capstone:Bellabeat Case Study**
# **About Bellabeat**
# Bellabeat is a high-tech company that manufactures health-focused smart products. Collecting data on activity, sleep, stress, and reproductive health has allowed Bellabeat to empower women with knowledge about their own health and habits. Since it was founded in 2013, Bellabeat has grown rapidly and quickly positioned itself as a tech-driven wellness company for women.
# **Products**
# ○ Bellabeat app: The Bellabeat app provides users with health data related to their activity, sleep, stress,menstrual cycle, and mindfulness habits. This data can help users better understand their current habits and make healthy decisions. The Bellabeat app connects to their line of smart wellness products.
# ○ Leaf: Bellabeat’s classic wellness tracker can be worn as a bracelet, necklace, or clip. The Leaf tracker connects to the Bellabeat app to track activity, sleep, and stress.
# ○ Time: This wellness watch combines the timeless look of a classic timepiece with smart technology to track user activity, sleep, and stress. The Time watch connects to the Bellabeat app to provide you with insights into your daily wellness.
# ○ Spring: This is a water bottle that tracks daily water intake using smart technology to ensure that you are appropriately hydrated throughout the day. The Spring bottle connects to the Bellabeat app to track your hydration levels.
# ○ Bellabeat membership: Bellabeat also offers a subscription-based membership program for users.
# Membership gives users 24/7 access to fully personalized guidance on nutrition, activity, sleep, health and beauty, and mindfulness based on their lifestyle and goals.
# # 1.ASK
# **Business Task**
# * Analyze smart device usage data in order to gain insight into how consumers use non-Bellabeat smart devices.
# * Identify potential opportunities for growth and recommendations for the Bellabeat marketing strategy improvement based on trends in smart device usage.
# In this case study, I will focus on Bellabeat App for recommendations.
# **Key Stakeholders**
# * Urška Sršen: Cofounder and Chief Creative Officer
# * Sando Mur: Cofounder and Mathematician
# * Bellabeat Marketing Analytics team
# # 2.PREPARE
# **2.1 Dataset**
# * The FitBit Fitness Tracker Data (CC0: Public Domain, dataset made available through Mobius) is a Kaggle data set that contains personal fitness tracker from thirty fitbit users.
# * Thirty eligible Fitbit users consented to the submission of personal tracker data, including minute-level output for physical activity, heart rate, and sleep monitoring.
# * It includes information about daily activity, steps, and heart rate that can be used to explore users’ habits.
# * The data is available as 18 csv files in long format.
# **2.2 Limitations of Data**
# * Due to the limitation of size (30 users) and not having any demographic information such as age,gender and health condition,we could encounter a sampling bias. We are not sure if the sample is representative of the population as a whole.
# * Another problem we would encounter is that the dataset is not current and also the time limitation of the survey (4.12.2016 - 5.12.2016).
# # 3.PROCESS
# **3.1 Data Cleaning**
# Knowing the datasets we have, we will upload the datasets that will help us answer our business task. On our analysis we will focus on the following datasets:
# * dailyActivity_merged
# * sleepDay_merged
# * hourlySteps_merged
# Due to the the small sample we won't consider Weight (8 Users) and heart rate (6 users)for this analysis.
# A copy of the unedited file was saved as a backup prior to cleaning.Data was then checked for null values, mistyped values, extra spaces, duplicates, truncated data, misleading column headings, and duplicates. All dates were standardized and formatted to MM/DD/YY.Pivot tables,Conditional Formating,Remove Duplicates and Trim whitespace in Google sheets were used for this step.
# Next we will focus our analysis in R due to the accessibility, amount of data and to be able to create data visualization to share the results with stakeholders.We will choose the packages that will help us on our analysis and open them. We will use the following packages for our analysis:
library(tidyverse)
library(lubridate)
library(dplyr)
library(ggplot2)
library(tidyr)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/851/129851720.ipynb
| null | null |
[{"Id": 129851720, "ScriptId": 38610027, "ParentScriptVersionId": NaN, "ScriptLanguageId": 12, "AuthorUserId": 13810880, "CreationDate": "05/17/2023 01:06:55", "VersionNumber": 2.0, "Title": "Bellabeat Case Study-2023", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 70.0, "LinesInsertedFromPrevious": 19.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 51.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# **Google Data Analytics Capstone:Bellabeat Case Study**
# **About Bellabeat**
# Bellabeat is a high-tech company that manufactures health-focused smart products. Collecting data on activity, sleep, stress, and reproductive health has allowed Bellabeat to empower women with knowledge about their own health and habits. Since it was founded in 2013, Bellabeat has grown rapidly and quickly positioned itself as a tech-driven wellness company for women.
# **Products**
# ○ Bellabeat app: The Bellabeat app provides users with health data related to their activity, sleep, stress,menstrual cycle, and mindfulness habits. This data can help users better understand their current habits and make healthy decisions. The Bellabeat app connects to their line of smart wellness products.
# ○ Leaf: Bellabeat’s classic wellness tracker can be worn as a bracelet, necklace, or clip. The Leaf tracker connects to the Bellabeat app to track activity, sleep, and stress.
# ○ Time: This wellness watch combines the timeless look of a classic timepiece with smart technology to track user activity, sleep, and stress. The Time watch connects to the Bellabeat app to provide you with insights into your daily wellness.
# ○ Spring: This is a water bottle that tracks daily water intake using smart technology to ensure that you are appropriately hydrated throughout the day. The Spring bottle connects to the Bellabeat app to track your hydration levels.
# ○ Bellabeat membership: Bellabeat also offers a subscription-based membership program for users.
# Membership gives users 24/7 access to fully personalized guidance on nutrition, activity, sleep, health and beauty, and mindfulness based on their lifestyle and goals.
# # 1.ASK
# **Business Task**
# * Analyze smart device usage data in order to gain insight into how consumers use non-Bellabeat smart devices.
# * Identify potential opportunities for growth and recommendations for the Bellabeat marketing strategy improvement based on trends in smart device usage.
# In this case study, I will focus on Bellabeat App for recommendations.
# **Key Stakeholders**
# * Urška Sršen: Cofounder and Chief Creative Officer
# * Sando Mur: Cofounder and Mathematician
# * Bellabeat Marketing Analytics team
# # 2.PREPARE
# **2.1 Dataset**
# * The FitBit Fitness Tracker Data (CC0: Public Domain, dataset made available through Mobius) is a Kaggle data set that contains personal fitness tracker from thirty fitbit users.
# * Thirty eligible Fitbit users consented to the submission of personal tracker data, including minute-level output for physical activity, heart rate, and sleep monitoring.
# * It includes information about daily activity, steps, and heart rate that can be used to explore users’ habits.
# * The data is available as 18 csv files in long format.
# **2.2 Limitations of Data**
# * Due to the limitation of size (30 users) and not having any demographic information such as age,gender and health condition,we could encounter a sampling bias. We are not sure if the sample is representative of the population as a whole.
# * Another problem we would encounter is that the dataset is not current and also the time limitation of the survey (4.12.2016 - 5.12.2016).
# # 3.PROCESS
# **3.1 Data Cleaning**
# Knowing the datasets we have, we will upload the datasets that will help us answer our business task. On our analysis we will focus on the following datasets:
# * dailyActivity_merged
# * sleepDay_merged
# * hourlySteps_merged
# Due to the the small sample we won't consider Weight (8 Users) and heart rate (6 users)for this analysis.
# A copy of the unedited file was saved as a backup prior to cleaning.Data was then checked for null values, mistyped values, extra spaces, duplicates, truncated data, misleading column headings, and duplicates. All dates were standardized and formatted to MM/DD/YY.Pivot tables,Conditional Formating,Remove Duplicates and Trim whitespace in Google sheets were used for this step.
# Next we will focus our analysis in R due to the accessibility, amount of data and to be able to create data visualization to share the results with stakeholders.We will choose the packages that will help us on our analysis and open them. We will use the following packages for our analysis:
library(tidyverse)
library(lubridate)
library(dplyr)
library(ggplot2)
library(tidyr)
| false | 0 | 1,094 | 0 | 1,094 | 1,094 |
||
129851924
|
# ####
# # EDA - Hospedagens do AIRBNB para a cidade de Amsterdã
# * ### Autor: Alexandre Bastos
# * ### Esta análise visa responder algumas perguntas a partir da análise de dados disponibilizados pelo próprio AirBnb em seu site: http://insideairbnb.com/get-the-data/.
# ### Entre os questionamentos teríamos:
# ## Em quantos bairros é possível se hospedar pelo Airbnb em Amsterdã?
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ### Leitura dos dados para Dataframes do Pandas
calen = pd.read_csv("/kaggle/input/airbnb-amsterdam/calendar.csv")
listi = pd.read_csv("/kaggle/input/airbnb-amsterdam/listings.csv")
revie = pd.read_csv("/kaggle/input/airbnb-amsterdam/reviews.csv")
# ## Verificando os dados existentes
# ### Tabela com lista de imóveis disponíveis na base de dados (listings.csv)
listi.columns
listi.count()
listi.head(3)
listi.neighbourhood[0:10]
listi.neighbourhood_cleansed[0:10]
listi.describe()
# * ### Número de Bairros contemplados pelo Airbnb em Amsterdã:
listi.neighbourhood_cleansed.nunique(dropna=True)
# ## **Resposta:** Logo, a resposta à primeira pergunta indica que existem 22 bairros com hospedagens disponíveis na cidade de Amsterdã.
# ## Quais os bairros da cidade mais ofertam reservas?
# ### Lista em ordem decrescente dos bairros com maior disponibilidade de espaços para locação do Airbnb em Amsterdã:
Conta_Hosped = (
listi.loc[:, ["neighbourhood_cleansed", "id"]]
.groupby("neighbourhood_cleansed")
.nunique()
.sort_values(["id", "neighbourhood_cleansed"], ascending=False)
.head(10)
)
pd.to_numeric(Conta_Hosped.id, errors="raise")
Conta_Hosped
# Conta_Hosped = Conta_Hosped.sort_values(by =['id'], ascending = True)
ax = Conta_Hosped.plot.barh(color="green", rot=0)
ax.set_title("Quantidade de Hospedagens por Bairro", fontsize=16, pad=20)
ax.set_xlabel("Número de Hospedagens", fontsize=14, labelpad=10)
ax.set_ylabel("Bairros", fontsize=14, labelpad=10)
for container in ax.containers:
ax.bar_label(container)
# * # Qual o Bairro com as melhores avaliações de hospedagem?
# #### Para isto analisaremos a tabela 'listi' que contém as avaliações por bairro, tirando a média de todas as avaliações percebidas
Melhor_Aval = pd.DataFrame(
listi.groupby("neighbourhood_cleansed")["review_scores_value"]
.mean()
.sort_values(ascending=False)
)
Melhor_Aval
ax = Melhor_Aval.plot.line(color="darkblue", rot=0)
ax.set_title("Média das Avaliações por Bairro", fontsize=16, pad=20)
ax.set_xlabel("Bairros", fontsize=14, labelpad=10)
ax.set_ylabel("Média das Avaliações", fontsize=14, labelpad=10)
for container in ax.containers:
ax.bar_label(container)
# ## VIsualização de Dados em Mapas
# ### Uso da biblioteca "folium"
Teste_Localizar = listi.loc[0:10, ["latitude", "longitude"]]
Teste_Localizar
import folium
mapa = folium.Map(location=[52.364350, 4.943580])
folium.Marker(location=[52.364350, 4.943580], popup="XYZ").add_to(mapa)
mapa
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/851/129851924.ipynb
| null | null |
[{"Id": 129851924, "ScriptId": 38437703, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 696377, "CreationDate": "05/17/2023 01:10:17", "VersionNumber": 9.0, "Title": "amb_AirBnb_Amsterdam", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 100.0, "LinesInsertedFromPrevious": 17.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 83.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# ####
# # EDA - Hospedagens do AIRBNB para a cidade de Amsterdã
# * ### Autor: Alexandre Bastos
# * ### Esta análise visa responder algumas perguntas a partir da análise de dados disponibilizados pelo próprio AirBnb em seu site: http://insideairbnb.com/get-the-data/.
# ### Entre os questionamentos teríamos:
# ## Em quantos bairros é possível se hospedar pelo Airbnb em Amsterdã?
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ### Leitura dos dados para Dataframes do Pandas
calen = pd.read_csv("/kaggle/input/airbnb-amsterdam/calendar.csv")
listi = pd.read_csv("/kaggle/input/airbnb-amsterdam/listings.csv")
revie = pd.read_csv("/kaggle/input/airbnb-amsterdam/reviews.csv")
# ## Verificando os dados existentes
# ### Tabela com lista de imóveis disponíveis na base de dados (listings.csv)
listi.columns
listi.count()
listi.head(3)
listi.neighbourhood[0:10]
listi.neighbourhood_cleansed[0:10]
listi.describe()
# * ### Número de Bairros contemplados pelo Airbnb em Amsterdã:
listi.neighbourhood_cleansed.nunique(dropna=True)
# ## **Resposta:** Logo, a resposta à primeira pergunta indica que existem 22 bairros com hospedagens disponíveis na cidade de Amsterdã.
# ## Quais os bairros da cidade mais ofertam reservas?
# ### Lista em ordem decrescente dos bairros com maior disponibilidade de espaços para locação do Airbnb em Amsterdã:
Conta_Hosped = (
listi.loc[:, ["neighbourhood_cleansed", "id"]]
.groupby("neighbourhood_cleansed")
.nunique()
.sort_values(["id", "neighbourhood_cleansed"], ascending=False)
.head(10)
)
pd.to_numeric(Conta_Hosped.id, errors="raise")
Conta_Hosped
# Conta_Hosped = Conta_Hosped.sort_values(by =['id'], ascending = True)
ax = Conta_Hosped.plot.barh(color="green", rot=0)
ax.set_title("Quantidade de Hospedagens por Bairro", fontsize=16, pad=20)
ax.set_xlabel("Número de Hospedagens", fontsize=14, labelpad=10)
ax.set_ylabel("Bairros", fontsize=14, labelpad=10)
for container in ax.containers:
ax.bar_label(container)
# * # Qual o Bairro com as melhores avaliações de hospedagem?
# #### Para isto analisaremos a tabela 'listi' que contém as avaliações por bairro, tirando a média de todas as avaliações percebidas
Melhor_Aval = pd.DataFrame(
listi.groupby("neighbourhood_cleansed")["review_scores_value"]
.mean()
.sort_values(ascending=False)
)
Melhor_Aval
ax = Melhor_Aval.plot.line(color="darkblue", rot=0)
ax.set_title("Média das Avaliações por Bairro", fontsize=16, pad=20)
ax.set_xlabel("Bairros", fontsize=14, labelpad=10)
ax.set_ylabel("Média das Avaliações", fontsize=14, labelpad=10)
for container in ax.containers:
ax.bar_label(container)
# ## VIsualização de Dados em Mapas
# ### Uso da biblioteca "folium"
Teste_Localizar = listi.loc[0:10, ["latitude", "longitude"]]
Teste_Localizar
import folium
mapa = folium.Map(location=[52.364350, 4.943580])
folium.Marker(location=[52.364350, 4.943580], popup="XYZ").add_to(mapa)
mapa
| false | 0 | 1,252 | 0 | 1,252 | 1,252 |
||
129851596
|
<jupyter_start><jupyter_text>Tennis Weather
Kaggle dataset identifier: tennis-weather
<jupyter_script>import pandas as pd
import numpy as np
data = pd.read_csv("/kaggle/input/tennis-weather/tennis.csv")
data
# # Internal working of Naive Bayes Algorithm
# for any specific case 1
# outlook = sunny , temp = hot , humidity = high , wind = weak
# play or no play?
# as we know Naive Bayes would use formula for this specific case as
# **P(yes/sunny, hot, high, weak) = P(sunny/yes)*P(hot/yes)*P(high/yes)*P(weak/yes)*P(yes)**
# **P(no/sunny, hot, high, weak) = P(sunny/no)*P(hot/no)*P(high/no)*P(weak/no)*P(no)**
# out of two which ever case has higher probability will be selected by Naive Bayes. **This is called Posteriori Rule.**
# what if the problem changes?
# outlook = overcast , temp = cold , humidity = low , wind = weak
# play or no play?
# **P(yes/overcast , cold , low , weak) = P(overcast/yes)*P(cold/yes)*P(low/yes)*P(weak/yes)*P(yes)**
# **P(no/overcast , cold , low , weak) = P(overcast/no)*P(cold/no)*P(low/no)*P(weak/no)*P(no)**
# again, out of two which ever case has higher probability will be selected by Naive Bayes
# In the training stage , Naive bayes creates a outlook table which it stores in a Dictionary. This outlook table contains all the
# possible probabilities from the data
# In the testing stage, when a specific input comes, it only looks for each probability, applies in the formula and gives the output.
data["play"].value_counts()
# probabilty of yes and no
Pyes = 9 / 14
Pno = 5 / 14
print(Pyes)
print(Pno)
# lets create a outlook table, finding probabilities in each case.
# column = outlook
pd.crosstab(data["outlook"], data["play"])
# probabilities
PsunnyNo = 3 / 5
PrainyNo = 2 / 5
PovercastNo = 0
PsunnyYes = 2 / 9
PrainyYes = 3 / 9
PovercastYes = 4 / 9
# column = temp
pd.crosstab(data["temp"], data["play"])
# probabilities
PcoolNo = 1 / 5
PhotNo = 2 / 5
PmildNo = 2 / 5
PcoolYes = 3 / 9
PhotYes = 2 / 9
PmildYes = 4 / 9
# column = humidity
pd.crosstab(data["humidity"], data["play"])
# probabilities
PhighNo = 4 / 5
PnormalNo = 1 / 5
PhighYes = 3 / 9
PnormalYes = 6 / 9
# column = windy
# true = strong wind , false = weak wind
pd.crosstab(data["windy"], data["play"])
# probabilities
PweakNo = 2 / 5
PstrongNo = 3 / 5
PweakYes = 6 / 9
PstrongYes = 3 / 9
# calculating the probabilty of yes for outlook = sunny , temp = hot , humidity = high , wind = weak
# P(yes/sunny, hot, high, weak) = P(sunny/yes)*P(hot/yes)*P(high/yes)*P(weak/yes)*P(yes)
Prob_yes = PsunnyYes * PhotYes * PhighYes * PweakYes * Pyes
print(Prob_yes)
Prob_no = PsunnyNo * PhotNo * PhighNo * PweakNo * Pno
print(Prob_no)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/851/129851596.ipynb
|
tennis-weather
|
pranavpandey2511
|
[{"Id": 129851596, "ScriptId": 38620660, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13615539, "CreationDate": "05/17/2023 01:04:54", "VersionNumber": 1.0, "Title": "Naive Bayes working without sklearn libraries :)", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 117.0, "LinesInsertedFromPrevious": 117.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
|
[{"Id": 186241616, "KernelVersionId": 129851596, "SourceDatasetVersionId": 113400}]
|
[{"Id": 113400, "DatasetId": 58414, "DatasourceVersionId": 123839, "CreatorUserId": 926694, "LicenseName": "Unknown", "CreationDate": "10/02/2018 17:27:58", "VersionNumber": 1.0, "Title": "Tennis Weather", "Slug": "tennis-weather", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 423.0, "TotalUncompressedBytes": 423.0}]
|
[{"Id": 58414, "CreatorUserId": 926694, "OwnerUserId": 926694.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 113400.0, "CurrentDatasourceVersionId": 123839.0, "ForumId": 67259, "Type": 2, "CreationDate": "10/02/2018 17:27:58", "LastActivityDate": "10/02/2018", "TotalViews": 13345, "TotalDownloads": 3097, "TotalVotes": 10, "TotalKernels": 8}]
|
[{"Id": 926694, "UserName": "pranavpandey2511", "DisplayName": "Pranav Pandey", "RegisterDate": "02/23/2017", "PerformanceTier": 0}]
|
import pandas as pd
import numpy as np
data = pd.read_csv("/kaggle/input/tennis-weather/tennis.csv")
data
# # Internal working of Naive Bayes Algorithm
# for any specific case 1
# outlook = sunny , temp = hot , humidity = high , wind = weak
# play or no play?
# as we know Naive Bayes would use formula for this specific case as
# **P(yes/sunny, hot, high, weak) = P(sunny/yes)*P(hot/yes)*P(high/yes)*P(weak/yes)*P(yes)**
# **P(no/sunny, hot, high, weak) = P(sunny/no)*P(hot/no)*P(high/no)*P(weak/no)*P(no)**
# out of two which ever case has higher probability will be selected by Naive Bayes. **This is called Posteriori Rule.**
# what if the problem changes?
# outlook = overcast , temp = cold , humidity = low , wind = weak
# play or no play?
# **P(yes/overcast , cold , low , weak) = P(overcast/yes)*P(cold/yes)*P(low/yes)*P(weak/yes)*P(yes)**
# **P(no/overcast , cold , low , weak) = P(overcast/no)*P(cold/no)*P(low/no)*P(weak/no)*P(no)**
# again, out of two which ever case has higher probability will be selected by Naive Bayes
# In the training stage , Naive bayes creates a outlook table which it stores in a Dictionary. This outlook table contains all the
# possible probabilities from the data
# In the testing stage, when a specific input comes, it only looks for each probability, applies in the formula and gives the output.
data["play"].value_counts()
# probabilty of yes and no
Pyes = 9 / 14
Pno = 5 / 14
print(Pyes)
print(Pno)
# lets create a outlook table, finding probabilities in each case.
# column = outlook
pd.crosstab(data["outlook"], data["play"])
# probabilities
PsunnyNo = 3 / 5
PrainyNo = 2 / 5
PovercastNo = 0
PsunnyYes = 2 / 9
PrainyYes = 3 / 9
PovercastYes = 4 / 9
# column = temp
pd.crosstab(data["temp"], data["play"])
# probabilities
PcoolNo = 1 / 5
PhotNo = 2 / 5
PmildNo = 2 / 5
PcoolYes = 3 / 9
PhotYes = 2 / 9
PmildYes = 4 / 9
# column = humidity
pd.crosstab(data["humidity"], data["play"])
# probabilities
PhighNo = 4 / 5
PnormalNo = 1 / 5
PhighYes = 3 / 9
PnormalYes = 6 / 9
# column = windy
# true = strong wind , false = weak wind
pd.crosstab(data["windy"], data["play"])
# probabilities
PweakNo = 2 / 5
PstrongNo = 3 / 5
PweakYes = 6 / 9
PstrongYes = 3 / 9
# calculating the probabilty of yes for outlook = sunny , temp = hot , humidity = high , wind = weak
# P(yes/sunny, hot, high, weak) = P(sunny/yes)*P(hot/yes)*P(high/yes)*P(weak/yes)*P(yes)
Prob_yes = PsunnyYes * PhotYes * PhighYes * PweakYes * Pyes
print(Prob_yes)
Prob_no = PsunnyNo * PhotNo * PhighNo * PweakNo * Pno
print(Prob_no)
| false | 1 | 948 | 1 | 971 | 948 |
||
129826477
|
# Quick EDA
import pandas as pd
import numpy as np
from skimpy import skim
import matplotlib.pyplot as plt
import seaborn as sns
diamonds = pd.read_csv("/kaggle/input/diamond-price-prediction/train.csv")
diamonds.head(20)
diamonds.info()
skim(diamonds)
diamonds.describe()
diamonds.hist(bins=50, figsize=(20, 15))
diamonds.isnull().sum()
diamonds["carat"].plot(kind="hist")
diamonds["price"].plot(kind="hist")
diamonds["cut"].value_counts()
diamonds["color"].value_counts()
diamonds["clarity"].value_counts()
# Visualization
plt.scatter(x=diamonds["carat"], y=diamonds["price"], alpha=0.3)
plt.xlabel("Carat")
plt.ylabel("Price")
plt.title("Scatter plot between Carat and price")
sns.boxplot(x="cut", y="price", data=diamonds)
for cut in diamonds["cut"].unique():
plt.hist(diamonds[diamonds["cut"] == cut]["price"], bins=20, alpha=0.5, label=cut)
plt.xlabel("Price")
plt.ylabel("Count")
plt.title("Histogram of Price for Each Cut")
plt.legend()
plt.show()
num_data = diamonds[["price", "x", "y", "z", "carat"]]
sns.pairplot(num_data)
plt.title("Pairplot of All Numeric Columns")
plt.show()
# Feature Engineering
corr_matrix = diamonds.corr()
corr_matrix["price"].sort_values(ascending=False)
num_cols = diamonds.select_dtypes(include=np.number)
corr = num_cols.corr()
sns.heatmap(corr, cmap="YlGnBu", annot=False)
plt.title("Heatmap of Correlation Matrix")
plt.show()
# Prepare the Data for Machine Learning Algorithm
# Data Cleaning
diamonds_predictors = diamonds.drop("price", axis=1)
diamonds_labels = diamonds["price"].copy()
from sklearn.preprocessing import OrdinalEncoder, OneHotEncoder
cut_clarity = diamonds[["cut", "clarity"]]
color = diamonds[["color"]]
one_hot = OneHotEncoder()
ordinal_encoder = OrdinalEncoder()
diamond_encode = ordinal_encoder.fit_transform(cut_clarity)
hot_1_encode = one_hot.fit_transform(color)
diamond_encode
hot_1_encode.toarray()
diamonds.columns.get_loc("table")
rows_with_zero_xyz = diamonds.loc[
(diamonds["x"] == 0) | (diamonds["y"] == 0) | (diamonds["z"] == 0)
]
rows_with_zero_xyz
diamonds.head()
diamonds_num = diamonds_predictors.drop(["color", "clarity", "cut", "Id"], axis=1)
diamonds_num = diamonds_num[
(diamonds_num["x"] != 0) & (diamonds_num["y"] != 0) & (diamonds_num["z"] != 0)
]
diamonds_num
diamonds_num.columns.get_loc("carat")
# a custom transformer class called DiamondFeatureTransformer that adds two additional features, volume and surface_area, to a dataset. These features are calculated based on specific columns of the dataset. The transform method performs the calculations and returns the transformed dataset with the added features. An instance of DiamondFeatureTransformer is created, and its transform method is called on a dataset called diamonds_num, resulting in a transformed dataset stored in diamond_extra_attribs. The shape of diamond_extra_attribs is printed.
carat_ix, table_ix, price_ix, x_ix, y_ix, z_ix = 0, 2, 3, 3, 4, 5
from sklearn.base import BaseEstimator, TransformerMixin
class DiamondFeatureTransformer(BaseEstimator, TransformerMixin):
def __init__(self):
pass
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
X_transformed = X.copy()
# price_per_carat = X_transformed.iloc[:, price_ix] / X_transformed.iloc[:, carat_ix]
volume = (
X_transformed.iloc[:, x_ix]
* X_transformed.iloc[:, y_ix]
* X_transformed.iloc[:, z_ix]
) * (1 / 3)
surface_area = 2 * (
X_transformed.iloc[:, x_ix] * X_transformed.iloc[:, y_ix]
+ X_transformed.iloc[:, x_ix] * X_transformed.iloc[:, z_ix]
+ X_transformed.iloc[:, y_ix] * X_transformed.iloc[:, z_ix]
)
# length_to_width = X_transformed.iloc[:,x_ix] / X_transformed.iloc[:,y_ix]
return np.c_[X, volume, surface_area]
attr_adder = DiamondFeatureTransformer()
diamond_extra_attribs = attr_adder.transform(diamonds_num)
diamond_extra_attribs.shape
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
num_pipeline = Pipeline(
[
("attribs_adder", DiamondFeatureTransformer()),
("std_scaler", StandardScaler()),
]
)
diamonds_num_tr = num_pipeline.fit_transform(diamonds_num)
diamonds_num_tr.shape
from sklearn.compose import ColumnTransformer
num_attribs = list(diamonds_num)
cut_clarity_attribs = list(diamonds_predictors[["cut", "clarity"]])
color_attrib = ["color"]
one_hot = OneHotEncoder()
ordinal_encoder = OrdinalEncoder()
full_pipeline = ColumnTransformer(
[
("num", num_pipeline, num_attribs),
("color_cat", OneHotEncoder(), color_attrib),
("cut_clarity_cat", OrdinalEncoder(), cut_clarity_attribs),
]
)
df_prepared = full_pipeline.fit_transform(diamonds_predictors.drop("Id", axis=1))
df_prepared
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(df_prepared, diamonds_labels)
from sklearn.metrics import mean_squared_error
diamond_predictions = lin_reg.predict(df_prepared)
lin_mse = mean_squared_error(diamonds_labels, diamond_predictions)
lin_rmse = np.sqrt(lin_mse)
lin_rmse
def display_scores(scores):
print("Scores:", scores)
print("Mean:", scores.mean())
print("Standard deviation:", scores.std())
from sklearn.model_selection import cross_val_score
lin_scores = cross_val_score(
lin_reg, df_prepared, diamonds_labels, scoring="neg_mean_squared_error", cv=10
)
lin_rmse_scores = np.sqrt(-lin_scores)
display_scores(lin_rmse_scores)
from sklearn.tree import DecisionTreeRegressor
tree_reg = DecisionTreeRegressor()
tree_reg.fit(df_prepared, diamonds_labels)
diamonds_predictions = tree_reg.predict(df_prepared)
tree_mse = mean_squared_error(diamonds_labels, diamonds_predictions)
tree_rmse = np.sqrt(tree_mse)
tree_rmse
from sklearn.model_selection import cross_val_score
scores = cross_val_score(
tree_reg, df_prepared, diamonds_labels, scoring="neg_mean_squared_error", cv=10
)
tree_rmse_scores = np.sqrt(-scores)
display_scores(tree_rmse_scores)
from sklearn.ensemble import RandomForestRegressor
forest_reg = RandomForestRegressor()
forest_reg.fit(df_prepared, diamonds_labels)
diamonds_predictions = forest_reg.predict(df_prepared)
forest_mse = mean_squared_error(diamonds_labels, diamonds_predictions)
forest_rmse = np.sqrt(forest_mse)
forest_rmse
from sklearn.model_selection import cross_val_score
scores = cross_val_score(
forest_reg, df_prepared, diamonds_labels, scoring="neg_mean_squared_error", cv=10
)
forest_rmse_scores = np.sqrt(-scores)
display_scores(forest_rmse_scores)
test = pd.read_csv("/kaggle/input/diamond-price-prediction/test.csv")
prerdictions = full_pipeline.transform(test)
final_predictions = forest_reg.predict(prerdictions)
final_predictions
submission = test[["Id"]]
submission
submission["price"] = final_predictions
submission
submission.describe()
submission.to_csv("submission2.csv", index=None)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/826/129826477.ipynb
| null | null |
[{"Id": 129826477, "ScriptId": 38410433, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6888384, "CreationDate": "05/16/2023 18:51:24", "VersionNumber": 1.0, "Title": "Diamond_price_prediction", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 223.0, "LinesInsertedFromPrevious": 223.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# Quick EDA
import pandas as pd
import numpy as np
from skimpy import skim
import matplotlib.pyplot as plt
import seaborn as sns
diamonds = pd.read_csv("/kaggle/input/diamond-price-prediction/train.csv")
diamonds.head(20)
diamonds.info()
skim(diamonds)
diamonds.describe()
diamonds.hist(bins=50, figsize=(20, 15))
diamonds.isnull().sum()
diamonds["carat"].plot(kind="hist")
diamonds["price"].plot(kind="hist")
diamonds["cut"].value_counts()
diamonds["color"].value_counts()
diamonds["clarity"].value_counts()
# Visualization
plt.scatter(x=diamonds["carat"], y=diamonds["price"], alpha=0.3)
plt.xlabel("Carat")
plt.ylabel("Price")
plt.title("Scatter plot between Carat and price")
sns.boxplot(x="cut", y="price", data=diamonds)
for cut in diamonds["cut"].unique():
plt.hist(diamonds[diamonds["cut"] == cut]["price"], bins=20, alpha=0.5, label=cut)
plt.xlabel("Price")
plt.ylabel("Count")
plt.title("Histogram of Price for Each Cut")
plt.legend()
plt.show()
num_data = diamonds[["price", "x", "y", "z", "carat"]]
sns.pairplot(num_data)
plt.title("Pairplot of All Numeric Columns")
plt.show()
# Feature Engineering
corr_matrix = diamonds.corr()
corr_matrix["price"].sort_values(ascending=False)
num_cols = diamonds.select_dtypes(include=np.number)
corr = num_cols.corr()
sns.heatmap(corr, cmap="YlGnBu", annot=False)
plt.title("Heatmap of Correlation Matrix")
plt.show()
# Prepare the Data for Machine Learning Algorithm
# Data Cleaning
diamonds_predictors = diamonds.drop("price", axis=1)
diamonds_labels = diamonds["price"].copy()
from sklearn.preprocessing import OrdinalEncoder, OneHotEncoder
cut_clarity = diamonds[["cut", "clarity"]]
color = diamonds[["color"]]
one_hot = OneHotEncoder()
ordinal_encoder = OrdinalEncoder()
diamond_encode = ordinal_encoder.fit_transform(cut_clarity)
hot_1_encode = one_hot.fit_transform(color)
diamond_encode
hot_1_encode.toarray()
diamonds.columns.get_loc("table")
rows_with_zero_xyz = diamonds.loc[
(diamonds["x"] == 0) | (diamonds["y"] == 0) | (diamonds["z"] == 0)
]
rows_with_zero_xyz
diamonds.head()
diamonds_num = diamonds_predictors.drop(["color", "clarity", "cut", "Id"], axis=1)
diamonds_num = diamonds_num[
(diamonds_num["x"] != 0) & (diamonds_num["y"] != 0) & (diamonds_num["z"] != 0)
]
diamonds_num
diamonds_num.columns.get_loc("carat")
# a custom transformer class called DiamondFeatureTransformer that adds two additional features, volume and surface_area, to a dataset. These features are calculated based on specific columns of the dataset. The transform method performs the calculations and returns the transformed dataset with the added features. An instance of DiamondFeatureTransformer is created, and its transform method is called on a dataset called diamonds_num, resulting in a transformed dataset stored in diamond_extra_attribs. The shape of diamond_extra_attribs is printed.
carat_ix, table_ix, price_ix, x_ix, y_ix, z_ix = 0, 2, 3, 3, 4, 5
from sklearn.base import BaseEstimator, TransformerMixin
class DiamondFeatureTransformer(BaseEstimator, TransformerMixin):
def __init__(self):
pass
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
X_transformed = X.copy()
# price_per_carat = X_transformed.iloc[:, price_ix] / X_transformed.iloc[:, carat_ix]
volume = (
X_transformed.iloc[:, x_ix]
* X_transformed.iloc[:, y_ix]
* X_transformed.iloc[:, z_ix]
) * (1 / 3)
surface_area = 2 * (
X_transformed.iloc[:, x_ix] * X_transformed.iloc[:, y_ix]
+ X_transformed.iloc[:, x_ix] * X_transformed.iloc[:, z_ix]
+ X_transformed.iloc[:, y_ix] * X_transformed.iloc[:, z_ix]
)
# length_to_width = X_transformed.iloc[:,x_ix] / X_transformed.iloc[:,y_ix]
return np.c_[X, volume, surface_area]
attr_adder = DiamondFeatureTransformer()
diamond_extra_attribs = attr_adder.transform(diamonds_num)
diamond_extra_attribs.shape
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
num_pipeline = Pipeline(
[
("attribs_adder", DiamondFeatureTransformer()),
("std_scaler", StandardScaler()),
]
)
diamonds_num_tr = num_pipeline.fit_transform(diamonds_num)
diamonds_num_tr.shape
from sklearn.compose import ColumnTransformer
num_attribs = list(diamonds_num)
cut_clarity_attribs = list(diamonds_predictors[["cut", "clarity"]])
color_attrib = ["color"]
one_hot = OneHotEncoder()
ordinal_encoder = OrdinalEncoder()
full_pipeline = ColumnTransformer(
[
("num", num_pipeline, num_attribs),
("color_cat", OneHotEncoder(), color_attrib),
("cut_clarity_cat", OrdinalEncoder(), cut_clarity_attribs),
]
)
df_prepared = full_pipeline.fit_transform(diamonds_predictors.drop("Id", axis=1))
df_prepared
from sklearn.metrics import mean_squared_error, r2_score, mean_absolute_error
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(df_prepared, diamonds_labels)
from sklearn.metrics import mean_squared_error
diamond_predictions = lin_reg.predict(df_prepared)
lin_mse = mean_squared_error(diamonds_labels, diamond_predictions)
lin_rmse = np.sqrt(lin_mse)
lin_rmse
def display_scores(scores):
print("Scores:", scores)
print("Mean:", scores.mean())
print("Standard deviation:", scores.std())
from sklearn.model_selection import cross_val_score
lin_scores = cross_val_score(
lin_reg, df_prepared, diamonds_labels, scoring="neg_mean_squared_error", cv=10
)
lin_rmse_scores = np.sqrt(-lin_scores)
display_scores(lin_rmse_scores)
from sklearn.tree import DecisionTreeRegressor
tree_reg = DecisionTreeRegressor()
tree_reg.fit(df_prepared, diamonds_labels)
diamonds_predictions = tree_reg.predict(df_prepared)
tree_mse = mean_squared_error(diamonds_labels, diamonds_predictions)
tree_rmse = np.sqrt(tree_mse)
tree_rmse
from sklearn.model_selection import cross_val_score
scores = cross_val_score(
tree_reg, df_prepared, diamonds_labels, scoring="neg_mean_squared_error", cv=10
)
tree_rmse_scores = np.sqrt(-scores)
display_scores(tree_rmse_scores)
from sklearn.ensemble import RandomForestRegressor
forest_reg = RandomForestRegressor()
forest_reg.fit(df_prepared, diamonds_labels)
diamonds_predictions = forest_reg.predict(df_prepared)
forest_mse = mean_squared_error(diamonds_labels, diamonds_predictions)
forest_rmse = np.sqrt(forest_mse)
forest_rmse
from sklearn.model_selection import cross_val_score
scores = cross_val_score(
forest_reg, df_prepared, diamonds_labels, scoring="neg_mean_squared_error", cv=10
)
forest_rmse_scores = np.sqrt(-scores)
display_scores(forest_rmse_scores)
test = pd.read_csv("/kaggle/input/diamond-price-prediction/test.csv")
prerdictions = full_pipeline.transform(test)
final_predictions = forest_reg.predict(prerdictions)
final_predictions
submission = test[["Id"]]
submission
submission["price"] = final_predictions
submission
submission.describe()
submission.to_csv("submission2.csv", index=None)
| false | 0 | 2,253 | 0 | 2,253 | 2,253 |
||
129826552
|
<jupyter_start><jupyter_text>Human Faces (Object Detection)
A diverse compilation of human facial images encompassing various races, age groups, and profiles, with the aim of creating an unbiased dataset that includes coordinates of facial regions suitable for training object detection models.
Buy me a coffee: https://bmc.link/baghbidi
Kaggle dataset identifier: human-faces-object-detection
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
break
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
from matplotlib import patches
import tensorflow as tf
from PIL import Image
from tensorflow.keras.applications import InceptionResNetV2
boxes = pd.read_csv("/kaggle/input/human-faces-object-detection/faces.csv")
boxes.set_index("image_name", inplace=True)
boxes.head()
images_dir = "/kaggle/input/human-faces-object-detection/images/"
image_name = "00001722.jpg"
box_x, box_y = boxes.loc[image_name].x0, boxes.loc[image_name].y0
box_width, box_height = (
boxes.loc[image_name].x1 - box_x,
boxes.loc[image_name].y1 - box_y,
)
image = cv.imread(images_dir + image_name)[
    ..., ::-1
]  # cv.imread returns BGR, so reverse the channel order to get RGB
plt.imshow(image)
plt.gca().add_patch(
patches.Rectangle(
(box_x, box_y), box_width, box_height, edgecolor="g", facecolor="none"
)
)
# ## All images need to be resized to the same shape, and the boxes rescaled accordingly
reshaped_size = 256
max_number_of_faces = 6
boxes.x0 = (boxes.x0 / boxes.width * reshaped_size).astype(int)
boxes.x1 = (boxes.x1 / boxes.width * reshaped_size).astype(int)
boxes.y0 = (boxes.y0 / boxes.height * reshaped_size).astype(int)
boxes.y1 = (boxes.y1 / boxes.height * reshaped_size).astype(int)
boxes = boxes.drop(["width", "height"], axis=1)
box_x, box_y = boxes.loc[image_name].x0, boxes.loc[image_name].y0
box_width, box_height = (
boxes.loc[image_name].x1 - box_x,
boxes.loc[image_name].y1 - box_y,
)
plt.imshow(cv.resize(image, (reshaped_size, reshaped_size)))
plt.gca().add_patch(
patches.Rectangle(
(box_x, box_y), box_width, box_height, edgecolor="g", facecolor="none"
)
)
def load_image_and_boxes(path):
    # loads the image and its bounding boxes from the given path,
    # then normalizes both the pixel values and the box coordinates
image = cv.imread(path.decode("utf-8"))[..., ::-1]
image = cv.resize(image, (reshaped_size, reshaped_size)) / 255.0
label = boxes.loc[path.decode("utf-8").split("/")[-1]].values / 255.0
n_of_faces = label.shape[0] if len(label.shape) > 1 else 1
if n_of_faces > max_number_of_faces:
label = label[:max_number_of_faces].flatten()
else:
label = np.hstack(
[label.flatten(), np.zeros((max_number_of_faces - n_of_faces) * 4)]
)
label = np.hstack(
[[min(n_of_faces, max_number_of_faces) / max_number_of_faces], label]
) # number of faces in image
return [image.astype(np.float32), label.astype(np.float32)]
images_path = [os.path.join(images_dir, f) for f in os.listdir(images_dir)]
ds = tf.data.Dataset.from_tensor_slices(images_path)
ds = ds.map(
lambda x: tf.numpy_function(load_image_and_boxes, [x], [np.float32, np.float32])
)
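# Note (an addition, not from the original notebook): tf.numpy_function returns
# tensors with unknown static shapes, which some downstream Keras layers expect
# to be set. A minimal sketch of how the shapes could be pinned, assuming the
# reshaped_size and max_number_of_faces values defined above; the map call is
# left commented out and is only needed if shape errors appear during model.fit.
def set_static_shapes(image, label):
    # restore the static shapes lost by tf.numpy_function
    image.set_shape((reshaped_size, reshaped_size, 3))
    label.set_shape((4 * max_number_of_faces + 1,))
    return image, label
# ds = ds.map(set_static_shapes)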
for i in ds.take(1):
plt.imshow(i[0])
box = i[1][1:] * 255
plt.gca().add_patch(
patches.Rectangle(
(box[0], box[1]),
box[2] - box[0],
box[3] - box[1],
edgecolor="g",
facecolor="none",
)
)
# Create horizontal motion-blur kernel
kernel_size = 10
kernel = np.zeros((kernel_size, kernel_size, 3))
kernel[int((kernel_size - 1) / 2), :, :] = np.ones((kernel_size, 3)) / kernel_size
# Apply convolution to perform blurring
def blur_image(image):
    # note: flattening each channel and running a 1D convolution only
    # approximates a 2D horizontal blur (pixels at the end of one row blend
    # into the start of the next)
    blurred_array = np.zeros_like(image, dtype=np.float32)
    for channel in range(3):
        blurred_array[:, :, channel] = np.convolve(
            image[:, :, channel].flatten(), kernel[:, :, channel].flatten(), mode="same"
        ).reshape(image.shape[:2])
    return blurred_array
image = ds.take(1)
for i in image:
blurred_array = blur_image(np.array(i[0]))
plt.imshow(blurred_array)
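# Alternative sketch (an addition, not from the original notebook): OpenCV's
# filter2D applies a true 2D convolution row by row, so the blur does not leak
# across row boundaries the way the flattened 1D convolution above can.
def blur_image_cv(image):
    # 2D horizontal motion-blur kernel: a single non-zero row, normalized
    kernel_2d = np.zeros((kernel_size, kernel_size), dtype=np.float32)
    kernel_2d[(kernel_size - 1) // 2, :] = 1.0 / kernel_size
    # ddepth=-1 keeps the source depth; the same kernel is applied to every channel
    return cv.filter2D(image.astype(np.float32), -1, kernel_2d)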
# ## Model
resnet = InceptionResNetV2(
weights="imagenet",
include_top=False,
input_tensor=tf.keras.layers.Input(shape=(reshaped_size, reshaped_size, 3)),
)
model_top = resnet.output
model_top = tf.keras.layers.Flatten()(model_top)
model_top = tf.keras.layers.Dense(500, activation="relu")(model_top)
model_top = tf.keras.layers.Dense(250, activation="relu")(model_top)
model_top = tf.keras.layers.Dense(4 * max_number_of_faces + 1, activation="sigmoid")(
model_top
)
model = tf.keras.models.Model(inputs=resnet.input, outputs=model_top)
def iou(box_true, box_pred):
n_of_boxes = tf.round(box_true[:, 0] * max_number_of_faces)
bb1, bb2 = box_true[:, 1:], box_pred[:, 1:]
# Intersection over Union
score = 0
for i in range(max_number_of_faces):
x_left = tf.maximum(bb1[:, 4 * i], bb2[:, 4 * i])
y_top = tf.maximum(bb1[:, 4 * i + 1], bb2[:, 4 * i + 1])
x_right = tf.minimum(bb1[:, 4 * i + 2], bb2[:, 4 * i + 2])
y_bottom = tf.minimum(bb1[:, 4 * i + 3], bb2[:, 4 * i + 3])
intersection_area = tf.maximum(x_right - x_left, 0) * tf.maximum(
y_bottom - y_top, 0
)
bb1_area = (bb1[:, 4 * i + 2] - bb1[:, 4 * i]) * (
bb1[:, 4 * i + 3] - bb1[:, 4 * i + 1]
)
bb2_area = (bb2[:, 4 * i + 2] - bb2[:, 4 * i]) * (
bb2[:, 4 * i + 3] - bb2[:, 4 * i + 1]
)
score += (
intersection_area / (bb1_area + bb2_area - intersection_area)
) * tf.clip_by_value(
tf.sign(n_of_boxes - i), clip_value_min=0.0, clip_value_max=tf.float32.max
)
# n_of_boxes[n_of_boxes == 0] = 1
return tf.reduce_mean(
score
/ tf.clip_by_value(
n_of_boxes, clip_value_min=0.0, clip_value_max=tf.float32.max
)
)
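# Quick sanity check of the metric (an addition, not from the original notebook):
# with every slot filled by the same non-degenerate box in both the true and the
# predicted vector, the IoU should come out as 1.0. All-zero padded slots are
# avoided here on purpose, since they would lead to a 0/0 division inside iou.
_box = np.tile(np.array([0.1, 0.1, 0.5, 0.5]), max_number_of_faces)
_true = np.concatenate([[1.0], _box]).astype(np.float32)[np.newaxis, :]
print(iou(tf.constant(_true), tf.constant(_true)).numpy())  # expected: 1.0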
# ### First, freeze all pretrained layers and train only the new ones that we added
for layer in model.layers[:-3]:
    layer.trainable = False  # trainable=False actually freezes the layer's weights
model.compile(loss="mse", optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4))
history = model.fit(ds.batch(10).prefetch(1), epochs=10)
# ### Now unfreeze all layers and train the whole model
for layer in model.layers:
    layer.trainable = True
model.compile(
loss="mse", optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4), metrics=[iou]
)
history = model.fit(ds.batch(10).prefetch(1), epochs=2)
def draw_image_with_box(image, box1, box2=None):
"""
    draws the image and its boxes.
    if the function receives two boxes (one true, one predicted),
    it labels the image with the intersection-over-union score
"""
plt.imshow(image)
plt.axis("off")
n_of_boxes = int(tf.round(box1[0] * max_number_of_faces).numpy())
box1_coords = box1[1:] * 255
for i in range(n_of_boxes):
plt.gca().add_patch(
patches.Rectangle(
(box1_coords[4 * i], box1_coords[4 * i + 1]),
box1_coords[4 * i + 2] - box1_coords[4 * i],
box1_coords[4 * i + 3] - box1_coords[4 * i + 1],
edgecolor="g",
facecolor="none",
linewidth=2,
)
)
if box2 is not None:
box2_coords = box2[1:] * 255
n_of_boxes = int(tf.round(box2[0] * max_number_of_faces).numpy())
for i in range(n_of_boxes):
plt.gca().add_patch(
patches.Rectangle(
(box2_coords[4 * i], box2_coords[4 * i + 1]),
box2_coords[4 * i + 2] - box2_coords[4 * i],
box2_coords[4 * i + 3] - box2_coords[4 * i + 1],
edgecolor="r",
facecolor="none",
linewidth=2,
)
)
plt.title(
"Intersection over Union: "
+ str(iou(box1[np.newaxis, ...], box2[np.newaxis, ...]).numpy()),
fontsize=10,
)
n_examples = 10
images = next(iter(ds.shuffle(buffer_size=100).batch(n_examples)))
box_predicted = model(images[0])
plt.figure(figsize=(10, 30))
for i in range(n_examples):
plt.subplot(n_examples, 2, i + 1)
draw_image_with_box(images[0][i], images[1][i], box_predicted[i])
# print(iou(box_predicted[i][np.newaxis, ...], images[1][i][np.newaxis, ...]))
path = "/kaggle/input/nazar/nazar.jpg"
image = cv.imread(path)[..., ::-1]
image = cv.resize(image, (reshaped_size, reshaped_size)) / 255.0
image = image.reshape(1, reshaped_size, reshaped_size, 3)
box = model(image)
draw_image_with_box(image[0], box[0])
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/826/129826552.ipynb
|
human-faces-object-detection
|
sbaghbidi
|
[{"Id": 129826552, "ScriptId": 38556904, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5477307, "CreationDate": "05/16/2023 18:52:27", "VersionNumber": 5.0, "Title": "face_recognition", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 204.0, "LinesInsertedFromPrevious": 112.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 92.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 186205433, "KernelVersionId": 129826552, "SourceDatasetVersionId": 5377440}]
|
[{"Id": 5377440, "DatasetId": 3119215, "DatasourceVersionId": 5451066, "CreatorUserId": 2371623, "LicenseName": "CC0: Public Domain", "CreationDate": "04/12/2023 01:38:47", "VersionNumber": 1.0, "Title": "Human Faces (Object Detection)", "Slug": "human-faces-object-detection", "Subtitle": "A curated collection of human facial images for training object detection models", "Description": "A diverse compilation of human facial images encompassing various races, age groups, and profiles, with the aim of creating an unbiased dataset that includes coordinates of facial regions suitable for training object detection models.\n\nBuy me a coffee: https://bmc.link/baghbidi", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3119215, "CreatorUserId": 2371623, "OwnerUserId": 2371623.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5377440.0, "CurrentDatasourceVersionId": 5451066.0, "ForumId": 3182683, "Type": 2, "CreationDate": "04/12/2023 01:38:47", "LastActivityDate": "04/12/2023", "TotalViews": 16459, "TotalDownloads": 2207, "TotalVotes": 59, "TotalKernels": 12}]
|
[{"Id": 2371623, "UserName": "sbaghbidi", "DisplayName": "Saeid", "RegisterDate": "10/17/2018", "PerformanceTier": 0}]
|
| false | 1 | 3,042 | 0 | 3,134 | 3,042 |
||
129506119
|
<jupyter_start><jupyter_text>Legal Citation Text Classification
### Context
This dataset contains Australian legal cases from the Federal Court of Australia (FCA). The cases were downloaded from AustLII. All cases from the year 2006,2007,2008 and 2009 are included. For each document , catchphrases, citations sentences, citation catchphrases, and citation classes are captured. Citation classes are indicated in the document, and indicate the type of treatment given to the cases cited by the present case.
### Exploration Ideas
- Create a model to perform text classification on legal data
- EDA to identify top keywords related to every type of case category
Kaggle dataset identifier: legal-citation-text-classification
<jupyter_script>import numpy as np
import pandas as pd
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
df = pd.read_csv(
"/kaggle/input/legal-citation-text-classification/legal_text_classification.csv"
)
df.head()
print(df.head() + " " + df.head())
import pandas as pd
data = pd.read_csv(
"/kaggle/input/legal-citation-text-classification/legal_text_classification.csv"
)
data_top = data.head()
data_top
import pandas as pd
data = pd.read_csv(
"/kaggle/input/legal-citation-text-classification/legal_text_classification.csv"
)
n = 9
series = data["case_id"]
top = series.head(n=n)
top
import pandas as pd
data = pd.read_csv(
"/kaggle/input/legal-citation-text-classification/legal_text_classification.csv"
)
n = 9
series = data["case_title"]
top = series.head(n=n)
top
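# A small extension (not part of the original cells): the dataset description
# mentions citation classes, so a natural next step is to look at how they are
# distributed. The label column name "case_outcome" is an assumption about the
# CSV schema; the guard below falls back to listing the available columns.
label_col = "case_outcome"
if label_col in data.columns:
    print(data[label_col].value_counts())
else:
    print("Label column not found; available columns:", list(data.columns))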
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/506/129506119.ipynb
|
legal-citation-text-classification
|
shivamb
|
[{"Id": 129506119, "ScriptId": 38420227, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15051796, "CreationDate": "05/14/2023 11:35:11", "VersionNumber": 3.0, "Title": "Legal Citation Text", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 31.0, "LinesInsertedFromPrevious": 22.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 9.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185631823, "KernelVersionId": 129506119, "SourceDatasetVersionId": 2800141}]
|
[{"Id": 2800141, "DatasetId": 1710559, "DatasourceVersionId": 2846196, "CreatorUserId": 1571785, "LicenseName": "CC0: Public Domain", "CreationDate": "11/11/2021 15:48:25", "VersionNumber": 1.0, "Title": "Legal Citation Text Classification", "Slug": "legal-citation-text-classification", "Subtitle": "Legal Industry - Citations Text Classification", "Description": "### Context\n\nThis dataset contains Australian legal cases from the Federal Court of Australia (FCA). The cases were downloaded from AustLII. All cases from the year 2006,2007,2008 and 2009 are included. For each document , catchphrases, citations sentences, citation catchphrases, and citation classes are captured. Citation classes are indicated in the document, and indicate the type of treatment given to the cases cited by the present case.\n\n### Exploration Ideas \n\n- Create a model to perform text classification on legal data\n- EDA to identify top keywords related to every type of case category\n\n### Acknowledgements \n\nCredits: Filippo Galgani\ngalganif '@' cse.unsw.edu.au\nSchool of Computer Science and Engineering\nThe Univeristy of New South Wales, Australia", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1710559, "CreatorUserId": 1571785, "OwnerUserId": 1571785.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2800141.0, "CurrentDatasourceVersionId": 2846196.0, "ForumId": 1732236, "Type": 2, "CreationDate": "11/11/2021 15:48:25", "LastActivityDate": "11/11/2021", "TotalViews": 16249, "TotalDownloads": 1241, "TotalVotes": 46, "TotalKernels": 7}]
|
[{"Id": 1571785, "UserName": "shivamb", "DisplayName": "Shivam Bansal", "RegisterDate": "01/22/2018", "PerformanceTier": 4}]
|
| false | 1 | 290 | 0 | 467 | 290 |
||
129492785
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
import glob
import numpy as np
import pandas as pd
import scipy.io
from skimage.transform import resize
import scipy.ndimage as ndi
import matplotlib.pyplot as plt
from tqdm import tqdm
import gc
gc.collect()
import nibabel as nib
dataset_dir = "../input/lung-segementation/Task06_Lung"
training_file_pattern = os.path.join(dataset_dir, "*.nii.gz")
print(training_file_pattern)
image_pattern = os.path.join(dataset_dir, "imagesTr", "lung_*.nii.gz")
label_pattern = os.path.join(dataset_dir, "labelsTr", "lung_*.nii.gz")
print(image_pattern)
image_files = sorted(glob.glob(image_pattern))
label_files = sorted(glob.glob(label_pattern))
print(image_files)
print(label_files)
assert len(image_files) == len(
label_files
), "Number of image and label files don't match"
import json
f = open(os.path.join(dataset_dir, "dataset.json"), "r")
# Reading from file
data = json.loads(f.read())
# Iterating through the json
# list
# for i in data:
# print(i)
# print(data["training"])
image_files = []
label_files = []
for el in data["training"]:
image = dataset_dir + el["image"][1:-3]
label = dataset_dir + el["label"][1:]
image_files.append(image)
label_files.append(label)
image_files = sorted(image_files)
label_files = sorted(label_files)
print(image_files)
print(label_files)
# Closing file
f.close()
assert len(image_files) == len(label_files)
import os
print(os.listdir("../input/lung-segementation/Task06_Lung/imagesTr"))
from glob import glob
glob(os.path.join(dataset_dir, "imagesTr"))
os.listdir(os.path.join(dataset_dir, "imagesTr"))
import nibabel as nib
nib.load("/kaggle/input/test99/._lung_001.nii.gz")
# /kaggle/input/lung-segementation/Task06_Lung/imagesTr/lung_001.nii
# /kaggle/input/lung-segementation/Task06_Lung/imagesTr/lung_005.nii
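# A minimal sketch of loading one training volume and its mask with nibabel
# (an addition, not part of the original cells). It assumes the paths collected
# in image_files/label_files above point to readable NIfTI files.
if image_files and label_files:
    img_vol = nib.load(image_files[0]).get_fdata()
    lbl_vol = nib.load(label_files[0]).get_fdata()
    print("image volume shape:", img_vol.shape, "| label volume shape:", lbl_vol.shape)
    mid_slice = img_vol.shape[2] // 2
    plt.imshow(img_vol[:, :, mid_slice], cmap="gray")
    plt.imshow(lbl_vol[:, :, mid_slice], alpha=0.3)
    plt.title("middle axial slice with mask overlay")
    plt.show()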
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/492/129492785.ipynb
| null | null |
[{"Id": 129492785, "ScriptId": 38499658, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 5259483, "CreationDate": "05/14/2023 09:16:10", "VersionNumber": 1.0, "Title": "notebook7b7529372d", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 98.0, "LinesInsertedFromPrevious": 98.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 845 | 0 | 845 | 845 |
||
129136916
|
<jupyter_start><jupyter_text>News Headlines Dataset For Sarcasm Detection
### Context
** **Please cite the dataset using the BibTex provided in one of the following sections if you are using it in your research, thank you!** **
Past studies in Sarcasm Detection mostly make use of Twitter datasets collected using hashtag based supervision but such datasets are noisy in terms of labels and language. Furthermore, many tweets are replies to other tweets and detecting sarcasm in these requires the availability of contextual tweets.
To overcome the limitations related to noise in Twitter datasets, this **News Headlines dataset for Sarcasm Detection** is collected from two news website. [*TheOnion*](https://www.theonion.com/) aims at producing sarcastic versions of current events and we collected all the headlines from News in Brief and News in Photos categories (which are sarcastic). We collect real (and non-sarcastic) news headlines from [*HuffPost*](https://www.huffingtonpost.com/).
This new dataset has following advantages over the existing Twitter datasets:
* Since news headlines are written by professionals in a formal manner, there are no spelling mistakes and informal usage. This reduces the sparsity and also increases the chance of finding pre-trained embeddings.
* Furthermore, since the sole purpose of *TheOnion* is to publish sarcastic news, we get high-quality labels with much less noise as compared to Twitter datasets.
* Unlike tweets which are replies to other tweets, the news headlines we obtained are self-contained. This would help us in teasing apart the real sarcastic elements.
### Content
Each record consists of three attributes:
* ```is_sarcastic```: 1 if the record is sarcastic otherwise 0
* ```headline```: the headline of the news article
* ```article_link```: link to the original news article. Useful in collecting supplementary data
General statistics of data, instructions on how to read the data in python, and basic exploratory analysis could be found at [this GitHub repo](https://github.com/rishabhmisra/News-Headlines-Dataset-For-Sarcasm-Detection). A hybrid NN architecture trained on this dataset can be found at [this GitHub repo](https://github.com/rishabhmisra/Sarcasm-Detection-using-NN).
### Citation
If you're using this dataset for your work, please cite the following articles:
Citation in text format:
```
1. Misra, Rishabh and Prahal Arora. "Sarcasm Detection using News Headlines Dataset." AI Open (2023).
2. Misra, Rishabh and Jigyasa Grover. "Sculpting Data for ML: The first act of Machine Learning." ISBN 9798585463570 (2021).
```
Citation in BibTex format:
```
@article{misra2023Sarcasm,
title = {Sarcasm Detection using News Headlines Dataset},
journal = {AI Open},
volume = {4},
pages = {13-18},
year = {2023},
issn = {2666-6510},
doi = {https://doi.org/10.1016/j.aiopen.2023.01.001},
url = {https://www.sciencedirect.com/science/article/pii/S2666651023000013},
author = {Rishabh Misra and Prahal Arora},
}
@book{misra2021sculpting,
author = {Misra, Rishabh and Grover, Jigyasa},
year = {2021},
month = {01},
pages = {},
title = {Sculpting Data for ML: The first act of Machine Learning},
isbn = {9798585463570}
}
```
Please link to [rishabhmisra.github.io/publications](https://rishabhmisra.github.io/publications/) as the source of this dataset. Thanks!
### Inspiration
Can you identify sarcastic sentences? Can you distinguish between fake news and legitimate news?
### Reading the data
Following code snippet could be used to read the data:
```
import json
def parse_data(file):
for l in open(file,'r'):
yield json.loads(l)
data = list(parse_data('./Sarcasm_Headlines_Dataset.json'))
```
### Want to contribute your own datasets?
If you are interested in learning how to collect high-quality datasets for various ML tasks and the overall importance of data in the ML ecosystem, consider reading my book [Sculpting Data for ML](https://www.amazon.com/dp/B08RN47C5T).
### Other datasets
Please also checkout the following datasets collected by me:
* [News Category Dataset](https://www.kaggle.com/rmisra/news-category-dataset)
* [Clothing Fit Dataset for Size Recommendation](https://www.kaggle.com/rmisra/clothing-fit-dataset-for-size-recommendation)
* [IMDB Spoiler Dataset](https://www.kaggle.com/rmisra/imdb-spoiler-dataset)
* [Politifact Fact Check Dataset](https://www.kaggle.com/datasets/rmisra/politifact-fact-check-dataset)
Kaggle dataset identifier: news-headlines-dataset-for-sarcasm-detection
<jupyter_script># ## DATA SET DISCRIPTION
# The dataset I am using is available at the Kaggle link:https://www.kaggle.com/datasets/rmisra/news-headlines-dataset-for-sarcasm-detection, is called the "News Headlines Dataset for Sarcasm Detection." It is a collection of news headlines that have been labeled to indicate whether they are sarcastic or not.
# The dataset contains two files: "Sarcasm_Headlines_Dataset.json" and "Sarcasm_Headlines_Dataset_v2.json". The first file is the original version of the dataset, while the second file is an updated version with more data.
# Each file contains records in JSON format, where each record represents a news headline along with its label. The records consist of three key-value pairs: "headline," "article_link," and "is_sarcastic." The "headline" key stores the text of the news headline, the "article_link" key contains the URL of the original article, and the "is_sarcastic" key indicates whether the headline is sarcastic (1) or not (0).
# The dataset is designed to facilitate the task of sarcasm detection in news headlines, which is a challenging problem in natural language processing. It can be used for various purposes, such as training and evaluating machine learning models or conducting research on sarcasm detection algorithms.
# By utilizing this dataset, you can develop models or algorithms to automatically identify sarcasm in news headlines, contributing to the advancement of natural language understanding and sentiment analysis.
# ## IMPORTS
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from wordcloud import WordCloud
from sklearn.model_selection import train_test_split
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from nltk.stem import SnowballStemmer
from unidecode import unidecode
import collections
import re
import os
import warnings
warnings.filterwarnings("ignore")
# ## I am using a Kaggle notebook, so I just added the data.
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# ## Read into DataFrame
address = "/kaggle/input/news-headlines-dataset-for-sarcasm-detection/Sarcasm_Headlines_Dataset_v2.json"
json_df = pd.read_json(address, lines=True)
df = pd.DataFrame(json_df)
# ## EDA
df.head()
df.tail()
# ## The article_link column is not useful, so let's just drop it.
df = df.drop("article_link", axis=1)
df.columns
df.info()
df.head()
df.shape
df.isna().sum()
# ## Checking For Duplicate Values
df.duplicated().sum()
# ### So our data has some duplicates; let's drop them.
df.drop_duplicates(subset=["headline"], inplace=True)
df.duplicated().sum()
df.describe(include="all")
df["headline"][10]
# ## VISUALIZING THE DATA
# ### Plotting the is_sarcastic data to analyze positive and negative counts.
sns.countplot(x="is_sarcastic", data=df)
# ### Let's check the length of the texts.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))
text_len = df[df["is_sarcastic"] == 1]["headline"].apply(len)
ax1.hist(text_len, color="red")
ax1.set_title("Sarcastic text length")
text_len = df[df["is_sarcastic"] == 0]["headline"].apply(len)
ax2.hist(text_len, color="black")
ax2.set_title("Not Sarcastic text length")
# ## Let's check the word count of the texts.
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 5))
text_len = df[df["is_sarcastic"] == 1]["headline"].str.split().map(lambda x: len(x))
ax1.hist(text_len, color="lightgreen")
ax1.set_title("Sarcastic text word count")
text_len = df[df["is_sarcastic"] == 0]["headline"].str.split().map(lambda x: len(x))
ax2.hist(text_len, color="lightblue")
ax2.set_title("Not Sarcastic text word count")
plt.style.use("dark_background")
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 6))
word = (
df[df["is_sarcastic"] == 1]["headline"]
.str.split()
.apply(lambda x: [len(i) for i in x])
)
sns.distplot(word.map(lambda x: np.mean(x)), ax=ax1, color="red")
ax1.set_title("Sarcastic average word length")
word = (
df[df["is_sarcastic"] == 0]["headline"]
.str.split()
.apply(lambda x: [len(i) for i in x])
)
sns.distplot(word.map(lambda x: np.mean(x)), ax=ax2, color="lightblue")
ax2.set_title("Not Sarcastic average word length")
# # **PREPROCESSING**
# ## DATA CLEANING
df["cleaned_headline"] = df["headline"].apply(
lambda x: re.sub(r"[^a-zA-Z\s]", "", x.lower())
)
print(df["cleaned_headline"])
nltk.download("stopwords")
# ## Tokenization, stopword removal, stemming, and joining
stop_words = set(stopwords.words("english"))
stemmer = SnowballStemmer("english")
df["final_text"] = df["cleaned_headline"].apply(
lambda x: " ".join(
[
stemmer.stem(word)
for word in word_tokenize(x.lower())
if word not in stop_words
]
)
)
df["final_text"]
# ## TOP 10 MOST USED WORDS BEFORE CLEANING AND PREPROCESSING
words = []
for text in df["headline"]:
words.extend(text.split())
word_count = collections.Counter(words)
top_words = dict(word_count.most_common(10))
plt.style.use("dark_background")
plt.figure(figsize=(12, 8))
plt.bar(range(len(top_words)), list(top_words.values()), align="center")
plt.xticks(range(len(top_words)), list(top_words.keys()))
plt.grid(alpha=0.5)
plt.title("Top 10 most used words", fontsize=18)
plt.xlabel("Words")
plt.ylabel("Frequency")
plt.show()
# ## TOP 10 MOST USED WORDS AFTER CLEANING AND PREPROCESSING
words = []
for text in df["final_text"]:
words.extend(text.split())
word_count = collections.Counter(words)
top_words = dict(word_count.most_common(10))
plt.style.use("dark_background")
plt.figure(figsize=(12, 8))
plt.bar(range(len(top_words)), list(top_words.values()), align="center")
plt.xticks(range(len(top_words)), list(top_words.keys()))
plt.grid(alpha=0.5)
plt.title("Top 10 most used words", fontsize=18)
plt.xlabel("Words")
plt.ylabel("Frequency")
plt.show()
# Now we see a clear change: the text is preprocessed, cleaned, and free of stopwords and other special characters.
labels = np.array(df.is_sarcastic)
sentences = np.array(df.final_text)
print("Number of sentences and labels: ", len(labels), len(sentences))
sentences
labels
# ## WORD GRAPH
text = " ".join(caption for caption in df["final_text"])
wordcloud = WordCloud(
width=800, height=500, background_color="black", min_font_size=10
).generate(text)
plt.figure(figsize=(10, 6), facecolor=None)
plt.imshow(wordcloud)
plt.axis("off")
plt.tight_layout(pad=0)
plt.show()
# ## WORD GRAPH FOR SARCASTIC AND NON SARCASTIC HEADLINES
sarcastic_text = " ".join(df[df["is_sarcastic"] == 1]["final_text"])
non_sarcastic_text = " ".join(df[df["is_sarcastic"] == 0]["final_text"])
plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1)
wordcloud_sarcastic = WordCloud(width=800, height=400).generate(sarcastic_text)
plt.imshow(wordcloud_sarcastic, interpolation="bilinear")
plt.title("Sarcastic Headlines")
plt.axis("off")
plt.subplot(1, 2, 2)
wordcloud_non_sarcastic = WordCloud(width=800, height=400).generate(non_sarcastic_text)
plt.imshow(wordcloud_non_sarcastic, interpolation="bilinear")
plt.title("Non-Sarcastic Headlines")
plt.axis("off")
plt.tight_layout()
plt.show()
# ## Sarcastic and Non Sarcastic Headline Lengths
df["headline_length"] = df["headline"].apply(len)
plt.figure(figsize=(10, 5))
plt.hist(
df[df["is_sarcastic"] == 1]["headline_length"],
bins=20,
alpha=0.5,
label="Sarcastic",
)
plt.hist(
df[df["is_sarcastic"] == 0]["headline_length"],
bins=20,
alpha=0.5,
label="Non-Sarcastic",
)
plt.xlabel("Headline Length")
plt.ylabel("Count")
plt.legend()
plt.title("Length of Headlines")
plt.show()
# # TRAIN TEST SPLIT
x_train, x_test, y_train, y_test = train_test_split(sentences, labels, test_size=0.2)
print(
"Train and Test set distribution: ",
len(x_train),
len(x_test),
len(y_train),
len(y_test),
)
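# A minimal baseline sketch (an addition, not part of the original cells):
# TF-IDF features with logistic regression, just to get a first accuracy number
# on the held-out split. The model choice and parameters are assumptions, not
# something the original notebook specifies.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
vectorizer = TfidfVectorizer(max_features=20000)
x_train_vec = vectorizer.fit_transform(x_train)
x_test_vec = vectorizer.transform(x_test)
baseline = LogisticRegression(max_iter=1000)
baseline.fit(x_train_vec, y_train)
print("Baseline accuracy:", accuracy_score(y_test, baseline.predict(x_test_vec)))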
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/136/129136916.ipynb
|
news-headlines-dataset-for-sarcasm-detection
|
rmisra
|
[{"Id": 129136916, "ScriptId": 38363813, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11086653, "CreationDate": "05/11/2023 09:13:06", "VersionNumber": 2.0, "Title": "News_Headlines_Sarcasam_Detection", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 238.0, "LinesInsertedFromPrevious": 18.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 220.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 184921940, "KernelVersionId": 129136916, "SourceDatasetVersionId": 533474}]
|
[{"Id": 533474, "DatasetId": 30764, "DatasourceVersionId": 549839, "CreatorUserId": 270038, "LicenseName": "Attribution 4.0 International (CC BY 4.0)", "CreationDate": "07/03/2019 23:52:57", "VersionNumber": 2.0, "Title": "News Headlines Dataset For Sarcasm Detection", "Slug": "news-headlines-dataset-for-sarcasm-detection", "Subtitle": "High quality dataset for the task of Sarcasm and Fake News Detection", "Description": "###Context\n** **Please cite the dataset using the BibTex provided in one of the following sections if you are using it in your research, thank you!** **\n\nPast studies in Sarcasm Detection mostly make use of Twitter datasets collected using hashtag based supervision but such datasets are noisy in terms of labels and language. Furthermore, many tweets are replies to other tweets and detecting sarcasm in these requires the availability of contextual tweets.\n\nTo overcome the limitations related to noise in Twitter datasets, this **News Headlines dataset for Sarcasm Detection** is collected from two news website. [*TheOnion*](https://www.theonion.com/) aims at producing sarcastic versions of current events and we collected all the headlines from News in Brief and News in Photos categories (which are sarcastic). We collect real (and non-sarcastic) news headlines from [*HuffPost*](https://www.huffingtonpost.com/).\n\nThis new dataset has following advantages over the existing Twitter datasets:\n\n* Since news headlines are written by professionals in a formal manner, there are no spelling mistakes and informal usage. This reduces the sparsity and also increases the chance of finding pre-trained embeddings.\n\n* Furthermore, since the sole purpose of *TheOnion* is to publish sarcastic news, we get high-quality labels with much less noise as compared to Twitter datasets.\n\n* Unlike tweets which are replies to other tweets, the news headlines we obtained are self-contained. This would help us in teasing apart the real sarcastic elements.\n\n### Content\nEach record consists of three attributes:\n\n* ```is_sarcastic```: 1 if the record is sarcastic otherwise 0\n\n* ```headline```: the headline of the news article\n\n* ```article_link```: link to the original news article. Useful in collecting supplementary data\n\nGeneral statistics of data, instructions on how to read the data in python, and basic exploratory analysis could be found at [this GitHub repo](https://github.com/rishabhmisra/News-Headlines-Dataset-For-Sarcasm-Detection). A hybrid NN architecture trained on this dataset can be found at [this GitHub repo](https://github.com/rishabhmisra/Sarcasm-Detection-using-NN).\n\n### Citation\n\nIf you're using this dataset for your work, please cite the following articles:\n\nCitation in text format:\n```\n1. Misra, Rishabh and Prahal Arora. \"Sarcasm Detection using News Headlines Dataset.\" AI Open (2023).\n2. Misra, Rishabh and Jigyasa Grover. 
\"Sculpting Data for ML: The first act of Machine Learning.\" ISBN 9798585463570 (2021).\n```\nCitation in BibTex format:\n```\n@article{misra2023Sarcasm,\n title = {Sarcasm Detection using News Headlines Dataset},\n journal = {AI Open},\n volume = {4},\n pages = {13-18},\n year = {2023},\n issn = {2666-6510},\n doi = {https://doi.org/10.1016/j.aiopen.2023.01.001},\n url = {https://www.sciencedirect.com/science/article/pii/S2666651023000013},\n author = {Rishabh Misra and Prahal Arora},\n}\n@book{misra2021sculpting,\n author = {Misra, Rishabh and Grover, Jigyasa},\n year = {2021},\n month = {01},\n pages = {},\n title = {Sculpting Data for ML: The first act of Machine Learning},\n isbn = {9798585463570}\n}\n```\nPlease link to [rishabhmisra.github.io/publications](https://rishabhmisra.github.io/publications/) as the source of this dataset. Thanks!\n\n### Inspiration\n\nCan you identify sarcastic sentences? Can you distinguish between fake news and legitimate news?\n\n### Reading the data\nFollowing code snippet could be used to read the data:\n\n```\nimport json\n\ndef parse_data(file):\n for l in open(file,'r'):\n yield json.loads(l)\n\ndata = list(parse_data('./Sarcasm_Headlines_Dataset.json'))\n```\n\n### Want to contribute your own datasets?\n\nIf you are interested in learning how to collect high-quality datasets for various ML tasks and the overall importance of data in the ML ecosystem, consider reading my book [Sculpting Data for ML](https://www.amazon.com/dp/B08RN47C5T).\n\n### Other datasets\nPlease also checkout the following datasets collected by me:\n\n* [News Category Dataset](https://www.kaggle.com/rmisra/news-category-dataset)\n\n* [Clothing Fit Dataset for Size Recommendation](https://www.kaggle.com/rmisra/clothing-fit-dataset-for-size-recommendation)\n\n* [IMDB Spoiler Dataset](https://www.kaggle.com/rmisra/imdb-spoiler-dataset)\n\n* [Politifact Fact Check Dataset](https://www.kaggle.com/datasets/rmisra/politifact-fact-check-dataset)", "VersionNotes": "add more sarcastic headlines", "TotalCompressedBytes": 6057046.0, "TotalUncompressedBytes": 3425771.0}]
|
[{"Id": 30764, "CreatorUserId": 270038, "OwnerUserId": 270038.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 533474.0, "CurrentDatasourceVersionId": 549839.0, "ForumId": 39060, "Type": 2, "CreationDate": "06/09/2018 22:14:56", "LastActivityDate": "06/09/2018", "TotalViews": 275075, "TotalDownloads": 39180, "TotalVotes": 903, "TotalKernels": 246}]
|
[{"Id": 270038, "UserName": "rmisra", "DisplayName": "Rishabh Misra", "RegisterDate": "12/10/2014", "PerformanceTier": 2}]
|
| false | 0 | 2,657 | 0 | 3,985 | 2,657 |
||
129136983
|
# # Basic CNN in PyTorch
# We'll test out a simple CNN and see what accuracy we can achieve
import torch
import torchvision
from torchvision import transforms
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import random_split
from torchvision import datasets
from tqdm.notebook import tqdm, trange
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
torch.manual_seed(0)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
# Utils: Create a mapping from string representations of the classes to integer
label2id = {
"bacterial_leaf_blight": 0,
"bacterial_leaf_streak": 1,
"bacterial_panicle_blight": 2,
"blast": 3,
"brown_spot": 4,
"dead_heart": 5,
"downy_mildew": 6,
"hispa": 7,
"normal": 8,
"tungro": 9,
}
# ## Load and prepare the data
train_dir = "../input/paddy-disease-classification/train_images/"
data = pd.read_csv("../input/paddy-disease-classification/train.csv")
data["image_path"] = data.apply(
lambda row: train_dir + row["label"] + "/" + row["image_id"], axis=1
)
data["label_enc"] = data.apply(lambda row: label2id[row["label"]], axis=1)
data["image_size"] = data.apply(lambda row: plt.imread(row["image_path"]).shape, axis=1)
data.head()
# Let's create a dataset
class PaddyDataset(torch.utils.data.Dataset):
def __init__(self, images_filepaths, targets, transform=None):
self.images_filepaths = images_filepaths
self.targets = targets
self.transform = transform
def __len__(self):
return len(self.images_filepaths)
def __getitem__(self, idx):
image_filepath = self.images_filepaths[idx]
image = plt.imread(image_filepath)
if image.shape == (480, 640, 3):
image = image.transpose(1, 0, 2)
if self.transform is not None:
image = self.transform(image)
label = torch.tensor(self.targets[idx]).long()
return image, label
X_train = data["image_path"]
y_train = data["label_enc"]
# As we have already learned, we want to normalize our data. But we do not yet have the statistics to do so, i.e. we don't know the per-channel mean and standard deviation of the dataset. We can compute them as below.
def get_mean_and_std(dataloader):
channels_sum, channels_squared_sum, num_batches = 0, 0, 0
for data, _ in dataloader:
# Mean over batch, height and width, but not over the channels
channels_sum += torch.mean(data, dim=[0, 2, 3])
channels_squared_sum += torch.mean(data**2, dim=[0, 2, 3])
num_batches += 1
mean = channels_sum / num_batches
# std = sqrt(E[X^2] - (E[X])^2)
std = (channels_squared_sum / num_batches - mean**2) ** 0.5
return mean, std
transform = transforms.Compose([transforms.ToTensor()])
# Pytorch Dataset Creation
train_dataset = PaddyDataset(
images_filepaths=X_train.values, targets=y_train.values, transform=transform
)
# check first image shape
train_dataset[0][0].shape
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True)
mean, std = get_mean_and_std(train_loader)
mean, std
# Now we can instantiate the Paddy Dataset with normalization
transform = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.4970, 0.5882, 0.2304), (0.2410, 0.2426, 0.2178)),
]
)
# Pytorch Dataset Creation
dataset = PaddyDataset(
images_filepaths=X_train.values, targets=y_train.values, transform=transform
)
train_share = int(len(dataset) * 0.8)
train_dataset, val_dataset = random_split(
dataset, [train_share, len(dataset) - train_share]
)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True)
# Let's briefly check if the computation worked and we get batches where the mean is about zero and the std is about 1.
imgs, _ = next(iter(train_loader))
print("Batch mean", imgs.mean(dim=[0, 2, 3]))
print("Batch std", imgs.std(dim=[0, 2, 3]))
# ## CNN
class Paddy_CNN(nn.Module):
def __init__(self):
super().__init__()
# 2 Convolution layers and following two linear layers
self.conv1 = nn.Conv2d(3, 32, kernel_size=5, padding=2)
self.conv2 = nn.Conv2d(32, 64, kernel_size=5, padding=2)
self.fc1 = nn.Linear(int(640 / 4 * 480 / 4 * 64), 256)
self.fc2 = nn.Linear(256, 10)
def forward(self, x):
# conv layer 1
x = self.conv1(x)
x = F.relu(x)
x = F.max_pool2d(x, kernel_size=2)
# conv layer 2
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, kernel_size=2)
# fc layer 1
x = x.view(x.shape[0], -1)
x = self.fc1(x)
x = F.relu(x)
# fc layer 2
x = self.fc2(x)
return x
# Training
## Training
# Instantiate model
model = Paddy_CNN().to(device)
# Loss and Optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
# Iterate through train set minibatchs
for epoch in trange(30):
for images, labels in tqdm(train_loader):
# Zero out the gradients
optimizer.zero_grad()
# Forward pass
x = images.to(device)
y = model(x)
loss = criterion(y, labels.to(device))
# Backward pass
loss.backward()
optimizer.step()
## Testing
correct = 0
total = len(val_dataset)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=32, shuffle=False)
with torch.no_grad():
# Iterate through test set minibatchs
for images, labels in tqdm(val_loader):
# Forward pass
x = images.to(device)
y = model(x)
predictions = torch.argmax(y, dim=1)
correct += torch.sum((predictions == labels.to(device)).float())
print(f"Test accuracy: {correct/total}")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/136/129136983.ipynb
| null | null |
[{"Id": 129136983, "ScriptId": 38201138, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3921195, "CreationDate": "05/11/2023 09:13:50", "VersionNumber": 2.0, "Title": "Opencampus Basic CNN in PyTorch", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 197.0, "LinesInsertedFromPrevious": 71.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 126.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# # Basic CNN in PyTorch
# We'll test our a simple CNN and see what accuracy we can achieve
import torch
import torchvision
from torchvision import transforms
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import random_split
from torchvision import datasets
from tqdm.notebook import tqdm, trange
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
torch.manual_seed(0)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
# Utils: Create a mapping from string representations of the classes to integer
label2id = {
"bacterial_leaf_blight": 0,
"bacterial_leaf_streak": 1,
"bacterial_panicle_blight": 2,
"blast": 3,
"brown_spot": 4,
"dead_heart": 5,
"downy_mildew": 6,
"hispa": 7,
"normal": 8,
"tungro": 9,
}
# ## Load and prepare the data
train_dir = "../input/paddy-disease-classification/train_images/"
data = pd.read_csv("../input/paddy-disease-classification/train.csv")
data["image_path"] = data.apply(
lambda row: train_dir + row["label"] + "/" + row["image_id"], axis=1
)
data["label_enc"] = data.apply(lambda row: label2id[row["label"]], axis=1)
data["image_size"] = data.apply(lambda row: plt.imread(row["image_path"]).shape, axis=1)
data.head()
# Let's create a dataset
class PaddyDataset(torch.utils.data.Dataset):
def __init__(self, images_filepaths, targets, transform=None):
self.images_filepaths = images_filepaths
self.targets = targets
self.transform = transform
def __len__(self):
return len(self.images_filepaths)
def __getitem__(self, idx):
image_filepath = self.images_filepaths[idx]
image = plt.imread(image_filepath)
if image.shape == (480, 640, 3):
image = image.transpose(1, 0, 2)
if self.transform is not None:
image = self.transform(image)
label = torch.tensor(self.targets[idx]).long()
return image, label
X_train = data["image_path"]
y_train = data["label_enc"]
# As we have already learned we want to normalize our data. But we do not currently have the means to do it, i.e. we don't know the means and variance of the dataset to normalize it. But we can compute it as below
def get_mean_and_std(dataloader):
channels_sum, channels_squared_sum, num_batches = 0, 0, 0
for data, _ in dataloader:
# Mean over batch, height and width, but not over the channels
channels_sum += torch.mean(data, dim=[0, 2, 3])
channels_squared_sum += torch.mean(data**2, dim=[0, 2, 3])
num_batches += 1
mean = channels_sum / num_batches
# std = sqrt(E[X^2] - (E[X])^2)
std = (channels_squared_sum / num_batches - mean**2) ** 0.5
return mean, std
transform = transforms.Compose([transforms.ToTensor()])
# Pytorch Dataset Creation
train_dataset = PaddyDataset(
images_filepaths=X_train.values, targets=y_train.values, transform=transform
)
# check first image shape
train_dataset[0][0].shape
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True)
mean, std = get_mean_and_std(train_loader)
mean, std
# Now we can instantiate the Paddy Dataset with normalization
transform = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.4970, 0.5882, 0.2304), (0.2410, 0.2426, 0.2178)),
]
)
# Pytorch Dataset Creation
dataset = PaddyDataset(
images_filepaths=X_train.values, targets=y_train.values, transform=transform
)
train_share = int(len(dataset) * 0.8)
train_dataset, val_dataset = random_split(
dataset, [train_share, len(dataset) - train_share]
)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=32, shuffle=True)
# Let's briefly check if the computation worked and we get batches where the mean is about zero and the std is about 1.
imgs, _ = next(iter(train_loader))
print("Batch mean", imgs.mean(dim=[0, 2, 3]))
print("Batch std", imgs.std(dim=[0, 2, 3]))
# ## CNN
class Paddy_CNN(nn.Module):
def __init__(self):
super().__init__()
# 2 Convolution layers and following two linear layers
self.conv1 = nn.Conv2d(3, 32, kernel_size=5, padding=2)
self.conv2 = nn.Conv2d(32, 64, kernel_size=5, padding=2)
self.fc1 = nn.Linear(int(640 / 4 * 480 / 4 * 64), 256)
self.fc2 = nn.Linear(256, 10)
def forward(self, x):
# conv layer 1
x = self.conv1(x)
x = F.relu(x)
x = F.max_pool2d(x, kernel_size=2)
# conv layer 2
x = self.conv2(x)
x = F.relu(x)
x = F.max_pool2d(x, kernel_size=2)
# fc layer 1
x = x.view(x.shape[0], -1)
x = self.fc1(x)
x = F.relu(x)
# fc layer 2
x = self.fc2(x)
return x
# Training
## Training
# Instantiate model
model = Paddy_CNN().to(device)
# Loss and Optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
# Iterate through train set minibatchs
for epoch in trange(30):
for images, labels in tqdm(train_loader):
# Zero out the gradients
optimizer.zero_grad()
# Forward pass
x = images.to(device)
y = model(x)
loss = criterion(y, labels.to(device))
# Backward pass
loss.backward()
optimizer.step()
## Testing
correct = 0
total = len(val_dataset)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=32, shuffle=False)
with torch.no_grad():
# Iterate through test set minibatchs
for images, labels in tqdm(val_loader):
# Forward pass
x = images.to(device)
y = model(x)
predictions = torch.argmax(y, dim=1)
correct += torch.sum((predictions == labels.to(device)).float())
print(f"Test accuracy: {correct/total}")
| false | 0 | 1,845 | 0 | 1,845 | 1,845 |
||
129141207
|
<jupyter_start><jupyter_text>spamEmailData
Kaggle dataset identifier: spam-email-data
<jupyter_script># #Máy học được giám sát để phát hiện thư rác
# Ba mô hình ML để phân loại dữ liệu văn bản email là thư rác:
# 1. **Support Vector Machine (SVM)**
# 1. **Naive Bayes - Multinomial and Complement**
# 1. **Random Forest**
# Những mô hình này đã được chọn vì chúng tốt cho việc phân loại dữ liệu văn bản.
# Dữ liệu email spam được chia thành các tập hợp con đào tạo và kiểm tra theo tỷ lệ 0,75 đến 0,25.
# Đối với mỗi mô hình, chu trình phân tách, huấn luyện và kiểm tra được lặp lại 100 lần, ghi lại độ chính xác và báo cáo giá trị trung bình và độ lệch chuẩn (S.D.) của nó.
# #### **Libraries**
# First, importing the libraries.
import os
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import time
import matplotlib as mpl
import matplotlib.pyplot as plt
import wordcloud
# ML learning model selection, metrics, feature extraction
from sklearn import (
ensemble,
feature_extraction,
model_selection,
naive_bayes,
metrics,
svm,
)
# To see available files
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# #### **Lấy và làm sạch dữ liệu**
# Tiếp theo, đọc dữ liệu, xem xét các biến và xóa các cột không mong muốn.
# Read into dataframe
file_path = "../input/spam-email-data/spam.csv"
data = pd.read_csv(file_path)
# Drop extra columns; use data.columns to see column names
# data = data.drop(data.columns[2:], axis=1)
data.head(n=8) # Look at data, equivalent to data.iloc[:n]
# print(data.shape)
# #### **Exploring data**
# Tiếp theo, kiểm tra tỷ lệ email spam và không phải spam trong tập dữ liệu.
# Calculate spam ratio, to understand proportion of dataset that is spam:
spam_count = sum(True for entry in data.v1 if entry == "spam")
ham_count = sum(True for entry in data.v1 if entry == "ham")
spam_f = spam_count / len(data.v1)
ham_f = ham_count / len(data.v1)
print("Total count is {} \n".format(spam_count + ham_count))
print(
"Spam fraction = {} / {} = {:.3f} \n\nHam fraction = {} / {} = {:.3f} \n\n".format(
spam_count, len(data.v1), spam_f, ham_count, len(data.v1), ham_f
)
)
# Plot pie chart of spam and ham
colors = [(0.1, 0.9, 0.9, 0.7), (0.9, 0.2, 0.2, 0.7)]
plt.pie(
[ham_count, spam_count],
labels=["non-spam: {:.3f}".format(ham_f), "spam: {:.3f}".format(spam_f)],
colors=colors,
)
plt.title("Proportion of email that is spam")
plt.show
# Examine content of columns
# Check for Na/NaN
# Check for duplicates
# ### Generate word clouds
# See common words in spam / non-spam emails
# Select only spam emails and make one string with all spam contents
spam_only = data.v2[data.v1 == "spam"]
spam_text = " ".join(email for email in spam_only)
# similarly for non-spam
ham_only = data.v2[data.v1 == "ham"]
ham_text = " ".join(email for email in ham_only)
# spam_text[:1000]
# ham_text[:1000]
# Create word clouds for spam and ham texts using the .generate() method from wordcloud.WordCloud
spam_cloud = wordcloud.WordCloud(
collocations=False, background_color="white", width=2048, height=1080
).generate(spam_text)
ham_cloud = wordcloud.WordCloud(
collocations=False, background_color="white", width=2048, height=1080
).generate(ham_text)
# saving the image
spam_cloud.to_file("got_spam.png")
ham_cloud.to_file("got_ham.png")
# figure size in inches optional
mpl.rcParams["figure.figsize"] = 18, 6
# display two word clouds side by side
fig, ax = plt.subplots(1, 2, constrained_layout=False, sharey=True)
ax[0].set_title("Spam \n", fontsize=16)
ax[0].imshow(spam_cloud)
ax[0].axis("off") # turn off axes
# ax[0].set_xlabel('Spam \n',fontsize=16)
ax[1].set_title("Non-spam \n", fontsize=16)
ax[1].imshow(ham_cloud)
ax[1].axis("off")
fig.suptitle("Common words in spam and non-spam emails", fontsize=20)
# ### Đặt các tính năng dự đoán và mục tiêu
# * Tính năng dự đoán - nội dung email - biến độc lập
# * Mục tiêu dự đoán - thư rác hoặc nhãn ham - biến phụ thuộc
#
# Set prediction target and feature variable
X = data.v2 # Email text is the feature used in prediction
y = data.v1 # Spam/ ham classification - prediciton target
# ## Support Vector Machine (SVM)
# Đầu tiên, dữ liệu được chia thành các tập con huấn luyện (phù hợp) và thử nghiệm, mô hình SVM được tạo và huấn luyện, và cuối cùng được kiểm tra khả năng xác định chính xác thư rác trong dữ liệu không nhìn thấy.
# Quy trình này được lặp lại và ghi lại để tính giá trị trung bình và S.D. về độ chính xác của dự đoán.
# Record time to measure model runtime
start_time = time.time()
# List to log accuracy of N runs
accuracy_log = []
N = 40
for i in range(N):
# Split data into train and test sets
X_train, X_test, y_train, y_test = model_selection.train_test_split(
X, y, test_size=0.25
)
# Feature extraction
cv = feature_extraction.text.CountVectorizer()
features = cv.fit_transform(X_train)
# Create and train SVM model
model = svm.SVC()
model.fit(features, y_train)
# print('SVM Model fitted')
features_test = cv.transform(X_test)
accuracy_log.append(model.score(features_test, y_test))
# Log SVM model runtime
execution_time_svm = time.time() - start_time
# Calculate meand and SD of prediction accuracy for SVM model
svm_mean = np.mean(accuracy_log)
svm_sd = np.std(accuracy_log)
print("\n")
print(accuracy_log)
print("\n")
print("Data split for SVM train and test {} times.".format(N))
print("\n")
print("Execution time in seconds: " + str(round(execution_time_svm, 1)))
print("\n")
print("Accuracy mean and S.D are {:.3f} and {:.3f} ".format(svm_mean, svm_sd))
# #### Questions on SVM:
# * Tôi có thể xem các vectơ hỗ trợ để trợ giúp về khả năng giải thích, ngữ cảnh hóa và trực quan hóa mô hình không**
# * Lề - cứng/mềm - cách xác định
# ## Naive Bayes
# Trình phân loại Naive Bayes là một mô hình phân loại ML theo xác suất đơn giản. Dựa trên phân loại thống kê bằng Định lý Bayes, nó giả định không có sự phụ thuộc giữa các đặc điểm được sử dụng trong dự đoán và hoạt động tốt nhất khi điều này đúng.
# ### Mutlinomial Naive Bayes (MNB)
# The MNB is often used to classify and categorise text according to word frequency, and is suited to classifying email as spam.
start_time = time.time() # Record time to measure model runtime
mnb_log = [] # log accuracy of each split
N = 40 # number splits
for i in range(N):
# Split data into train and test sets
X_train, X_test, y_train, y_test = model_selection.train_test_split(
X, y, test_size=0.25
)
# Feature extraction
cv = feature_extraction.text.CountVectorizer()
features = cv.fit_transform(X_train)
# Fit MNB model
model_mnb = naive_bayes.MultinomialNB()
model_mnb.fit(features, y_train)
# Test model
features_test = cv.transform(X_test)
mnb_log.append(model_mnb.score(features_test, y_test))
# Log MNB model runtime
execution_time_mnb = time.time() - start_time
mnb_mean = np.mean(mnb_log)
mnb_sd = np.std(mnb_log)
print("\n")
print("Data split for multinomial NB train and test {} times.".format(N))
print("\n")
print("Accuracy mean and S.D are {:.3f} and {:.3f} ".format(mnb_mean, mnb_sd))
print("\n")
print("Execution time in seconds: " + str(round(execution_time_mnb, 1)))
# ### Complement Naive Bayes (CNB)
# CNB là một biến thể của mô hình MNB đặc biệt phù hợp với các tập dữ liệu mất cân bằng
start_time = time.time() # Record time to measure model runtime
N = 40 # Number of cycles
cnb_log = [] # log accuracy of N cycles
for i in range(N):
# Split data into train and test sets
X_train, X_test, y_train, y_test = model_selection.train_test_split(
X, y, test_size=0.25
)
# Feature extraction
cv = feature_extraction.text.CountVectorizer()
features = cv.fit_transform(X_train)
# Fit model
model_cnb = naive_bayes.ComplementNB()
model_cnb.fit(features, y_train)
# Test model
features_test = cv.transform(X_test)
cnb_log.append(model_cnb.score(features_test, y_test))
# Log CNB model runtime
execution_time_cnb = time.time() - start_time
cnb_mean = np.mean(cnb_log)
cnb_sd = np.std(cnb_log)
print("\n")
print("Data split for complement NB train and test {} times.".format(N))
print("\n")
print("Accuracy mean and S.D are {:.3f} and {:.3f} ".format(cnb_mean, cnb_sd))
print("\n")
print("CNB execution time in seconds: " + str(round(execution_time_cnb, 1)))
# ## Random Forest
# Trình phân loại RF là một mô hình ML được giám sát khác phù hợp với phân loại văn bản dựa trên tần suất xuất hiện của từ trong văn bản.
start_time = time.time() # Record time to measure model runtime
N = 40 # Number cycles
rf_log = [] # Log accuracy N cycles
for i in range(N):
# As before, split data into train and test sets
X_train, X_test, y_train, y_test = model_selection.train_test_split(
X, y, test_size=0.25
)
# Feature extraction
cv = feature_extraction.text.CountVectorizer()
features = cv.fit_transform(X_train)
# create and train Random Forest Model
forest_model = ensemble.RandomForestClassifier(random_state=1)
forest_model.fit(features, y_train) # Sample weight ???
# Test model
features_test = cv.transform(X_test)
rf_log.append(forest_model.score(features_test, y_test))
# Log RFmodel runtime
execution_time_rf = time.time() - start_time
rf_mean = np.mean(rf_log)
rf_sd = np.std(rf_log)
print("\n")
print("Data split for Random Forest train and test {} times.".format(N))
print("\n")
print("Accuracy mean and S.D are {:.3f} and {:.3f} ".format(rf_mean, rf_sd))
print("\n")
print("RF execution time in seconds: " + str(round(execution_time_rf, 1)))
# ## Compare model performance - accuracy and runtime
# Accuracy mean and SD for SVM, MNB, CNB and RF collected into table and plotted.
accuracy_data = {
"Model": [
"Support Vector Machine",
"Multinomial Naive Bayes",
"Complement Naive Bayes",
"Random Forest",
],
"Mean Accuracy": [svm_mean, mnb_mean, cnb_mean, rf_mean],
"SD": [svm_sd, mnb_sd, cnb_sd, rf_sd],
"Runtime (s)": [
execution_time_svm / N,
execution_time_mnb / N,
execution_time_cnb / N,
execution_time_rf / N,
],
}
accuracy_df = pd.DataFrame(data=accuracy_data)
print("\n")
print("Comparison of the accuracy mean and SD for 100 cycles split-train-test")
print("\n")
accuracy_df.round(3)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/141/129141207.ipynb
|
spam-email-data
|
ksenia5
|
[{"Id": 129141207, "ScriptId": 38386288, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14987036, "CreationDate": "05/11/2023 09:50:45", "VersionNumber": 2.0, "Title": "Comparison of supervised ML spam detection", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 327.0, "LinesInsertedFromPrevious": 26.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 301.0, "LinesInsertedFromFork": 26.0, "LinesDeletedFromFork": 30.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 301.0, "TotalVotes": 0}]
|
[{"Id": 184928810, "KernelVersionId": 129141207, "SourceDatasetVersionId": 4294141}]
|
[{"Id": 4294141, "DatasetId": 2530254, "DatasourceVersionId": 4352058, "CreatorUserId": 11724440, "LicenseName": "Unknown", "CreationDate": "10/07/2022 10:48:58", "VersionNumber": 1.0, "Title": "spamEmailData", "Slug": "spam-email-data", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 2530254, "CreatorUserId": 11724440, "OwnerUserId": 11724440.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4294141.0, "CurrentDatasourceVersionId": 4352058.0, "ForumId": 2558999, "Type": 2, "CreationDate": "10/07/2022 10:48:58", "LastActivityDate": "10/07/2022", "TotalViews": 168, "TotalDownloads": 12, "TotalVotes": 5, "TotalKernels": 2}]
|
[{"Id": 11724440, "UserName": "ksenia5", "DisplayName": "Ksenia-5", "RegisterDate": "09/26/2022", "PerformanceTier": 2}]
|
# #Máy học được giám sát để phát hiện thư rác
# Ba mô hình ML để phân loại dữ liệu văn bản email là thư rác:
# 1. **Support Vector Machine (SVM)**
# 1. **Naive Bayes - Multinomial and Complement**
# 1. **Random Forest**
# Những mô hình này đã được chọn vì chúng tốt cho việc phân loại dữ liệu văn bản.
# Dữ liệu email spam được chia thành các tập hợp con đào tạo và kiểm tra theo tỷ lệ 0,75 đến 0,25.
# Đối với mỗi mô hình, chu trình phân tách, huấn luyện và kiểm tra được lặp lại 100 lần, ghi lại độ chính xác và báo cáo giá trị trung bình và độ lệch chuẩn (S.D.) của nó.
# #### **Libraries**
# First, importing the libraries.
import os
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import time
import matplotlib as mpl
import matplotlib.pyplot as plt
import wordcloud
# ML learning model selection, metrics, feature extraction
from sklearn import (
ensemble,
feature_extraction,
model_selection,
naive_bayes,
metrics,
svm,
)
# To see available files
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# #### **Lấy và làm sạch dữ liệu**
# Tiếp theo, đọc dữ liệu, xem xét các biến và xóa các cột không mong muốn.
# Read into dataframe
file_path = "../input/spam-email-data/spam.csv"
data = pd.read_csv(file_path)
# Drop extra columns; use data.columns to see column names
# data = data.drop(data.columns[2:], axis=1)
data.head(n=8) # Look at data, equivalent to data.iloc[:n]
# print(data.shape)
# #### **Exploring data**
# Tiếp theo, kiểm tra tỷ lệ email spam và không phải spam trong tập dữ liệu.
# Calculate spam ratio, to understand proportion of dataset that is spam:
spam_count = sum(True for entry in data.v1 if entry == "spam")
ham_count = sum(True for entry in data.v1 if entry == "ham")
spam_f = spam_count / len(data.v1)
ham_f = ham_count / len(data.v1)
print("Total count is {} \n".format(spam_count + ham_count))
print(
"Spam fraction = {} / {} = {:.3f} \n\nHam fraction = {} / {} = {:.3f} \n\n".format(
spam_count, len(data.v1), spam_f, ham_count, len(data.v1), ham_f
)
)
# Plot pie chart of spam and ham
colors = [(0.1, 0.9, 0.9, 0.7), (0.9, 0.2, 0.2, 0.7)]
plt.pie(
[ham_count, spam_count],
labels=["non-spam: {:.3f}".format(ham_f), "spam: {:.3f}".format(spam_f)],
colors=colors,
)
plt.title("Proportion of email that is spam")
plt.show
# Examine content of columns
# Check for Na/NaN
# Check for duplicates
# ### Generate word clouds
# See common words in spam / non-spam emails
# Select only spam emails and make one string with all spam contents
spam_only = data.v2[data.v1 == "spam"]
spam_text = " ".join(email for email in spam_only)
# similarly for non-spam
ham_only = data.v2[data.v1 == "ham"]
ham_text = " ".join(email for email in ham_only)
# spam_text[:1000]
# ham_text[:1000]
# Create word clouds for spam and ham texts using the .generate() method from wordcloud.WordCloud
spam_cloud = wordcloud.WordCloud(
collocations=False, background_color="white", width=2048, height=1080
).generate(spam_text)
ham_cloud = wordcloud.WordCloud(
collocations=False, background_color="white", width=2048, height=1080
).generate(ham_text)
# saving the image
spam_cloud.to_file("got_spam.png")
ham_cloud.to_file("got_ham.png")
# figure size in inches optional
mpl.rcParams["figure.figsize"] = 18, 6
# display two word clouds side by side
fig, ax = plt.subplots(1, 2, constrained_layout=False, sharey=True)
ax[0].set_title("Spam \n", fontsize=16)
ax[0].imshow(spam_cloud)
ax[0].axis("off") # turn off axes
# ax[0].set_xlabel('Spam \n',fontsize=16)
ax[1].set_title("Non-spam \n", fontsize=16)
ax[1].imshow(ham_cloud)
ax[1].axis("off")
fig.suptitle("Common words in spam and non-spam emails", fontsize=20)
# ### Đặt các tính năng dự đoán và mục tiêu
# * Tính năng dự đoán - nội dung email - biến độc lập
# * Mục tiêu dự đoán - thư rác hoặc nhãn ham - biến phụ thuộc
#
# Set prediction target and feature variable
X = data.v2 # Email text is the feature used in prediction
y = data.v1 # Spam/ ham classification - prediciton target
# ## Support Vector Machine (SVM)
# Đầu tiên, dữ liệu được chia thành các tập con huấn luyện (phù hợp) và thử nghiệm, mô hình SVM được tạo và huấn luyện, và cuối cùng được kiểm tra khả năng xác định chính xác thư rác trong dữ liệu không nhìn thấy.
# Quy trình này được lặp lại và ghi lại để tính giá trị trung bình và S.D. về độ chính xác của dự đoán.
# Record time to measure model runtime
start_time = time.time()
# List to log accuracy of N runs
accuracy_log = []
N = 40
for i in range(N):
# Split data into train and test sets
X_train, X_test, y_train, y_test = model_selection.train_test_split(
X, y, test_size=0.25
)
# Feature extraction
cv = feature_extraction.text.CountVectorizer()
features = cv.fit_transform(X_train)
# Create and train SVM model
model = svm.SVC()
model.fit(features, y_train)
# print('SVM Model fitted')
features_test = cv.transform(X_test)
accuracy_log.append(model.score(features_test, y_test))
# Log SVM model runtime
execution_time_svm = time.time() - start_time
# Calculate meand and SD of prediction accuracy for SVM model
svm_mean = np.mean(accuracy_log)
svm_sd = np.std(accuracy_log)
print("\n")
print(accuracy_log)
print("\n")
print("Data split for SVM train and test {} times.".format(N))
print("\n")
print("Execution time in seconds: " + str(round(execution_time_svm, 1)))
print("\n")
print("Accuracy mean and S.D are {:.3f} and {:.3f} ".format(svm_mean, svm_sd))
# #### Questions on SVM:
# * Tôi có thể xem các vectơ hỗ trợ để trợ giúp về khả năng giải thích, ngữ cảnh hóa và trực quan hóa mô hình không**
# * Lề - cứng/mềm - cách xác định
# ## Naive Bayes
# Trình phân loại Naive Bayes là một mô hình phân loại ML theo xác suất đơn giản. Dựa trên phân loại thống kê bằng Định lý Bayes, nó giả định không có sự phụ thuộc giữa các đặc điểm được sử dụng trong dự đoán và hoạt động tốt nhất khi điều này đúng.
# ### Mutlinomial Naive Bayes (MNB)
# The MNB is often used to classify and categorise text according to word frequency, and is suited to classifying email as spam.
start_time = time.time() # Record time to measure model runtime
mnb_log = [] # log accuracy of each split
N = 40 # number splits
for i in range(N):
# Split data into train and test sets
X_train, X_test, y_train, y_test = model_selection.train_test_split(
X, y, test_size=0.25
)
# Feature extraction
cv = feature_extraction.text.CountVectorizer()
features = cv.fit_transform(X_train)
# Fit MNB model
model_mnb = naive_bayes.MultinomialNB()
model_mnb.fit(features, y_train)
# Test model
features_test = cv.transform(X_test)
mnb_log.append(model_mnb.score(features_test, y_test))
# Log MNB model runtime
execution_time_mnb = time.time() - start_time
mnb_mean = np.mean(mnb_log)
mnb_sd = np.std(mnb_log)
print("\n")
print("Data split for multinomial NB train and test {} times.".format(N))
print("\n")
print("Accuracy mean and S.D are {:.3f} and {:.3f} ".format(mnb_mean, mnb_sd))
print("\n")
print("Execution time in seconds: " + str(round(execution_time_mnb, 1)))
# ### Complement Naive Bayes (CNB)
# CNB là một biến thể của mô hình MNB đặc biệt phù hợp với các tập dữ liệu mất cân bằng
start_time = time.time() # Record time to measure model runtime
N = 40 # Number of cycles
cnb_log = [] # log accuracy of N cycles
for i in range(N):
# Split data into train and test sets
X_train, X_test, y_train, y_test = model_selection.train_test_split(
X, y, test_size=0.25
)
# Feature extraction
cv = feature_extraction.text.CountVectorizer()
features = cv.fit_transform(X_train)
# Fit model
model_cnb = naive_bayes.ComplementNB()
model_cnb.fit(features, y_train)
# Test model
features_test = cv.transform(X_test)
cnb_log.append(model_cnb.score(features_test, y_test))
# Log CNB model runtime
execution_time_cnb = time.time() - start_time
cnb_mean = np.mean(cnb_log)
cnb_sd = np.std(cnb_log)
print("\n")
print("Data split for complement NB train and test {} times.".format(N))
print("\n")
print("Accuracy mean and S.D are {:.3f} and {:.3f} ".format(cnb_mean, cnb_sd))
print("\n")
print("CNB execution time in seconds: " + str(round(execution_time_cnb, 1)))
# ## Random Forest
# Trình phân loại RF là một mô hình ML được giám sát khác phù hợp với phân loại văn bản dựa trên tần suất xuất hiện của từ trong văn bản.
start_time = time.time() # Record time to measure model runtime
N = 40 # Number cycles
rf_log = [] # Log accuracy N cycles
for i in range(N):
# As before, split data into train and test sets
X_train, X_test, y_train, y_test = model_selection.train_test_split(
X, y, test_size=0.25
)
# Feature extraction
cv = feature_extraction.text.CountVectorizer()
features = cv.fit_transform(X_train)
# create and train Random Forest Model
forest_model = ensemble.RandomForestClassifier(random_state=1)
forest_model.fit(features, y_train) # Sample weight ???
# Test model
features_test = cv.transform(X_test)
rf_log.append(forest_model.score(features_test, y_test))
# Log RFmodel runtime
execution_time_rf = time.time() - start_time
rf_mean = np.mean(rf_log)
rf_sd = np.std(rf_log)
print("\n")
print("Data split for Random Forest train and test {} times.".format(N))
print("\n")
print("Accuracy mean and S.D are {:.3f} and {:.3f} ".format(rf_mean, rf_sd))
print("\n")
print("RF execution time in seconds: " + str(round(execution_time_rf, 1)))
# ## Compare model performance - accuracy and runtime
# Accuracy mean and SD for SVM, MNB, CNB and RF collected into table and plotted.
accuracy_data = {
"Model": [
"Support Vector Machine",
"Multinomial Naive Bayes",
"Complement Naive Bayes",
"Random Forest",
],
"Mean Accuracy": [svm_mean, mnb_mean, cnb_mean, rf_mean],
"SD": [svm_sd, mnb_sd, cnb_sd, rf_sd],
"Runtime (s)": [
execution_time_svm / N,
execution_time_mnb / N,
execution_time_cnb / N,
execution_time_rf / N,
],
}
accuracy_df = pd.DataFrame(data=accuracy_data)
print("\n")
print("Comparison of the accuracy mean and SD for 100 cycles split-train-test")
print("\n")
accuracy_df.round(3)
| false | 0 | 3,758 | 0 | 3,779 | 3,758 |
||
129141881
|
<jupyter_start><jupyter_text>AirIndia Monthly Passenger Traffic
This dataset is about airline operations and performance. The data is quantitative and numerical in nature. It can be analyzed and used to derive insights on the airline's performance, capacity utilization, revenue generation, and efficiency. This type of data is commonly used in the airline industry for performance analysis, benchmarking, and decision-making purposes.
**1. Month:** This column refers to the month in which the data was recorded.
**2. DEPARTURES:** The number of flights that departed during the month in question.
**3. HOURS:** Hours flown by the airline during the month in question. This can be used to track the airline's utilization of its fleet.
**4. KILOMETRE(TH):** Kilometers flown by the airline during the month, measured in thousands. This can be used to track the airline's overall operational performance.
**5. PASSENGERS CARRIED:** Number of passengers carried by the airline during a given month.
**6. PASSENGER KMS.PERFORMED(TH):** Passenger kilometers performed by the airline during the month, measured in thousands. This can be used to track the airline's revenue performance.
**7. AVAILABLE SEAT KILOMETRE(TH):** Seat kilometers available on the airline's flights during the month, measured in thousands. This can be used to track the airline's capacity utilization.
**8. PAX.LOAD FACTOR (IN %)**: Percentage of available seats that were actually occupied by passengers during the month in question. This is a key metric for airlines, as it indicates how effectively they are filling their planes.
Kaggle dataset identifier: airindia-monthly-passenger-traffic
<jupyter_script># About Dataset
# This dataset is about airline operations and performance. The data is quantitative and numerical in nature. It can be analyzed and used to derive insights on the airline's performance, capacity utilization, revenue generation, and efficiency. This type of data is commonly used in the airline industry for performance analysis, benchmarking, and decision-making purposes.
# 1. Month: This column refers to the month in which the data was recorded.
# 2. DEPARTURES: The number of flights that departed during the month in question.
# 3. HOURS: Hours flown by the airline during the month in question. This can be used to track the airline's utilization of its fleet.
# 4. KILOMETRE(TH): Kilometers flown by the airline during the month, measured in thousands. This can be used to track the airline's overall operational performance.
# 5. PASSENGERS CARRIED: Number of passengers carried by the airline during a given month.
# 6. PASSENGER KMS.PERFORMED(TH): Passenger kilometers performed by the airline during the month, measured in thousands. This can be used to track the airline's revenue performance.
# 7. AVAILABLE SEAT KILOMETRE(TH): Seat kilometers available on the airline's flights during the month, measured in thousands. This can be used to track the airline's capacity utilization.
# 8. PAX.LOAD FACTOR (IN %): Percentage of available seats that were actually occupied by passengers during the month in question. This is a key metric for airlines, as it indicates how effectively they are filling their planes.
# Importing Lib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
from scipy.stats import skew
# Reading Data
df = pd.read_csv("AirIndia (Domestic).csv")
# Head of data
df.head()
# INFO
df.info()
# Checking Null Values
df.isnull().sum()
# Replacing Null Values with ( 0 )
df.fillna(0, inplace=True)
# If there is any Null values left after replacing null values with 0
#
df.isnull().sum()
# Unique
for i in df.columns:
print(i, "---->", df[i].unique())
# Pair plot to view linear relation between values
sns.pairplot(data=df)
# Shape
#
df.shape
# Correlation between Values
df.corr()
# Columns
#
df.columns
# Change columns name
df.rename(columns={"DEPARTURES\n": "DEPARTURES"}, inplace=True)
df.rename(columns={"HOURS\n": "HOURS"}, inplace=True)
df.rename(columns={"KILOMETER\n(TH)": "KILOMETER_TH"}, inplace=True)
df.rename(columns={"PASSENGERS CARRIED\n": "PASSENGERS_CARRIED"}, inplace=True)
df.rename(
columns={"AVAILABLE SEAT KILOMETRE\n(TH)": "AVAILABLE_SEAT_KILOMETRE_TH"},
inplace=True,
)
df.rename(columns={" PAX. LOAD FACTOR#\n(IN %)": "PAX_LOAD_FACTOR_ %"}, inplace=True)
df.rename(columns={"Month": "MONTHS"}, inplace=True)
df.rename(
columns={"PASSENGER KMS. PERFORMED\n(TH)": "MONTHPASSENGER_KMS_PERFORMED_TH"},
inplace=True,
)
df.head()
# Cat_cols , Num_cols
cat_cols = df.columns[df.dtypes == object]
num_cols = df.select_dtypes(include=np.number).columns.tolist()
print("The categorical columns are", cat_cols)
print("The numerical columns are", num_cols)
# EDA
for col in df.select_dtypes(include=np.number).columns.tolist():
print(col)
plt.figure(figsize=(15, 4))
plt.subplot(1, 2, 1)
sns.distplot(a=df[col], bins=20, color="green", hist_kws={"edgecolor": "black"})
plt.ylabel("count")
plt.subplot(1, 2, 2)
sns.boxplot(x=df[col], color="pink")
plt.show()
# as we replaced nan value with (0) therefor 0 should not to be in data as 0 is non reasonable cause flight still there entry is zero doesn't make sense right?
df[df["DEPARTURES"] < 100]
# Correlation
#
df.corr()
# Heat Map
plt.figure(figsize=(15, 15))
sns.heatmap(df.drop(["MONTHS", "FY"], axis=1).corr(), annot=True, vmin=-1, vmax=1)
plt.show()
# Month Kilomerter
month_kilometer = (
df.groupby(["MONTHS"])["KILOMETER_TH"].mean().reset_index().sort_values("MONTHS")
)
sns.barplot(x="MONTHS", y="KILOMETER_TH", data=df)
plt.show()
# Flight Carried Month
carried_months = (
df.groupby(["MONTHS"])["PASSENGERS_CARRIED"]
.mean()
.reset_index()
.sort_values("MONTHS")
)
sns.barplot(x="MONTHS", y="PASSENGERS_CARRIED", data=df)
plt.show()
# Month Passenger Km
month_passenger_kms = (
df.groupby(["MONTHS"])["MONTHPASSENGER_KMS_PERFORMED_TH"]
.mean()
.reset_index()
.sort_values("MONTHS")
)
sns.barplot(x="MONTHS", y="MONTHPASSENGER_KMS_PERFORMED_TH", data=df)
plt.show()
# Available Seats Kilometer
AVAILABLE_SEAT_KILOMETRE_TH_MONTHS = (
df.groupby(["MONTHS"])["AVAILABLE_SEAT_KILOMETRE_TH"]
.mean()
.reset_index()
.sort_values("MONTHS")
)
sns.barplot(x="MONTHS", y="AVAILABLE_SEAT_KILOMETRE_TH", data=df)
plt.show()
# HOURS VS MONTHS
HOURS_MONTHS = (
df.groupby(["MONTHS"])["HOURS"].mean().reset_index().sort_values("MONTHS")
)
sns.barplot(x="MONTHS", y="HOURS", data=df)
plt.show()
num_cols = [
"DEPARTURES",
"HOURS",
"KILOMETER_TH",
"PASSENGERS_CARRIED",
"MONTHPASSENGER_KMS_PERFORMED_TH",
"AVAILABLE_SEAT_KILOMETRE_TH",
]
for column in num_cols:
print(column)
print(df[column][pd.to_numeric(df[column], errors="coerce").isnull()])
# Scatter Plot
def plot_scatter(df, cols, col_y="PAX_LOAD_FACTOR_ %"):
for col in cols:
fig = plt.figure(figsize=(12, 6)) # define plot area
ax = fig.gca() # define axis
df.plot.scatter(x=col, y=col_y, ax=ax)
ax.set_title(
"Scatter plot of " + col_y + " vs. " + col
) # Give the plot a main title
ax.set_xlabel(col) # Set text for the x axis
ax.set_ylabel(col_y) # Set text for y axis
plt.show()
num_cols = [
"DEPARTURES",
"HOURS",
"KILOMETER_TH",
"PASSENGERS_CARRIED",
"MONTHPASSENGER_KMS_PERFORMED_TH",
"AVAILABLE_SEAT_KILOMETRE_TH",
]
plot_scatter(df, num_cols)
def plot_desity_2d(df, cols, col_y="PAX_LOAD_FACTOR_ %", kind="kde"):
for col in cols:
sns.set_style("whitegrid")
sns.jointplot(col, col_y, data=df, kind=kind)
plt.xlabel(col) # Set text for the x axis
plt.ylabel(col_y) # Set text for y axis
plt.show()
plot_desity_2d(df, num_cols)
FY = df["FY"].value_counts()
plt.figure(figsize=(8, 8))
plt.pie(
FY, labels=df["FY"].value_counts().head(10).index, autopct="%0.0f%%", shadow=True
)
plt.show()
# ['DEPARTURES', 'HOURS', 'KILOMETER_TH', 'PASSENGERS_CARRIED', 'MONTHPASSENGER_KMS_PERFORMED_TH', 'AVAILABLE_SEAT_KILOMETRE_TH']
sns.barplot(data=df, x="FY", y="DEPARTURES")
plt.show()
sns.barplot(data=df, x="FY", y="HOURS")
plt.show()
sns.barplot(data=df, x="FY", y="KILOMETER_TH")
plt.show()
sns.barplot(data=df, x="FY", y="PASSENGERS_CARRIED")
plt.show()
sns.barplot(data=df, x="FY", y="MONTHPASSENGER_KMS_PERFORMED_TH")
plt.show()
sns.barplot(data=df, x="FY", y="AVAILABLE_SEAT_KILOMETRE_TH")
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/141/129141881.ipynb
|
airindia-monthly-passenger-traffic
|
nishantbhardwaj07
|
[{"Id": 129141881, "ScriptId": 38392714, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10864913, "CreationDate": "05/11/2023 09:57:03", "VersionNumber": 1.0, "Title": "AirIndia(Domestic)", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 205.0, "LinesInsertedFromPrevious": 205.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
|
[{"Id": 184930000, "KernelVersionId": 129141881, "SourceDatasetVersionId": 5467490}]
|
[{"Id": 5467490, "DatasetId": 3156392, "DatasourceVersionId": 5541690, "CreatorUserId": 9217065, "LicenseName": "Other (specified in description)", "CreationDate": "04/20/2023 11:26:18", "VersionNumber": 7.0, "Title": "AirIndia Monthly Passenger Traffic", "Slug": "airindia-monthly-passenger-traffic", "Subtitle": "Monthly Traffic & Operating Data of AirIndia for past 10 Financial Years FY14-23", "Description": "This dataset is about airline operations and performance. The data is quantitative and numerical in nature. It can be analyzed and used to derive insights on the airline's performance, capacity utilization, revenue generation, and efficiency. This type of data is commonly used in the airline industry for performance analysis, benchmarking, and decision-making purposes.\n\n**1. Month:** This column refers to the month in which the data was recorded.\n\n**2. DEPARTURES:** The number of flights that departed during the month in question.\n\n**3. HOURS:** Hours flown by the airline during the month in question. This can be used to track the airline's utilization of its fleet.\n\n**4. KILOMETRE(TH):** Kilometers flown by the airline during the month, measured in thousands. This can be used to track the airline's overall operational performance.\n\n**5. PASSENGERS CARRIED:** Number of passengers carried by the airline during a given month.\n\n**6. PASSENGER KMS.PERFORMED(TH):** Passenger kilometers performed by the airline during the month, measured in thousands. This can be used to track the airline's revenue performance.\n\n**7. AVAILABLE SEAT KILOMETRE(TH):** Seat kilometers available on the airline's flights during the month, measured in thousands. This can be used to track the airline's capacity utilization.\n\n**8. PAX.LOAD FACTOR (IN %)**: Percentage of available seats that were actually occupied by passengers during the month in question. This is a key metric for airlines, as it indicates how effectively they are filling their planes.", "VersionNotes": "Data Update 2023-04-20", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3156392, "CreatorUserId": 9217065, "OwnerUserId": 9217065.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5467490.0, "CurrentDatasourceVersionId": 5541690.0, "ForumId": 3220424, "Type": 2, "CreationDate": "04/20/2023 04:40:15", "LastActivityDate": "04/20/2023", "TotalViews": 8752, "TotalDownloads": 1694, "TotalVotes": 38, "TotalKernels": 8}]
|
[{"Id": 9217065, "UserName": "nishantbhardwaj07", "DisplayName": "NishantBhardwaj07", "RegisterDate": "12/20/2021", "PerformanceTier": 1}]
|
# About Dataset
# This dataset is about airline operations and performance. The data is quantitative and numerical in nature. It can be analyzed and used to derive insights on the airline's performance, capacity utilization, revenue generation, and efficiency. This type of data is commonly used in the airline industry for performance analysis, benchmarking, and decision-making purposes.
# 1. Month: This column refers to the month in which the data was recorded.
# 2. DEPARTURES: The number of flights that departed during the month in question.
# 3. HOURS: Hours flown by the airline during the month in question. This can be used to track the airline's utilization of its fleet.
# 4. KILOMETRE(TH): Kilometers flown by the airline during the month, measured in thousands. This can be used to track the airline's overall operational performance.
# 5. PASSENGERS CARRIED: Number of passengers carried by the airline during a given month.
# 6. PASSENGER KMS.PERFORMED(TH): Passenger kilometers performed by the airline during the month, measured in thousands. This can be used to track the airline's revenue performance.
# 7. AVAILABLE SEAT KILOMETRE(TH): Seat kilometers available on the airline's flights during the month, measured in thousands. This can be used to track the airline's capacity utilization.
# 8. PAX.LOAD FACTOR (IN %): Percentage of available seats that were actually occupied by passengers during the month in question. This is a key metric for airlines, as it indicates how effectively they are filling their planes.
# Importing Lib
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
from scipy.stats import skew
# Reading Data
df = pd.read_csv("AirIndia (Domestic).csv")
# Head of data
df.head()
# INFO
df.info()
# Checking Null Values
df.isnull().sum()
# Replacing Null Values with ( 0 )
df.fillna(0, inplace=True)
# If there is any Null values left after replacing null values with 0
#
df.isnull().sum()
# Unique
for i in df.columns:
print(i, "---->", df[i].unique())
# Pair plot to view linear relation between values
sns.pairplot(data=df)
# Shape
#
df.shape
# Correlation between Values
df.corr()
# Columns
#
df.columns
# Change columns name
df.rename(columns={"DEPARTURES\n": "DEPARTURES"}, inplace=True)
df.rename(columns={"HOURS\n": "HOURS"}, inplace=True)
df.rename(columns={"KILOMETER\n(TH)": "KILOMETER_TH"}, inplace=True)
df.rename(columns={"PASSENGERS CARRIED\n": "PASSENGERS_CARRIED"}, inplace=True)
df.rename(
columns={"AVAILABLE SEAT KILOMETRE\n(TH)": "AVAILABLE_SEAT_KILOMETRE_TH"},
inplace=True,
)
df.rename(columns={" PAX. LOAD FACTOR#\n(IN %)": "PAX_LOAD_FACTOR_ %"}, inplace=True)
df.rename(columns={"Month": "MONTHS"}, inplace=True)
df.rename(
columns={"PASSENGER KMS. PERFORMED\n(TH)": "MONTHPASSENGER_KMS_PERFORMED_TH"},
inplace=True,
)
df.head()
# Cat_cols , Num_cols
cat_cols = df.columns[df.dtypes == object]
num_cols = df.select_dtypes(include=np.number).columns.tolist()
print("The categorical columns are", cat_cols)
print("The numerical columns are", num_cols)
# EDA
for col in df.select_dtypes(include=np.number).columns.tolist():
print(col)
plt.figure(figsize=(15, 4))
plt.subplot(1, 2, 1)
sns.distplot(a=df[col], bins=20, color="green", hist_kws={"edgecolor": "black"})
plt.ylabel("count")
plt.subplot(1, 2, 2)
sns.boxplot(x=df[col], color="pink")
plt.show()
# as we replaced nan value with (0) therefor 0 should not to be in data as 0 is non reasonable cause flight still there entry is zero doesn't make sense right?
df[df["DEPARTURES"] < 100]
# Correlation
#
df.corr()
# Heat Map
plt.figure(figsize=(15, 15))
sns.heatmap(df.drop(["MONTHS", "FY"], axis=1).corr(), annot=True, vmin=-1, vmax=1)
plt.show()
# Month Kilomerter
month_kilometer = (
df.groupby(["MONTHS"])["KILOMETER_TH"].mean().reset_index().sort_values("MONTHS")
)
sns.barplot(x="MONTHS", y="KILOMETER_TH", data=df)
plt.show()
# Flight Carried Month
carried_months = (
df.groupby(["MONTHS"])["PASSENGERS_CARRIED"]
.mean()
.reset_index()
.sort_values("MONTHS")
)
sns.barplot(x="MONTHS", y="PASSENGERS_CARRIED", data=df)
plt.show()
# Month Passenger Km
month_passenger_kms = (
df.groupby(["MONTHS"])["MONTHPASSENGER_KMS_PERFORMED_TH"]
.mean()
.reset_index()
.sort_values("MONTHS")
)
sns.barplot(x="MONTHS", y="MONTHPASSENGER_KMS_PERFORMED_TH", data=df)
plt.show()
# Available Seats Kilometer
AVAILABLE_SEAT_KILOMETRE_TH_MONTHS = (
df.groupby(["MONTHS"])["AVAILABLE_SEAT_KILOMETRE_TH"]
.mean()
.reset_index()
.sort_values("MONTHS")
)
sns.barplot(x="MONTHS", y="AVAILABLE_SEAT_KILOMETRE_TH", data=df)
plt.show()
# HOURS VS MONTHS
HOURS_MONTHS = (
df.groupby(["MONTHS"])["HOURS"].mean().reset_index().sort_values("MONTHS")
)
sns.barplot(x="MONTHS", y="HOURS", data=df)
plt.show()
num_cols = [
"DEPARTURES",
"HOURS",
"KILOMETER_TH",
"PASSENGERS_CARRIED",
"MONTHPASSENGER_KMS_PERFORMED_TH",
"AVAILABLE_SEAT_KILOMETRE_TH",
]
for column in num_cols:
print(column)
print(df[column][pd.to_numeric(df[column], errors="coerce").isnull()])
# Scatter Plot
def plot_scatter(df, cols, col_y="PAX_LOAD_FACTOR_ %"):
for col in cols:
fig = plt.figure(figsize=(12, 6)) # define plot area
ax = fig.gca() # define axis
df.plot.scatter(x=col, y=col_y, ax=ax)
ax.set_title(
"Scatter plot of " + col_y + " vs. " + col
) # Give the plot a main title
ax.set_xlabel(col) # Set text for the x axis
ax.set_ylabel(col_y) # Set text for y axis
plt.show()
num_cols = [
"DEPARTURES",
"HOURS",
"KILOMETER_TH",
"PASSENGERS_CARRIED",
"MONTHPASSENGER_KMS_PERFORMED_TH",
"AVAILABLE_SEAT_KILOMETRE_TH",
]
plot_scatter(df, num_cols)
def plot_desity_2d(df, cols, col_y="PAX_LOAD_FACTOR_ %", kind="kde"):
for col in cols:
sns.set_style("whitegrid")
sns.jointplot(col, col_y, data=df, kind=kind)
plt.xlabel(col) # Set text for the x axis
plt.ylabel(col_y) # Set text for y axis
plt.show()
plot_desity_2d(df, num_cols)
FY = df["FY"].value_counts()
plt.figure(figsize=(8, 8))
plt.pie(
FY, labels=df["FY"].value_counts().head(10).index, autopct="%0.0f%%", shadow=True
)
plt.show()
# ['DEPARTURES', 'HOURS', 'KILOMETER_TH', 'PASSENGERS_CARRIED', 'MONTHPASSENGER_KMS_PERFORMED_TH', 'AVAILABLE_SEAT_KILOMETRE_TH']
sns.barplot(data=df, x="FY", y="DEPARTURES")
plt.show()
sns.barplot(data=df, x="FY", y="HOURS")
plt.show()
sns.barplot(data=df, x="FY", y="KILOMETER_TH")
plt.show()
sns.barplot(data=df, x="FY", y="PASSENGERS_CARRIED")
plt.show()
sns.barplot(data=df, x="FY", y="MONTHPASSENGER_KMS_PERFORMED_TH")
plt.show()
sns.barplot(data=df, x="FY", y="AVAILABLE_SEAT_KILOMETRE_TH")
plt.show()
| false | 0 | 2,400 | 2 | 2,818 | 2,400 |
||
129141030
|
<jupyter_start><jupyter_text>Video Game Sales
This dataset contains a list of video games with sales greater than 100,000 copies. It was generated by a scrape of [vgchartz.com][1].
Fields include
* Rank - Ranking of overall sales
* Name - The games name
* Platform - Platform of the games release (i.e. PC,PS4, etc.)
* Year - Year of the game's release
* Genre - Genre of the game
* Publisher - Publisher of the game
* NA_Sales - Sales in North America (in millions)
* EU_Sales - Sales in Europe (in millions)
* JP_Sales - Sales in Japan (in millions)
* Other_Sales - Sales in the rest of the world (in millions)
* Global_Sales - Total worldwide sales.
The script to scrape the data is available at https://github.com/GregorUT/vgchartzScrape.
It is based on BeautifulSoup using Python.
There are 16,598 records. 2 records were dropped due to incomplete information.
[1]: http://www.vgchartz.com/
Kaggle dataset identifier: videogamesales
<jupyter_code>import pandas as pd
df = pd.read_csv('videogamesales/vgsales.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 16598 entries, 0 to 16597
Data columns (total 11 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Rank 16598 non-null int64
1 Name 16598 non-null object
2 Platform 16598 non-null object
3 Year 16327 non-null float64
4 Genre 16598 non-null object
5 Publisher 16540 non-null object
6 NA_Sales 16598 non-null float64
7 EU_Sales 16598 non-null float64
8 JP_Sales 16598 non-null float64
9 Other_Sales 16598 non-null float64
10 Global_Sales 16598 non-null float64
dtypes: float64(6), int64(1), object(4)
memory usage: 1.4+ MB
<jupyter_text>Examples:
{
"Rank": 1,
"Name": "Wii Sports",
"Platform": "Wii",
"Year": 2006,
"Genre": "Sports",
"Publisher": "Nintendo",
"NA_Sales": 41.49,
"EU_Sales": 29.02,
"JP_Sales": 3.77,
"Other_Sales": 8.46,
"Global_Sales": 82.74
}
{
"Rank": 2,
"Name": "Super Mario Bros.",
"Platform": "NES",
"Year": 1985,
"Genre": "Platform",
"Publisher": "Nintendo",
"NA_Sales": 29.08,
"EU_Sales": 3.58,
"JP_Sales": 6.8100000000000005,
"Other_Sales": 0.77,
"Global_Sales": 40.24
}
{
"Rank": 3,
"Name": "Mario Kart Wii",
"Platform": "Wii",
"Year": 2008,
"Genre": "Racing",
"Publisher": "Nintendo",
"NA_Sales": 15.85,
"EU_Sales": 12.88,
"JP_Sales": 3.79,
"Other_Sales": 3.31,
"Global_Sales": 35.82
}
{
"Rank": 4,
"Name": "Wii Sports Resort",
"Platform": "Wii",
"Year": 2009,
"Genre": "Sports",
"Publisher": "Nintendo",
"NA_Sales": 15.75,
"EU_Sales": 11.01,
"JP_Sales": 3.2800000000000002,
"Other_Sales": 2.96,
"Global_Sales": 33.0
}
<jupyter_script>import pandas as pd
# **Creating Series From list**
l1 = ["adsgfgfasas", "ffdadd", "fdfasd", "faddads", "tgsdewez"]
pd.Series(l1)
l2 = [12, 32, 45, 23, 2, 3, 6, 3]
pd.Series(l2)
# **Creating Series from Dictionary**
d1 = {"bengluru": 242563433, "Delhi": 65632744, "Mumbai": 65486132}
pd.Series(d1)
# **Customized Index**
l4 = [23, 34, 32, 21]
l5 = ["a", "b", "c", "d"]
pd.Series(l4, index=l5)
# **Creating Series from read csv method**
data = pd.read_csv("/kaggle/input/videogamesales/vgsales.csv")
data
help(pd.read_csv)
l6 = pd.read_csv(
"/kaggle/input/videogamesales/vgsales.csv", usecols=["Name"], squeeze=True
)
l6
# **Attributes of Series**
a = {"bengluru": 242563433, "Delhi": 65632744, "Mumbai": 65486132, "Kolkata": 1326648}
s4 = pd.Series(a, name="Population")
s4
s4.values
s4.index
s4.dtype
s4.is_unique
s4.ndim
s4.shape
s4.size
s4.name
s5 = pd.read_csv("/kaggle/input/videogamesales/vgsales.csv", usecols=["Global_Sales"])
s5
s5.sum()
len(s5)
s5 = pd.Series([5, -1, 1, 2, 3])
s5
s5.prod()
s5.abs()
s5.idxmax()
s5.max()
len(s5)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/141/129141030.ipynb
|
videogamesales
|
gregorut
|
[{"Id": 129141030, "ScriptId": 38292809, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14585695, "CreationDate": "05/11/2023 09:49:20", "VersionNumber": 1.0, "Title": "Pandas", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 81.0, "LinesInsertedFromPrevious": 81.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 184928421, "KernelVersionId": 129141030, "SourceDatasetVersionId": 618}]
|
[{"Id": 618, "DatasetId": 284, "DatasourceVersionId": 618, "CreatorUserId": 462330, "LicenseName": "Unknown", "CreationDate": "10/26/2016 09:10:49", "VersionNumber": 2.0, "Title": "Video Game Sales", "Slug": "videogamesales", "Subtitle": "Analyze sales data from more than 16,500 games.", "Description": "This dataset contains a list of video games with sales greater than 100,000 copies. It was generated by a scrape of [vgchartz.com][1].\n\nFields include\n\n* Rank - Ranking of overall sales\n\n* Name - The games name\n\n* Platform - Platform of the games release (i.e. PC,PS4, etc.)\n\n* Year - Year of the game's release\n\n* Genre - Genre of the game\n\n* Publisher - Publisher of the game\n\n* NA_Sales - Sales in North America (in millions)\n\n* EU_Sales - Sales in Europe (in millions)\n\n* JP_Sales - Sales in Japan (in millions)\n\n* Other_Sales - Sales in the rest of the world (in millions)\n\n* Global_Sales - Total worldwide sales.\n\nThe script to scrape the data is available at https://github.com/GregorUT/vgchartzScrape.\nIt is based on BeautifulSoup using Python.\nThere are 16,598 records. 2 records were dropped due to incomplete information.\n\n\n [1]: http://www.vgchartz.com/", "VersionNotes": "Cleaned up formating", "TotalCompressedBytes": 1355781.0, "TotalUncompressedBytes": 1355781.0}]
|
[{"Id": 284, "CreatorUserId": 462330, "OwnerUserId": 462330.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 618.0, "CurrentDatasourceVersionId": 618.0, "ForumId": 1788, "Type": 2, "CreationDate": "10/26/2016 08:17:30", "LastActivityDate": "02/06/2018", "TotalViews": 1798828, "TotalDownloads": 471172, "TotalVotes": 5485, "TotalKernels": 1480}]
|
[{"Id": 462330, "UserName": "gregorut", "DisplayName": "GregorySmith", "RegisterDate": "11/09/2015", "PerformanceTier": 1}]
|
import pandas as pd
# **Creating Series From list**
l1 = ["adsgfgfasas", "ffdadd", "fdfasd", "faddads", "tgsdewez"]
pd.Series(l1)
l2 = [12, 32, 45, 23, 2, 3, 6, 3]
pd.Series(l2)
# **Creating Series from Dictionary**
d1 = {"bengluru": 242563433, "Delhi": 65632744, "Mumbai": 65486132}
pd.Series(d1)
# **Customized Index**
l4 = [23, 34, 32, 21]
l5 = ["a", "b", "c", "d"]
pd.Series(l4, index=l5)
# **Creating Series from read csv method**
data = pd.read_csv("/kaggle/input/videogamesales/vgsales.csv")
data
help(pd.read_csv)
l6 = pd.read_csv(
"/kaggle/input/videogamesales/vgsales.csv", usecols=["Name"], squeeze=True
)
l6
# **Attributes of Series**
a = {"bengluru": 242563433, "Delhi": 65632744, "Mumbai": 65486132, "Kolkata": 1326648}
s4 = pd.Series(a, name="Population")
s4
s4.values
s4.index
s4.dtype
s4.is_unique
s4.ndim
s4.shape
s4.size
s4.name
s5 = pd.read_csv("/kaggle/input/videogamesales/vgsales.csv", usecols=["Global_Sales"])
s5
s5.sum()
len(s5)
s5 = pd.Series([5, -1, 1, 2, 3])
s5
s5.prod()
s5.abs()
s5.idxmax()
s5.max()
len(s5)
<start_data_description><data_path>videogamesales/vgsales.csv:
<column_names>
['Rank', 'Name', 'Platform', 'Year', 'Genre', 'Publisher', 'NA_Sales', 'EU_Sales', 'JP_Sales', 'Other_Sales', 'Global_Sales']
<column_types>
{'Rank': 'int64', 'Name': 'object', 'Platform': 'object', 'Year': 'float64', 'Genre': 'object', 'Publisher': 'object', 'NA_Sales': 'float64', 'EU_Sales': 'float64', 'JP_Sales': 'float64', 'Other_Sales': 'float64', 'Global_Sales': 'float64'}
<dataframe_Summary>
{'Rank': {'count': 16598.0, 'mean': 8300.605253645017, 'std': 4791.853932896403, 'min': 1.0, '25%': 4151.25, '50%': 8300.5, '75%': 12449.75, 'max': 16600.0}, 'Year': {'count': 16327.0, 'mean': 2006.4064433147546, 'std': 5.828981114712805, 'min': 1980.0, '25%': 2003.0, '50%': 2007.0, '75%': 2010.0, 'max': 2020.0}, 'NA_Sales': {'count': 16598.0, 'mean': 0.26466742981082064, 'std': 0.8166830292988796, 'min': 0.0, '25%': 0.0, '50%': 0.08, '75%': 0.24, 'max': 41.49}, 'EU_Sales': {'count': 16598.0, 'mean': 0.14665200626581515, 'std': 0.5053512312869116, 'min': 0.0, '25%': 0.0, '50%': 0.02, '75%': 0.11, 'max': 29.02}, 'JP_Sales': {'count': 16598.0, 'mean': 0.077781660441017, 'std': 0.30929064808220297, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.04, 'max': 10.22}, 'Other_Sales': {'count': 16598.0, 'mean': 0.0480630196409206, 'std': 0.18858840291271461, 'min': 0.0, '25%': 0.0, '50%': 0.01, '75%': 0.04, 'max': 10.57}, 'Global_Sales': {'count': 16598.0, 'mean': 0.5374406555006628, 'std': 1.5550279355699124, 'min': 0.01, '25%': 0.06, '50%': 0.17, '75%': 0.47, 'max': 82.74}}
<dataframe_info>
RangeIndex: 16598 entries, 0 to 16597
Data columns (total 11 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Rank 16598 non-null int64
1 Name 16598 non-null object
2 Platform 16598 non-null object
3 Year 16327 non-null float64
4 Genre 16598 non-null object
5 Publisher 16540 non-null object
6 NA_Sales 16598 non-null float64
7 EU_Sales 16598 non-null float64
8 JP_Sales 16598 non-null float64
9 Other_Sales 16598 non-null float64
10 Global_Sales 16598 non-null float64
dtypes: float64(6), int64(1), object(4)
memory usage: 1.4+ MB
<some_examples>
{'Rank': {'0': 1, '1': 2, '2': 3, '3': 4}, 'Name': {'0': 'Wii Sports', '1': 'Super Mario Bros.', '2': 'Mario Kart Wii', '3': 'Wii Sports Resort'}, 'Platform': {'0': 'Wii', '1': 'NES', '2': 'Wii', '3': 'Wii'}, 'Year': {'0': 2006.0, '1': 1985.0, '2': 2008.0, '3': 2009.0}, 'Genre': {'0': 'Sports', '1': 'Platform', '2': 'Racing', '3': 'Sports'}, 'Publisher': {'0': 'Nintendo', '1': 'Nintendo', '2': 'Nintendo', '3': 'Nintendo'}, 'NA_Sales': {'0': 41.49, '1': 29.08, '2': 15.85, '3': 15.75}, 'EU_Sales': {'0': 29.02, '1': 3.58, '2': 12.88, '3': 11.01}, 'JP_Sales': {'0': 3.77, '1': 6.81, '2': 3.79, '3': 3.28}, 'Other_Sales': {'0': 8.46, '1': 0.77, '2': 3.31, '3': 2.96}, 'Global_Sales': {'0': 82.74, '1': 40.24, '2': 35.82, '3': 33.0}}
<end_description>
# Notebook 129708233 (Kaggle dataset: cyclistic_data_2020_2022, identifier: cyclistic-data-2020-2022)
# # Importing libraries
import pandas as pd
import numpy as np
from pathlib import Path
# # Creating a list with all tripdata files containing information from April, 2020 to April, 2022.
path = Path("/kaggle/input/cyclistic-data-2020-2022")
tripdata_files = sorted(path.glob("*")) # list with all csv files
# # Concatenating all the files in one dataframe
dtypes = {
"ride_id": str,
"rideable_type": str,
"start_station_id": str,
"end_station_id": str,
"member_casual": str,
} # assigning correct data types to some columns before reading the dataframe
all_tripdata = pd.concat(
[pd.read_csv(file, dtype=dtypes) for file in tripdata_files], ignore_index=True
)
all_tripdata.head()
all_tripdata.info()
# # Cleaning dataset
# Changing date columns type to datetime type
all_tripdata["started_at"] = pd.to_datetime(all_tripdata.started_at)
all_tripdata["ended_at"] = pd.to_datetime(all_tripdata.ended_at)
# Checking for missing values
all_tripdata.isnull().sum()
# ## Analyzing NaN values in the end_lat and end_lng columns
# - There are 9771 NaN values in these columns, which represent only 0.10% of the dataset records.
# But before deleting these rows, I will see if I can match any of these station coordinates with a station name or id.
all_tripdata.loc[
all_tripdata.end_lat.isnull()
& all_tripdata.end_lng.isnull()
& (all_tripdata.end_station_name.notnull() | all_tripdata.end_station_id.notnull()),
:,
]
# - There was no matching end station name nor id. Thefore, these records will be deleted.
all_tripdata.dropna(
axis="index", how="all", subset=["end_lat", "end_lng"], inplace=True
)
# Continuing to check for missing values
all_tripdata.isnull().sum()
# ## Analyzing NaN values in the start_station_name and start_station_id columns
# - start_station_name and start_station_id have 938438 and 939061 NaN values, respectively.
# - The two counts differ, which means that for some records the start station name is filled while its id is missing, or vice-versa (a quick check of this asymmetry is sketched below).
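# A quick way to quantify the asymmetry described above (added check, not part of the original notebook):
name_without_id = (
    all_tripdata.start_station_id.isnull() & all_tripdata.start_station_name.notnull()
).sum()
id_without_name = (
    all_tripdata.start_station_name.isnull() & all_tripdata.start_station_id.notnull()
).sum()
print(f"Names without an id: {name_without_id}, ids without a name: {id_without_name}")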
# ### Finding out the start station with missing ids
start_station_missing_id_df = all_tripdata.loc[
(all_tripdata.start_station_id.isnull())
& (all_tripdata.start_station_name.notnull()),
["start_station_name", "start_station_id"],
]
start_station_missing_id_df
station_names_with_missing_id = start_station_missing_id_df.start_station_name.unique()
print(
f'There are \033[1m{len(station_names_with_missing_id)}\033[0m "start stations" with missing id:\n'
)
n = 0
for station in station_names_with_missing_id:
print(f"- {station}")
print(f"\n\033[1mA total of {start_station_missing_id_df.shape[0]} records.\033[0m")
# ### Searching in the all_tripdata if these start stations can be matched with an id
start_station_filled_id = all_tripdata.loc[
(all_tripdata.start_station_name.isin(station_names_with_missing_id))
& (all_tripdata.start_station_id.notnull()),
["start_station_name", "start_station_id"],
].drop_duplicates()
start_station_filled_id
# - 4 out of the 5 start stations with missing id matched one id in the all_tripdata dataframe.
# - The id of station hubbard_test_lws was not found, and cannot be retrieved.
# Let's recover the other 4.
# ### Recovering start station ids
# creating a dataframe containing all the records with recoverable start station ids
recoverable_start_station_df = start_station_missing_id_df[
start_station_missing_id_df["start_station_name"].isin(
start_station_filled_id["start_station_name"]
)
]
recoverable_start_station_df
recoverable_start_station_df.shape
# - 623 records can be recovered out of the 626 rows with stations missing ids.
# - This means that only 3 records (from hubbard_test_lws station) still have missing ids.
# Replace NaN values in start_station_id based on matching start_station_name
all_tripdata["start_station_id"] = all_tripdata["start_station_id"].fillna(
all_tripdata["start_station_name"].map(
start_station_filled_id.set_index("start_station_name")["start_station_id"]
)
)
all_tripdata.isnull().sum()
# ### Finding out the missing start station names
start_station_missing_name_df = all_tripdata.loc[
(all_tripdata.start_station_name.isnull())
& (all_tripdata.start_station_id.notnull()),
["start_station_name", "start_station_id"],
]
start_station_missing_name_df
ids_with_missing_start_station = start_station_missing_name_df.start_station_id.unique()
print(
f"There are \033[1m{len(ids_with_missing_start_station)}\033[0m ids with missing start station names:\n"
)
n = 0
for id in ids_with_missing_start_station:
print(f"- {id}")
print(f"\n\033[1mA total of {start_station_missing_name_df.shape[0]} records.\033[0m")
# ### Trying to match with start_station_name in the all_tripdata
start_station_filled_name = all_tripdata.loc[
(all_tripdata.start_station_id.isin(start_station_missing_name_df.start_station_id))
& (all_tripdata.start_station_name.notnull()),
["start_station_name", "start_station_id"],
].drop_duplicates()
start_station_filled_name
# - All 3 ids matched a station name, so they can be retrieved.
# Replace NaN values in start_station_name based on matching start_station_id
all_tripdata["start_station_name"] = all_tripdata["start_station_name"].fillna(
all_tripdata["start_station_id"].map(
start_station_filled_name.set_index("start_station_id")["start_station_name"]
)
)
all_tripdata.isnull().sum()
# ## Analyzing NaN values in the end_station_name and end_station_id columns
# ### Repeating the previous procedure
# filtering records with end_station_id NaN values
end_station_missing_id_df = all_tripdata.loc[
(all_tripdata.end_station_id.isnull()) & (all_tripdata.end_station_name.notnull()),
["end_station_name", "end_station_id"],
]
end_station_missing_id_df
end_station_missing_ids = end_station_missing_id_df.end_station_name.unique()
print(
f'There are \033[1m{len(end_station_missing_ids)}\033[0m "end stations" with missing ids:\n'
)
n = 0
for stations in end_station_missing_ids:
print(f"- {stations}")
print(f"\n\033[1mA total of {end_station_missing_id_df.shape[0]} records.\033[0m")
# ### Searching in the all_tripdata if these end stations can be matched with an id
# checking which stations can be recovered
end_station_filled_id = all_tripdata.loc[
(all_tripdata.end_station_name.isin(end_station_missing_ids))
& (all_tripdata.end_station_id.notnull()),
["end_station_name", "end_station_id"],
].drop_duplicates()
end_station_filled_id
# - 2 out of the 3 end stations with missing id matched one id in the all_tripdata dataframe.
# - The same as before, the id of station hubbard_test_lws was not found, and cannot be retrieved.
# Let's recover the other 2.
# Creating a dataframe with recoverable end stations
recoverable_end_station_df = end_station_missing_id_df[
end_station_missing_id_df["end_station_name"].isin(
        end_station_filled_id["end_station_name"]
)
]
recoverable_end_station_df
recoverable_end_station_df.shape
# - 459 records can be recovered out of the 461 rows with stations missing ids.
# - This means that only 2 records (from hubbard_test_lws station) still have missing ids.
# Replace NaN values in end_station_id based on matching end_station_name
all_tripdata["end_station_id"] = all_tripdata["end_station_id"].fillna(
all_tripdata["end_station_name"].map(
end_station_filled_id.set_index("end_station_name")["end_station_id"]
)
)
all_tripdata.isnull().sum()
# - The remaining NaN values in start_station_name, start_station_id, end_station_name and end_station_id cannot be retrieved because they don't match any id or name, so they will be deleted.
all_tripdata.dropna(
axis="index",
how="any",
subset=[
"start_station_name",
"start_station_id",
"end_station_name",
"end_station_id",
],
inplace=True,
)
all_tripdata.isnull().sum()
# ## Checking for duplicated records
all_tripdata.duplicated().sum()
# - There are no duplicated entries in the dataset.
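# As an extra check (added sketch, not part of the original notebook): ride_id is expected to be
# unique per trip, so duplicates can also be checked on that column alone.
print(all_tripdata.duplicated(subset="ride_id").sum())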
# ## Checking the rideable_type column items
all_tripdata.rideable_type.unique()
# ## Checking the member_casual column items
all_tripdata.member_casual.unique()
# ## The dataset is clean and it can be imported to Tableau.
all_tripdata.to_csv(r"all_tripdata_new.csv", index=False)
# Notebook 129708706
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import torch
import torchvision
import torchvision.transforms as transforms
import numpy as np
import keras
from keras.datasets import cifar10
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.layers import Convolution2D, MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dropout
from keras.utils import np_utils
# Normalize CIFAR-10 dataset using PyTorch
transform = transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize(mean=[0.491, 0.482, 0.447], std=[0.202, 0.199, 0.201]),
]
)
trainset = torchvision.datasets.CIFAR10(
root="./data", train=True, download=True, transform=transform
)
testset = torchvision.datasets.CIFAR10(
root="./data", train=False, download=True, transform=transform
)
# Create PyTorch data loaders for the CIFAR-10 training and test sets
trainloader = torch.utils.data.DataLoader(
trainset, batch_size=128, shuffle=True, num_workers=2
)
testloader = torch.utils.data.DataLoader(
testset, batch_size=128, shuffle=False, num_workers=2
)
# Build model
model = Sequential(
[
Convolution2D(filters=32, kernel_size=(3, 3), input_shape=(32, 32, 3)),
Activation("relu"),
MaxPooling2D(pool_size=(2, 2)),
Convolution2D(filters=64, kernel_size=(3, 3)),
Activation("relu"),
MaxPooling2D(pool_size=(2, 2)),
Convolution2D(filters=128, kernel_size=(3, 3)),
Activation("relu"),
MaxPooling2D(pool_size=(2, 2)),
Flatten(),
Dense(1000),
Activation("relu"),
Dropout(0.5),
Dense(10),
Activation("softmax"),
]
)
# Compile model
model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])
import numpy as np
# Convert PyTorch data loaders to NumPy arrays
train_images = np.concatenate([batch[0].numpy() for batch in trainloader])
train_labels = np.concatenate([batch[1].numpy() for batch in trainloader])
test_images = np.concatenate([batch[0].numpy() for batch in testloader])
test_labels = np.concatenate([batch[1].numpy() for batch in testloader])
# Rearrange the image arrays from PyTorch's (N, C, H, W) layout to Keras' (N, H, W, C) layout
train_images = train_images.transpose(0, 2, 3, 1)
test_images = test_images.transpose(0, 2, 3, 1)
# Convert labels to categorical format
num_classes = 10
train_labels = np_utils.to_categorical(train_labels, num_classes)
test_labels = np_utils.to_categorical(test_labels, num_classes)
# Train the model
model.fit(
train_images,
train_labels,
epochs=1,
validation_data=(test_images, test_labels),
verbose=1,
)
# Evaluate model on test data
# Convert PyTorch test data to NumPy arrays
test_images = np.concatenate([batch[0].numpy() for batch in testloader])
test_labels = np.concatenate([batch[1].numpy() for batch in testloader])
# Rearrange the test image array from PyTorch's (N, C, H, W) layout to Keras' (N, H, W, C) layout
test_images = test_images.transpose(0, 2, 3, 1)
# Convert labels to categorical format
num_classes = 10
test_labels = np_utils.to_categorical(test_labels, num_classes)
# Evaluate the model on test data
score = model.evaluate(test_images, test_labels, verbose=1)
print("Test accuracy:", score[1])
# Notebook 129708822
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.feature_selection import VarianceThreshold, SelectFromModel
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.linear_model import Ridge, Lasso, ElasticNet
from sklearn.preprocessing import StandardScaler, PolynomialFeatures
from sklearn.metrics import mean_absolute_error
import scipy.stats as stats
import pylab
import lightgbm as lgb
import warnings
warnings.filterwarnings("ignore")
train = pd.read_csv("/kaggle/input/playground-series-s3e14/train.csv")
test = pd.read_csv("/kaggle/input/playground-series-s3e14/test.csv")
submit = pd.read_csv("/kaggle/input/playground-series-s3e14/sample_submission.csv")
train.head()
train.info()
plt.figure(figsize=(10, 6))
sns.kdeplot(data=train["yield"])
plt.title("Yield Distribution")
plt.show()
def normality(data, feature):
"""
Takes a dataset and a feature. It will check for
normality in the distribution
"""
plt.figure(figsize=(10, 5))
plt.subplot(1, 3, 1)
sns.boxplot(data=data, y=feature)
plt.title("Outliers")
plt.subplot(1, 3, 2)
sns.kdeplot(data[feature])
plt.title("KDE plot")
plt.subplot(1, 3, 3)
stats.probplot(data[feature], plot=pylab)
plt.title("Distribution per quantiles")
plt.suptitle(f"{feature}")
plt.tight_layout()
plt.show()
for i in range(len(train.columns[1:-1])):
normality(train, train.columns[1:-1][i])
# ## Drop outliers
train = train.drop(train[train["honeybee"] > 2.5].index)
train = train.drop(train[train["osmia"] < 0.2].index)
train = train.drop(train[train["andrena"] < 0.1].index)
train = train.drop(train[train["bumbles"] < 0.03].index)
train = train.drop(train[train["fruitset"] < 0.3].index)
# train = train.drop(train[train["RainingDays"] < 5].index)
# train = train.drop(train[train["AverageRainingDays"] < 0.09].index)
# ## Correlation (heatmap)
corr = train.drop("id", axis=1).corr().round(2)
matrix = np.triu(corr)
plt.figure(figsize=(10, 7))
sns.heatmap(corr, annot=True, mask=matrix)
plt.show()
to_drop = [
"MinOfUpperTRange",
"AverageOfUpperTRange",
"MaxOfLowerTRange",
"MinOfLowerTRange",
"AverageOfLowerTRange",
"AverageRainingDays",
]
X = train.drop(["id", "yield", *to_drop], axis=1)
y = train["yield"]
X_test = test.drop(["id", *to_drop], axis=1)
# ## Feature Importance
rfr = SelectFromModel(
RandomForestRegressor(n_estimators=10, random_state=49), threshold=0.001
)
rfr.fit(X, y)
imp = rfr.estimator_.feature_importances_
df = pd.DataFrame(imp, index=X.columns, columns=["Importance"])
fig, ax = plt.subplots(figsize=(10, 7))
sorted_idx = imp.argsort()
ax.barh(
df.index[sorted_idx],
df["Importance"][sorted_idx],
height=0.8,
facecolor="grey",
alpha=0.8,
edgecolor="k",
)
ax.set_xlabel("Importance score")
ax.set_title("Permutation feature importance")
plt.gca().invert_yaxis()
plt.tight_layout()
plt.show()
to_drop = list(X.columns[~(rfr.get_support())])
print(f"Suggest to drop: {to_drop}")
# X = X.drop(to_drop,axis=1)
# X_test = X_test.drop(to_drop,axis=1)
X.head()
# ## Scale data
scaler = StandardScaler()
X = scaler.fit_transform(X)
X_test = scaler.transform(X_test)
# ## Split data
X_train, X_valid, y_train, y_valid = train_test_split(
X, y, test_size=0.33, random_state=49
)
# ## Define a model
model = RandomForestRegressor()
params = {
"criterion": ["absolute_error"],
"n_estimators": [100],
"max_depth": [12],
"random_state": [49],
}
grid = GridSearchCV(
estimator=model,
param_grid=params,
cv=3,
scoring="neg_mean_absolute_error",
verbose=2,
)
grid.fit(X_train, y_train)
best_params = grid.best_params_
best_model = grid.best_estimator_
print(f"Best parameters: {best_params}")
# model = lgb.LGBMRegressor()
# params = {
# "n_estimators": [100],
# "max_depth": [-1,2,3,5,10],
# "num_leaves": [4,6,8,10],
# "learning_rate": [0.09,0.1,0.2],
# "objective": ["regression"],
# "random_state": [49]
# }
# grid = GridSearchCV(estimator=model,param_grid=params,cv=5,scoring="neg_mean_absolute_error",verbose=1,n_jobs=-1)
# grid.fit(X_train,y_train)
# best_params = grid.best_params_
# best_model = grid.best_estimator_
# print(f"Best parameters: {best_params}")
y_pred = best_model.predict(X_valid)
mae = mean_absolute_error(y_valid, y_pred)
print(f"MAE: {mae}")
# ## Final model
final_model = model.set_params(**best_params).fit(X, y)
results = final_model.predict(X_test)
submit["yield"] = results
plt.figure(figsize=(10, 7))
sns.kdeplot(data=train["yield"], label="train")
sns.kdeplot(data=submit["yield"], label="test")
plt.title("train vs test set")
plt.legend()
plt.show()
submit.to_csv("bbg007_submission.csv", index=False)
# Notebook 129708656
Fruits and Vegetables Image Recognition Dataset
### Context
This dataset contains images of the following food items:
* fruits- banana, apple, pear, grapes, orange, kiwi, watermelon, pomegranate, pineapple, mango.
* vegetables- cucumber, carrot, capsicum, onion, potato, lemon, tomato, radish, beetroot, cabbage, lettuce, spinach, soy bean, cauliflower, bell pepper, chilli pepper, turnip, corn, sweetcorn, sweet potato, paprika, jalapeño, ginger, garlic, peas, eggplant.
### Content
This dataset contains three folders:
* train (100 images each)
* test (10 images each)
* validation (10 images each)
each of the above folders contains subfolders for different fruits and vegetables wherein the images for respective food items are present
### Data Collection
The images in this dataset were scraped by me from Bing Image Search for a project of mine.
### Inspiration
The idea was to build an application which recognizes the food item(s) from the captured photo and gives its user different recipes that can be made using the food item(s).
### Citation
```
Kritik Seth, "Fruits and Vegetables Image Recognition Dataset," Kaggle 2020 [https://www.kaggle.com/kritikseth/fruit-and-vegetable-image-recognition]
```
Kaggle dataset identifier: fruit-and-vegetable-image-recognition
import numpy as np  # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator
import matplotlib.pyplot as plt
training_set = tf.keras.utils.image_dataset_from_directory(
"/kaggle/input/fruit-and-vegetable-image-recognition/train",
labels="inferred",
label_mode="categorical",
class_names=None,
color_mode="rgb",
batch_size=32,
image_size=(64, 64),
shuffle=True,
seed=None,
validation_split=None,
subset=None,
interpolation="bilinear",
follow_links=False,
crop_to_aspect_ratio=False,
)
validation_set = tf.keras.utils.image_dataset_from_directory(
"/kaggle/input/fruit-and-vegetable-image-recognition/validation",
labels="inferred",
label_mode="categorical",
class_names=None,
color_mode="rgb",
batch_size=32,
image_size=(64, 64),
shuffle=True,
seed=None,
validation_split=None,
subset=None,
interpolation="bilinear",
follow_links=False,
crop_to_aspect_ratio=False,
)
import os
import random
import matplotlib.pyplot as plt
import cv2
# Define the path to the dataset directory
dataset_path = "/kaggle/input/fruit-and-vegetable-image-recognition/train"
# Get the list of class names from the directory
class_names = os.listdir(dataset_path)
# Calculate the number of rows and columns for subplots
num_rows = 6
num_cols = (len(class_names) + num_rows - 1) // num_rows
# Create a figure and subplots to display the images
fig, axes = plt.subplots(num_rows, num_cols, figsize=(20, 20))
# Iterate over each class and randomly select one image to display
for i, class_name in enumerate(class_names):
row = i // num_cols
col = i % num_cols
class_dir = os.path.join(dataset_path, class_name)
image_files = os.listdir(class_dir)
random_image = random.choice(image_files)
image_path = os.path.join(class_dir, random_image)
image = cv2.imread(image_path)
# image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
axes[row, col].imshow(image)
axes[row, col].set_title(class_name)
axes[row, col].axis("off")
# Remove empty subplots if there are any
for i in range(len(class_names), num_rows * num_cols):
row = i // num_cols
col = i % num_cols
fig.delaxes(axes[row, col])
# Adjust spacing between subplots
plt.tight_layout()
plt.show()
# Get the list of class names from the directory
class_names = os.listdir(dataset_path)
# Calculate the number of rows and columns for subplots
num_rows = 6
num_cols = (len(class_names) + num_rows - 1) // num_rows
# Create a figure and subplots to display the images
fig, axes = plt.subplots(num_rows, num_cols, figsize=(20, 20))
# Iterate over each class and randomly select one image to display
for i, class_name in enumerate(class_names):
row = i // num_cols
col = i % num_cols
class_dir = os.path.join(dataset_path, class_name)
image_files = os.listdir(class_dir)
random_image = random.choice(image_files)
image_path = os.path.join(class_dir, random_image)
image = cv2.imread(image_path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
axes[row, col].imshow(image)
axes[row, col].set_title(class_name)
axes[row, col].axis("off")
# Remove empty subplots if there are any
for i in range(len(class_names), num_rows * num_cols):
row = i // num_cols
col = i % num_cols
fig.delaxes(axes[row, col])
# Adjust spacing between subplots
plt.tight_layout()
plt.show()
cnn = tf.keras.models.Sequential()
cnn.add(
tf.keras.layers.Conv2D(
filters=32,
kernel_size=3,
padding="same",
activation="relu",
input_shape=[64, 64, 3],
)
)
cnn.add(tf.keras.layers.Conv2D(filters=32, kernel_size=3, activation="relu"))
cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))
cnn.add(tf.keras.layers.Dropout(0.25))
cnn.add(
tf.keras.layers.Conv2D(filters=64, kernel_size=3, padding="same", activation="relu")
)
cnn.add(tf.keras.layers.Conv2D(filters=64, kernel_size=3, activation="relu"))
cnn.add(tf.keras.layers.MaxPool2D(pool_size=2, strides=2))
cnn.add(tf.keras.layers.Dropout(0.25))
cnn.add(tf.keras.layers.Flatten())
cnn.add(tf.keras.layers.Dense(units=512, activation="relu"))
cnn.add(tf.keras.layers.Dense(units=256, activation="relu"))
cnn.add(tf.keras.layers.Dropout(0.5)) # To avoid overfitting
# Output Layer
cnn.add(tf.keras.layers.Dense(units=36, activation="softmax"))
cnn.compile(optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"])
cnn.summary()
training_history = cnn.fit(x=training_set, validation_data=validation_set, epochs=32)
# Training set Accuracy
train_loss, train_acc = cnn.evaluate(training_set)
print("Training accuracy:", train_acc)
# Validation set Accuracy
val_loss, val_acc = cnn.evaluate(validation_set)
print("Validation accuracy:", val_acc)
cnn.save("trained_model.h5")
training_history.history # Return Dictionary of history
print(
"Validation set Accuracy: {} %".format(
training_history.history["val_accuracy"][-1] * 100
)
)
epochs = [i for i in range(1, 33)]
plt.plot(epochs, training_history.history["accuracy"], color="red")
plt.xlabel("No. of Epochs")
plt.ylabel("Traiining Accuracy")
plt.title("Visualization of Training Accuracy Result")
plt.show()
plt.plot(epochs, training_history.history["val_accuracy"], color="blue")
plt.xlabel("No. of Epochs")
plt.ylabel("Validation Accuracy")
plt.title("Visualization of Validation Accuracy Result")
plt.show()
test_x = tf.keras.utils.image_dataset_from_directory(
"/kaggle/input/fruit-and-vegetable-image-recognition/test",
labels="inferred",
label_mode="categorical",
class_names=None,
color_mode="rgb",
batch_size=32,
image_size=(64, 64),
shuffle=True,
seed=None,
validation_split=None,
subset=None,
interpolation="bilinear",
follow_links=False,
crop_to_aspect_ratio=False,
)
test_loss, test_acc = cnn.evaluate(test_x)
print("Test accuracy:", test_acc)
test_set = tf.keras.utils.image_dataset_from_directory(
"/kaggle/input/fruit-and-vegetable-image-recognition/test",
labels="inferred",
label_mode="categorical",
class_names=None,
color_mode="rgb",
batch_size=32,
image_size=(64, 64),
shuffle=True,
seed=None,
validation_split=None,
subset=None,
interpolation="bilinear",
follow_links=False,
crop_to_aspect_ratio=False,
)
cnn = tf.keras.models.load_model("/kaggle/working/trained_model.h5")
# Test Image Visualization
import cv2
image_path = (
"/kaggle/input/fruit-and-vegetable-image-recognition/test/onion/Image_5.jpg"
)
# Reading an image in default mode
img = cv2.imread(image_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # Converting BGR to RGB
# Displaying the image
plt.imshow(img)
plt.title("Test Image")
plt.xticks([])
plt.yticks([])
plt.show()
image = tf.keras.preprocessing.image.load_img(image_path, target_size=(64, 64))
input_arr = tf.keras.preprocessing.image.img_to_array(image)
input_arr = np.array([input_arr]) # Convert single image to a batch.
predictions = cnn.predict(input_arr)
result_index = np.argmax(predictions) # Return index of max element
print(result_index)
# Displaying the image
plt.imshow(img)
plt.title("Test Image")
plt.xticks([])
plt.yticks([])
plt.show()
# Single image Prediction
print("It's a {}".format(test_set.class_names[result_index]))
|
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import root_scalar
# For this probability project the chosen function is $$f(x) = - \frac{(\alpha+1)^2 x^\alpha \log(\beta x)}{1-(\alpha + 1)\log(\beta)}$$ where $\alpha > -1, \beta \in (0,1), x\in (0,1)$.
def densidade(a, b, x):
num = (a + 1) ** 2 * x**a * np.log(b * x)
den = 1 - (a + 1) * np.log(b)
return -num / den
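# Quick sanity check (not part of the original analysis): numerically verify that the
# density integrates to 1 over (0, 1); alpha = 2, beta = 0.5 are arbitrary values
# inside the stated domain.
from scipy.integrate import quad

area, _ = quad(lambda t: densidade(2, 0.5, t), 0, 1)
print("Integral of the density over (0, 1):", area)  # should be close to 1.0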
# We can plot the density for several parameter values
x = np.linspace(0.01, 1, 100)
# NOTE: beta = 0 lies outside the stated domain (0, 1) and makes log(beta*x) blow up,
# so beta = 1 is used here (the limiting case log(beta) = 0, i.e. gamma = 0 in the later parametrization)
plt.plot(x, densidade(5, 1, x), "r")
plt.plot(x, densidade(1.5, 1, x), "g")
plt.plot(x, densidade(-0.5, 1, x), "b")
plt.plot(x, densidade(3, 1, x), "m")
plt.show()
# The cumulative distribution function has the following form:
# $$\mathbb{P}(X \leq x) = \frac{x^{\alpha+1}((\alpha+1)(\log(\beta x))-1)}{(\alpha+1)\log(\beta)-1}$$
def cdf(a, b, x):
num = x ** (a + 1) * ((a + 1) * np.log(b * x) - 1)
den = (a + 1) * np.log(b) - 1
return num / den
# Let's plot it for a few parameter values
plt.plot(x, cdf(5, 0.99, x), "r")
plt.plot(x, cdf(1.5, 0.4, x), "g")
plt.plot(x, cdf(-0.5, 0.5, x), "b")
plt.plot(x, cdf(3, 0.99, x), "m")
plt.show()
# Now we turn to generating random values from this distribution. We will use the inverse transform (inversion) method.
# Define the quantile function computed numerically
def myqf(p, a, b):
f = lambda x: cdf(a, b, x) - p
res = root_scalar(f, bracket=[np.finfo(float).eps, 1 - np.finfo(float).eps])
return res.root
# Generate some uniform random numbers
u = np.random.uniform(size=100000)
# Apply the inverse transform
x = np.array([myqf(p, a=5, b=0.99) for p in u])
# Plot the histogram and density function
plt.hist(x, density=True, bins=50)
plt.plot(
np.linspace(0.01, 1),
densidade(5, 0.99, np.linspace(0.01, 1)),
color="r",
linewidth=2,
)
plt.show()
# Let's use a more elegant parametrization of the density:
# $$f(x) = -\frac{\theta^2 x^{\theta-1}(\log(x)-\gamma)}{1+\theta\gamma}, \theta \in (0, \infty), \gamma \in [0, \infty)$$
# whose cumulative distribution function is
# $$P(X \leq x) = \dfrac{x^\theta\cdot\left({\theta}{\gamma}-{\theta}\ln\left(x\right)+1\right)}{{\theta}{\gamma}+1}$$
def new_density(t, g, x):
num = t**2 * x ** (t - 1) * (np.log(x) - g)
den = 1 + t * g
return -num / den
def new_cdf(t, g, x):
num = x ** (t) * (t * g - t * np.log(x) + 1)
den = 1 + t * g
return num / den
def new_myqf(p, t, g):
f = lambda x: new_cdf(t, g, x) - p
res = root_scalar(f, bracket=[np.finfo(float).eps, 1 - np.finfo(float).eps])
return res.root
def get_samples(n, t, g):
u_1 = np.random.uniform(size=n)
x_1 = np.array([new_myqf(p, t, g) for p in u_1])
return x_1
plt.plot(np.linspace(0.01, 1, 100), new_density(0.5, 0, np.linspace(0.01, 1, 100)), "r")
plt.plot(np.linspace(0.01, 1, 100), new_density(0.1, 0, np.linspace(0.01, 1, 100)), "g")
plt.plot(np.linspace(0.01, 1, 100), new_density(3, 0, np.linspace(0.01, 1, 100)), "b")
plt.plot(np.linspace(0.01, 1, 100), new_density(5, 0, np.linspace(0.01, 1, 100)), "y")
plt.plot(np.linspace(0.01, 1, 100), new_density(15, 0, np.linspace(0.01, 1, 100)), "m")
plt.show()
# With $\gamma$ known, the proposed maximum likelihood estimator for $\theta$ is $$\hat{\theta}_{MLE} = \frac{-(\gamma+\epsilon) \pm \sqrt{(\gamma+\epsilon)^2 - 8\gamma\epsilon}}{2\gamma\epsilon}$$ where $$\epsilon = \frac{1}{n}\sum_{i=1}^n \log(x_i).$$ Moreover, to estimate $\gamma$ when $\theta$ is known, we need to solve the score equation
# $$- \frac{\theta n}{\gamma\theta +1} - \sum_{i=1}^n \frac{1}{\gamma - \log(x_i)} = 0.$$
# For estimation with $\gamma = 0$ we use $$\hat{\theta} = -\frac{2n}{\sum_{k=1}^n \log(x_k)}$$
def theta_estim_gamma(theta, gamma, n):
eps = np.sum(np.log(get_samples(n, theta, gamma))) / n
termo_a = gamma * eps
termo_b = gamma + eps
num_1 = -termo_b + np.sqrt((termo_b) ** 2 - 8 * termo_a)
num_2 = -termo_b - np.sqrt((termo_b) ** 2 - 8 * termo_a)
den = 2 * termo_a
return max(num_1 / den, num_2 / den)
theta_estim_gamma(0.6, 5, 1000)
def param_estim_1(theta, gamma, n, iters):
estim_values = []
for i in range(iters):
estim = theta_estim_gamma(theta, gamma, n)
estim_values.append(estim)
estim_array = np.array(estim_values)
    # average over the `iters` simulation replications (estim_array has length iters, not n)
    MSE = np.mean((theta - estim_array) ** 2)
return MSE
param_estim_1(2, 2, 1000, 1000)
def theta_estim(theta, gamma, n):
num = 2 * n
den = np.sum(np.log(get_samples(n, theta, gamma)))
return -num / den
def param_estim_2(theta, gamma, n, iters):
estim_values = []
for i in range(iters):
estim = theta_estim(theta, gamma, n)
estim_values.append(estim)
estim_array = np.array(estim_values)
    # average over the `iters` simulation replications (estim_array has length iters, not n)
    MSE = np.mean((theta - estim_array) ** 2)
return MSE
def theta_estim_media(theta, gamma, n):
samples = get_samples(n, theta, gamma)
return np.mean(samples)
theta_estim_media(1, 3, 1000)
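# A small follow-up sketch (not in the original notebook): compare, via Monte Carlo, the
# MSE of the full MLE (param_estim_1) with that of the simplified gamma = 0 estimator
# (param_estim_2) when the data are generated with gamma > 0. The parameter values and
# sample sizes below are illustrative choices only.
theta_true, gamma_true = 2.0, 1.0
mse_full = param_estim_1(theta_true, gamma_true, 200, 100)
mse_simplified = param_estim_2(theta_true, gamma_true, 200, 100)
print("MSE of the full MLE:           ", mse_full)
print("MSE of the gamma = 0 estimator:", mse_simplified)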
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/708/129708513.ipynb
| null | null |
[{"Id": 129708513, "ScriptId": 37086218, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9954330, "CreationDate": "05/15/2023 23:40:56", "VersionNumber": 1.0, "Title": "Projeto Probabilidade", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 138.0, "LinesInsertedFromPrevious": 138.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
| null | null | null | null |
| false | 0 | 2,133 | 2 | 2,133 | 2,133 |
||
129672798
|
<jupyter_start><jupyter_text>GTZAN Dataset - Music Genre Classification
### Context
Music. Experts have been trying for a long time to understand sound and what differenciates one song from another. How to visualize sound. What makes a tone different from another.
This data hopefully can give the opportunity to do just that.
### Content
* **genres original** - A collection of 10 genres with 100 audio files each, all having a length of 30 seconds (the famous GTZAN dataset, the MNIST of sounds)
* **images original** - A visual representation for each audio file. One way to classify data is through neural networks. Because NNs (like CNN, what we will be using today) usually take in some sort of image representation, the audio files were converted to Mel Spectrograms to make this possible.
* **2 CSV files** - Containing features of the audio files. One file has for each song (30 seconds long) a mean and variance computed over multiple features that can be extracted from an audio file. The other file has the same structure, but the songs were split before into 3 seconds audio files (this way increasing 10 times the amount of data we fuel into our classification models). *With data, more is always better*.
Kaggle dataset identifier: gtzan-dataset-music-genre-classification
<jupyter_script># Usual Libraries
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import sklearn
# Librosa to handle audio files
import librosa
import librosa.display
import IPython.display as ipd
import warnings
warnings.filterwarnings("ignore")
# the path of file and generals
import os
general_path = "../input/gtzan-dataset-music-genre-classification/Data"
genres = list(os.listdir(f"{general_path}/genres_original/"))
genres
# # Explore Audio Data
# We will use `librosa`, the go-to Python library for audio analysis.
# ## Understanding Audio
# Let's first Explore our Audio Data to see how it looks (we'll work with `reggae.00036.wav` file).
# * **Sound**: sequence of vibrations in varying pressure strengths (`y`)
# * The **sample rate** (`sr`) is the number of samples of audio carried per second, measured in Hz or kHz
# Importing 1 file
sound_sequence, sr = librosa.load(
f"{general_path}/genres_original/reggae/reggae.00036.wav"
)
print("sound_sequence:", sound_sequence, "\n")
print("sound_sequence shape:", np.shape(sound_sequence), "\n")
print("Sample Rate (KHz):", sr, "\n")
# Trim leading and trailing silence from an audio signal (silence before and after the actual audio)
audio_file, _ = librosa.effects.trim(sound_sequence)
# the result is an numpy ndarray
print("Audio File:", audio_file, "\n")
print("Audio File shape:", np.shape(audio_file))
# ### 2D Representation: Sound Waves
plt.figure(figsize=(16, 6))
librosa.display.waveshow(y=audio_file, sr=sr, color="#A300F9")
plt.title("Sound Waves in Reggae 36", fontsize=23)
# ## EDA
# EDA is going to be performed on `features_30_sec.csv`. This file contains the mean and variance of the features analysed above, computed for each audio file.
# So, the table has a total of 1000 rows (10 genres x 100 audio files) and 60 feature columns (dimensionalities).
data = pd.read_csv(f"{general_path}/features_30_sec.csv")
data.head()
# ### Correlation Heatmap for feature means
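# The heading above had no accompanying code; a plausible sketch, assuming the columns of
# interest are the "*_mean" features of features_30_sec.csv:
mean_cols = [col for col in data.columns if "mean" in col]
corr = data[mean_cols].corr()
plt.figure(figsize=(16, 11))
sns.heatmap(corr, cmap="coolwarm", center=0)
plt.title("Correlation Heatmap (feature means)", fontsize=20)
plt.show()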
# # Machine Learning Classification
# Using the `features_3_sec.csv` file, we can try to build a classifier that accurately predicts the genre of any new audio file.
# ### Libraries
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import confusion_matrix, accuracy_score, roc_auc_score, roc_curve
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import RFE
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import cross_val_score, KFold
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
from keras.layers import Dropout
from keras import regularizers
# ### Reading in the Data
# Now let's try to predict the Genre of the audio using Machine Learning techniques.
data = pd.read_csv(f"{general_path}/features_3_sec.csv")
data = data.iloc[0:, 1:]
data.head()
# ### Features and Target variable
# * creates the target and feature variables
# * normalizes the data
y = data["label"] # genre variable.
X = data.loc[:, data.columns != "label"] # select all columns but not the labels
X = np.asarray(X).astype(np.float32)
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=42
)
scaler = MinMaxScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)  # reuse the scaler fitted on the training split (don't refit on test data)
# # NN Model (baseline model)
epochs_num = 20
batch_size = 5
baseline_model = Sequential()
baseline_model.add(Dense(120, input_dim=np.shape(X)[1], activation="relu"))
baseline_model.add(Dropout(0.25))
baseline_model.add(
Dense(
64,
kernel_initializer="normal",
kernel_regularizer=regularizers.l2(0.001),
activation="relu",
)
)
baseline_model.add(Dropout(0.25))
baseline_model.add(Dense(len(genres), activation="softmax"))
baseline_model.summary()
# Compile the model
baseline_model.compile(
    optimizer="adam", loss="categorical_crossentropy", metrics=["accuracy"]
)  # categorical cross-entropy matches the one-hot, multi-class genre targets
y_test = [genres.index(y) for y in y_test]
y_test = np.array(y_test)
y_train = [genres.index(y) for y in y_train]
y_train = np.array(y_train)
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)
history = baseline_model.fit(X_train, y_train, epochs=epochs_num, batch_size=batch_size)
y_pred = baseline_model.predict(X_test)
y_pred = np.argmax(y_pred, axis=1)
y_test = np.argmax(y_test, axis=1)
print("NN accuracy : ", (round(accuracy_score(y_test, y_pred), 3)) * 100, "%")
# # KNN model - Cross Validation
knn = KNeighborsClassifier(n_neighbors=len(genres))
num_folds = 5 # number of folds
fold_no = 1
y = np.array(y)
kfold = KFold(n_splits=num_folds, shuffle=True)
for train, test in kfold.split(X, y):
print(f"Fold No.{fold_no}")
knn.fit(X[train], y[train])
preds = knn.predict(X[test])
acc = (round(accuracy_score(y[test], preds), 3)) * 100
print("KNN Accuracy : ", acc, "%")
print("---------------------------")
fold_no = fold_no + 1
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/672/129672798.ipynb
|
gtzan-dataset-music-genre-classification
|
andradaolteanu
|
[{"Id": 129672798, "ScriptId": 38505633, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14041541, "CreationDate": "05/15/2023 16:23:54", "VersionNumber": 1.0, "Title": "notebook48611ab4b4", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 157.0, "LinesInsertedFromPrevious": 69.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 88.0, "LinesInsertedFromFork": 69.0, "LinesDeletedFromFork": 301.0, "LinesChangedFromFork": 0.0, "LinesUnchangedFromFork": 88.0, "TotalVotes": 0}]
|
[{"Id": 185986750, "KernelVersionId": 129672798, "SourceDatasetVersionId": 1032238}]
|
[{"Id": 1032238, "DatasetId": 568973, "DatasourceVersionId": 1061257, "CreatorUserId": 3564129, "LicenseName": "Other (specified in description)", "CreationDate": "03/24/2020 14:05:33", "VersionNumber": 1.0, "Title": "GTZAN Dataset - Music Genre Classification", "Slug": "gtzan-dataset-music-genre-classification", "Subtitle": "Audio Files | Mel Spectrograms | CSV with extracted features", "Description": "### Context\n\nMusic. Experts have been trying for a long time to understand sound and what differenciates one song from another. How to visualize sound. What makes a tone different from another.\n\nThis data hopefully can give the opportunity to do just that.\n\n\n### Content\n\n* **genres original** - A collection of 10 genres with 100 audio files each, all having a length of 30 seconds (the famous GTZAN dataset, the MNIST of sounds)\n* **images original** - A visual representation for each audio file. One way to classify data is through neural networks. Because NNs (like CNN, what we will be using today) usually take in some sort of image representation, the audio files were converted to Mel Spectrograms to make this possible.\n* **2 CSV files** - Containing features of the audio files. One file has for each song (30 seconds long) a mean and variance computed over multiple features that can be extracted from an audio file. The other file has the same structure, but the songs were split before into 3 seconds audio files (this way increasing 10 times the amount of data we fuel into our classification models). *With data, more is always better*.\n\n\n### Acknowledgements\n\n* The GTZAN dataset is the most-used public dataset for evaluation in machine listening research for music genre recognition (MGR). The files were collected in 2000-2001 from a variety of sources including personal CDs, radio, microphone recordings, in order to represent a variety of recording conditions (http://marsyas.info/downloads/datasets.html).\n* This was a team project for uni, so the effort in creating the images and features wasn't only my own. So, I want to thank **James Wiltshire, Lauren O'Hare and Minyu Lei** for being the best teammates ever and for having so much fun and learning so much during the 3 days we worked on this.\n\n\n### Inspiration\n\n* what is an audio file?\n* how does an audio file look?\n* can you extract features?\n* can you perform EDA?\n* can you create a super powerful NN on the images?", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 568973, "CreatorUserId": 3564129, "OwnerUserId": 3564129.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1032238.0, "CurrentDatasourceVersionId": 1061257.0, "ForumId": 582697, "Type": 2, "CreationDate": "03/24/2020 14:05:33", "LastActivityDate": "03/24/2020", "TotalViews": 304300, "TotalDownloads": 45527, "TotalVotes": 639, "TotalKernels": 154}]
|
[{"Id": 3564129, "UserName": "andradaolteanu", "DisplayName": "Andrada", "RegisterDate": "08/09/2019", "PerformanceTier": 4}]
|
| false | 0 | 1,594 | 0 | 1,892 | 1,594 |
||
129672608
|
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
diabetes_dataset = pd.read_csv("/kaggle/input/project/datasets_228_482_diabetes.csv")
diabetes_dataset.head()
diabetes_dataset.shape
diabetes_dataset.describe()
diabetes_dataset["Outcome"].value_counts()
Y = diabetes_dataset["Outcome"]
X = diabetes_dataset.drop(columns="Outcome", axis=1)
X_train, X_test, Y_train, Y_test = train_test_split(
X, Y, test_size=0.2, stratify=Y, random_state=2
)
print(X.shape, X_train.shape, X_test.shape)
classifier = GaussianNB()
classifier.fit(X_train, Y_train)
X_train_prediction = classifier.predict(X_train)
training_data_accuracy = accuracy_score(X_train_prediction, Y_train)
print("Accuracy score of the training data : ", training_data_accuracy)
from sklearn.linear_model import Perceptron
per = Perceptron(tol=1e-3, random_state=0)
per.fit(X_train, Y_train)
X_train_prediction = per.predict(X_train)
training_data_accuracy = accuracy_score(X_train_prediction, Y_train)
print("Accuracy score of the training data : ", training_data_accuracy)
from sklearn.neural_network import MLPClassifier
NN = MLPClassifier(hidden_layer_sizes=(256, 128, 64, 32, 16, 8, 4, 2), random_state=1)
NN.fit(X_train, Y_train)
X_train_prediction = NN.predict(X_train)
training_data_accuracy = accuracy_score(X_train_prediction, Y_train)
print("Accuracy score of the training data : ", training_data_accuracy)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/672/129672608.ipynb
| null | null |
[{"Id": 129672608, "ScriptId": 38308906, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15019151, "CreationDate": "05/15/2023 16:22:12", "VersionNumber": 2.0, "Title": "PROJECTAI", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 59.0, "LinesInsertedFromPrevious": 8.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 51.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 524 | 0 | 524 | 524 |
||
129672950
|
# **Dimensionality Reduction**
# ## **Dimensionality Reduction**
# Dimensionality reduction, for me, is the process of simplifying complex data by
# reducing the number of features or variables while retaining the important information.
# To simplify it further, I can say it is a way of looking at the same picture from
# different dimensions and then selecting the dimension that gives the clearest view, one in which all the data is visible and can be easily identified.
# ---
# Let me explain this in simpler terms through a daily-life example.
# Suppose you have 1000 photographs and you want to classify them into different sets. You could sort them one by one, but you can also group them by common themes or visual patterns, i.e. by broader categories, which makes the collection much easier to analyze and understand.
# ---
# In a similar manner, dimensionality reduction techniques take a complex dataset with numerous features and transform it into a lower-dimensional representation.
# It simplifies the data, making it more manageable and allowing for better visualization, analysis, and decision-making.
# ---
# In this notebook I am going to cover the following dimensionality reduction techniques, along with the intuition behind them:-
# 1. PCA
# 2. LDA
# 3. NMF
# # PCA (Principal Component Analysis)
# I am adding two links which will give you a very good intuition behind this algorithm.
# 1.https://stats.stackexchange.com/questions/2691/making-sense-of-principal-component-analysis-eigenvectors-eigenvalues/140579#140579
# 2.https://www.youtube.com/watch?v=iRbsBi5W0-c
#
# Before moving forward, there are certain terms I would like to explain, such as eigenvalues, eigenvectors and the covariance matrix.
# For that, refer to:-
# ->https://byjus.com/maths/eigen-values/
# ->https://byjus.com/covariance-matrix-formula/#:~:text=Covariance%20Matrix%20is%20a%20measure,matrix%20and%20variance%2Dcovariance%20matrix.
# # Preparing our own PCA Class
# Steps:-
# 1. Mean Centering
# 2. Find the Covariance Matrix among all the columns
# 3. Find eigen values/eigen Vectors by eigen decomposing covariance matrix
# 4. Now transform all the data points.
import numpy as np
import pandas as pd
np.random.seed(23)
# creating data for group 1
v1 = np.array([0, 0, 0])
cov_mat1 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
sample1 = np.random.multivariate_normal(v1, cov_mat1, 20)
df = pd.DataFrame(sample1, columns=["f1", "f2", "f3"])
df["target"] = 1
# creating data for group 2
v2 = np.array([1, 1, 1])
cov_mat2 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
sample1 = np.random.multivariate_normal(v2, cov_mat2, 20)
df1 = pd.DataFrame(sample1, columns=["f1", "f2", "f3"])
df1["target"] = 0
df = df.append(df1, ignore_index=True)
df = df.sample(40)
import plotly.express as px
fig = px.scatter_3d(
df, x=df["f1"], y=df["f2"], z=df["f3"], color=df["target"].astype("str")
)
fig.show()
# step 1: Mean Centering
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
df.iloc[:, 0:3] = scaler.fit_transform(df.iloc[:, 0:3])
# step2:-creating covariance matrix
cov_mat = np.cov([df.iloc[:, 0], df.iloc[:, 1], df.iloc[:, 2]])
print("Covariance Matrix", cov_mat)
# step 3 :- get the eigen values and eigen matrix- doing eigen decomposing on cov matrix
eig_values, eig_vectors = np.linalg.eig(cov_mat)
eig_values # since we have 3 features we will have 3 eigen values
eig_vectors # we have 3 eigen vectors as 3 values of eigen values are there for all 3 features
# np.linalg.eig returns eigenvectors as the *columns* of eig_vectors, so sort them by
# decreasing eigenvalue and keep the top two (as rows of pc) as the principal components
order = np.argsort(eig_values)[::-1]
pc = eig_vectors[:, order[:2]].T
# transforing using 2 principal components
trsf_df = np.dot(df.iloc[:, 0:3], pc.T)
df2 = pd.DataFrame(trsf_df, columns=["PC1", "PC2"])
df2["target"] = df["target"].values
df2
import plotly.express as px
fig = px.scatter(df2, x=df2["PC1"], y=df2["PC2"], color=df2["target"].astype("str"))
fig.show()
# # Implementing PCA using the MNIST dataset
from google.colab import drive
drive.mount("/content/gdrive")
m_df = pd.read_csv("train.csv")
m_df
m_df = m_df.fillna(0)
import matplotlib.pyplot as plt
plt.imshow(m_df.iloc[13912, 1:].values.reshape(28, 28))
x = m_df.iloc[:, 1:]
y = m_df.iloc[:, 0]
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.2, random_state=42
)
x_train.shape
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(x_train, y_train)
y_pred = knn.predict(x_test)
from sklearn.metrics import accuracy_score
accuracy_score(y_test, y_pred)
# APPLYING PCA
#
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
x_train1 = scaler.fit_transform(x_train)
x_test1 = scaler.transform(x_test)
from sklearn.decomposition import PCA
pca = PCA(n_components=100)
x_train1 = pca.fit_transform(x_train1)
x_test1 = pca.transform(x_test1)
x_train1.shape
knn1 = KNeighborsClassifier()
knn1.fit(x_train1, y_train)
y_pred1 = knn1.predict(x_test1)
accuracy_score(y_test, y_pred1)
# plotting it in 2 D and 3 D
pca = PCA(n_components=2)
x_train1 = pca.fit_transform(x_train1)
x_test1 = pca.transform(x_test1)
import plotly.express as px
fig = px.scatter(x=x_train1[:, 0], y=x_train1[:, 1], color=y_train.astype("str"))
fig.show()
x_train1 = scaler.fit_transform(x_train)
x_test1 = scaler.transform(x_test)
pca = PCA(n_components=3)
x_train1 = pca.fit_transform(x_train1)
x_test1 = pca.transform(x_test1)
import plotly.express as px
fig = px.scatter_3d(
x=x_train1[:, 0], y=x_train1[:, 1], z=x_train1[:, 2], color=y_train.astype("str")
)
fig.show()
pca.explained_variance_ # eigen_values
pca.components_ # eigen_vectors
# finding optimum no of PCA
x_train1 = scaler.fit_transform(x_train)
x_test1 = scaler.transform(x_test)
pca = PCA(n_components=None)
x_train1 = pca.fit_transform(x_train1)
x_test1 = pca.transform(x_test1)
pca.explained_variance_ratio_
pca.explained_variance_ratio_.shape
np.cumsum(pca.explained_variance_ratio_)
plt.plot(np.cumsum(pca.explained_variance_ratio_))
# explained variance
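# A short follow-up sketch (not in the original notebook): scikit-learn can choose the
# number of components automatically when n_components is passed as a fraction of the
# variance to retain; 0.95 below is an illustrative threshold.
pca_95 = PCA(n_components=0.95)
pca_95.fit(scaler.fit_transform(x_train))
print("Components needed to keep 95% of the variance:", pca_95.n_components_)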
# # When not to use PCA
# While Principal Component Analysis (PCA) is a powerful dimensionality reduction technique, there are situations where it may not be suitable or may not provide optimal results. Here are some cases when PCA may not be the best choice:
# Non-Linear Relationships: PCA assumes a linear relationship between variables. If the underlying relationships in the data are non-linear, PCA may not capture the essential structures effectively. In such cases, nonlinear dimensionality reduction techniques, such as manifold learning methods (e.g., t-SNE, Isomap), may be more appropriate.
# Preserving Interpretability: PCA creates linear combinations of the original features to form the principal components. While this transformation is useful for dimensionality reduction, it often results in a loss of interpretability. If preserving the interpretability of the original features is crucial for your analysis, alternative techniques that retain the original features, such as feature selection or feature extraction methods with explicit interpretations (e.g., factor analysis), might be more suitable.
# Sparse or Sparse-Semantic Data: PCA works best when the data exhibits dense and continuous patterns. If the data is sparse, meaning most of the values are zeros or missing, PCA may not perform well. In such cases, specialized techniques for handling sparse data, such as sparse PCA or non-negative matrix factorization (NMF), could be more appropriate.
# Outliers: PCA is sensitive to outliers in the data. Outliers can disproportionately influence the calculation of principal components, potentially leading to a distorted representation of the data. If the presence of outliers is a concern, it is advisable to either preprocess the data to handle outliers or consider robust dimensionality reduction techniques that are less affected by outliers.
# Retaining Specific Features: PCA aims to capture the most significant variance in the data but may not prioritize preserving specific features that are important for your analysis. If you have domain knowledge and specific variables that you want to retain in the dimensionality reduction process, other methods such as feature selection or domain-specific techniques may be more appropriate.
# Large Datasets: PCA can be computationally expensive for very large datasets. As the number of variables or samples increases, the computational and memory requirements of PCA grow. In such cases, approximate or incremental PCA methods can be used to handle large-scale datasets.
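# The last point above mentions incremental PCA; a minimal sketch (batch size is an
# arbitrary choice) of applying it to the same scaled training data:
from sklearn.decomposition import IncrementalPCA

ipca = IncrementalPCA(n_components=100, batch_size=1000)
x_train_ipca = ipca.fit_transform(scaler.fit_transform(x_train))
print(x_train_ipca.shape)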
# # LDA (Linear Discriminant Analysis)
# Linear Discriminant Analysis (LDA) is one of the commonly used dimensionality reduction techniques in machine learning for solving multi-class (more than two classes) classification problems. It is also known as Normal Discriminant Analysis (NDA) or Discriminant Function Analysis (DFA).
# It works well when the data is linearly separable.
# It is a kind of linear transformation technique.
# It takes class labels into account as well, which makes it a supervised algorithm.
# # Let us understand this by using the simple example:-
# 1. Imagine you have a bunch of pictures of different animals, like cats, dogs, and birds. Each picture has many features, such as the color, size, and shape of the animal.
# 2. Now, you want to find a way to represent these pictures using fewer features, so you can understand the differences between the animals better. This is where dimensionality reduction comes in.
# 3. LDA is a method that helps us reduce the number of features while still keeping the important information that helps us distinguish between different classes, like cats, dogs, and birds.
# 4. First, we need to gather some examples of the animals we want to classify, like pictures of cats, dogs, and birds. We also need to know which animal each picture belongs to (the class labels).
# 5. Next, we look at the features of the animals in the pictures, such as their color, size, and shape. LDA tries to find a new set of features that are good at separating the animals into their correct classes.
# 6. To do this, LDA calculates the average (mean) feature values for each class. For example, it finds the average color, size, and shape of all the cat pictures, and does the same for dogs and birds.
# 7. LDA also calculates the spread or variance of the feature values within each class. This tells us how much the feature values differ within each animal class. It helps LDA understand the differences between individual animals of the same class.
# 8. Now, LDA wants to find a set of features that makes the spread between different classes as large as possible, while keeping the spread within each class as small as possible. This means the new features should help us separate animals of different classes, but be similar for animals of the same class.
# 9. LDA achieves this by finding a direction (line) in the feature space that maximizes the differences between classes and minimizes the differences within each class. It's like drawing a line that best separates the animals based on their features.
# 10. Once LDA finds this line, it uses it to project or transform the original pictures onto a lower-dimensional space. This means we reduce the number of features in each picture but still keep the important information that helps us classify the animals correctly.
# 11. Finally, we can use these lower-dimensional representations of the pictures to classify new animals. LDA has helped us reduce the complexity of the pictures while preserving the important information that distinguishes one animal from another.
#
# # **Let's look at the step-by-step implementation of LDA**
# 1. Calculate mean for each class and overall class.
# 2. Calculate within class co-variance matrix.
# 3. calculate between class covariance matrix.
# 4. calculate the eigen values and eigen matrix.
# 5. determine the LD function
#
import numpy as np
import pandas as pd
# creating data for group 1
v1 = np.array([0, 0])
cov_mat1 = np.array([[1, 0], [0, 1]])
sample1 = np.random.multivariate_normal(v1, cov_mat1, 20)
df = pd.DataFrame(sample1, columns=["f1", "f2"])
df["target"] = 1
# creating data for group 2
v2 = np.array([1, 1])
cov_mat2 = np.array([[1, 0], [0, 1]])
sample1 = np.random.multivariate_normal(v2, cov_mat2, 20)
df1 = pd.DataFrame(sample1, columns=["f1", "f2"])
df1["target"] = 0
df = df.append(df1, ignore_index=True)
df = df.sample(40)
df.head()
import plotly.express as px
fig = px.scatter(df, x=df["f1"], y=df["f2"], color=df["target"].astype("str"))
fig.show()
# step 1: Mean Centering
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
df.iloc[:, 0:2] = scaler.fit_transform(df.iloc[:, 0:2])
# step 2: create the within class scatter matrix s1
# scatter matrix class1
c1 = df[(df["target"] == 0)]
c1_features = c1[["f1", "f2"]].values
c1_mean = np.mean(c1_features, axis=0)
c1_f = c1_features - c1_mean
num_rows, num_cols = c1_f.shape
result_sum = np.zeros((num_cols, num_cols))
for i in range(num_rows):
row = c1_f[i]
r = np.outer(row.T, row)
result_sum += r
s1 = result_sum / c1.shape[0]
# scatter matrix class2
c2 = df[(df["target"] == 1)]  # fixed: class 2 corresponds to target == 1 (class 1 already used target == 0)
c2_features = c2[["f1", "f2"]].values
c2_mean = np.mean(c2_features, axis=0)
c2_f = c2_features - c2_mean
num_rows, num_cols = c2_f.shape
result_sum = np.zeros((num_cols, num_cols))
for i in range(num_rows):
row = c2_f[i]
r = np.outer(row.T, row)
result_sum += r
s2 = result_sum / c2.shape[0]
# scatter matrix within class
sw = s1 + s2
# scatter matrix between class: outer product of the difference of the class means
mean_diff = c1_mean - c2_mean
sb = np.outer(mean_diff, mean_diff)
sw
sb
# calculate eigen values and eigen vectors of Sw^-1 Sb (the standard LDA criterion)
eig_values, eig_vectors = np.linalg.eig(np.dot(np.linalg.inv(sw), sb))
eig_values
eig_vectors
df.head()
trsf_df = np.dot(df.iloc[:, 0:2], eig_vectors)  # project onto the eigenvectors (the columns of eig_vectors)
df2 = pd.DataFrame(trsf_df, columns=["LD1", "LD2"])
df2["target"] = df["target"].values
df2
# applying on MNIST dataset LDA
m_df = pd.read_csv("train.csv")
x = m_df.iloc[:, 1:]
y = m_df.iloc[:, 0]
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.2, random_state=42
)
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
lda = LDA()
x_lda = lda.fit_transform(x, y)
lda.explained_variance_ratio_
np.cumsum(lda.explained_variance_ratio_)
print(lda.classes_)
import plotly.express as px
fig = px.scatter_3d(x=x_lda[:, 0], y=x_lda[:, 1], z=x_lda[:, 2], color=y.astype("str"))
fig.show()
# # NMF(Non Negative Matrix Factorization)
# This is a very powerful algorithm with many applications. For example, it can be applied to Recommender Systems, to Collaborative Filtering, to topic modelling and to dimensionality reduction.
# In Python, it can work with sparse matrices, where the only restriction is that the values must be non-negative.
# The logic for dimensionality reduction is to take our m×n data and decompose it into two matrices of shape m×features and features×n respectively. The features are the reduced dimensions.
# Non-Negative Matrix Factorization (NMF or NNMF) is also a linear dimensionality reduction technique that can be used to reduce the dimensionality of the feature matrix.
# All dimensionality reduction techniques fall under the category of unsupervised machine learning in which we can reveal hidden patterns and important relationships in the data without requiring labels.
# So, dimensionality reduction algorithms deal with unlabeled data. When training such an algorithm, the fit() method only needs the feature matrix, X as the input and it does not require the label column, y.
# As its name implies, non-negative matrix factorization (NMF) needs the feature matrix to be non-negative.
# Because of this non-negativity constraint, the usage of NMF is limited to data with non-negative values such as image data (pixel values always lie between 0 and 255, hence there are no negative values in image data!).
# # MATHS BEHIND NMF
# Non-negative matrix factorization (NMF) is the process of decomposing a non-negative feature matrix, V (nxp) into a product of two non-negative matrices called W (nxd) and H (dxp). All three matrices should contain non-negative elements.
# The product of W and H matrices only gives an approximation to the matrix V. So, you should expect some information loss when applying NMF. Let's look into simpler version of it.
# 1. Imagine you have a matrix X with numbers arranged in rows and columns. Each number in the matrix represents a value of some kind. The goal of NMF is to find two new matrices, W and H, that can be multiplied together to approximate the original matrix X.
# 2. Matrix W contains basis vectors or prototypes, which are like building blocks for the data. Matrix H represents the coefficients or weights associated with those basis vectors.
# 3. To get the factorization, we want to minimize the difference between the original matrix X and the reconstructed matrix WH. We do this by finding the best values for W and H. This is achieved by an iterative process that minimizes a cost function, which measures the difference between X and WH.
# 4. One common cost function is the Euclidean distance, which calculates the squared difference between each element of X and its corresponding element in WH. The goal is to make this difference as small as possible.
# 5. To ensure that the factorization is non-negative, we constrain both W and H to be non-negative matrices. This constraint is important because it allows NMF to capture additive and non-negative parts-based representations of the data, making the factorization more interpretable.
# 6. To optimize the factorization, NMF uses an iterative algorithm called multiplicative update rules. This algorithm starts by randomly initializing the values of W and H, and then iteratively updates these values to minimize the cost function.
# 7. In each iteration, we update the values of W and H based on the relative contributions of the data X and the current approximation WH. These updates scale the current values of W and H in a way that reduces the difference between X and WH.
# 8. We continue these updates until we reach a point where the cost function no longer decreases significantly or a maximum number of iterations is reached. At this point, we have found the optimal values for W and H, and the factorization is complete.
# 9. By decomposing the original matrix X into W and H, NMF allows us to extract meaningful features or patterns from the data. These features can be used for various purposes, such as reducing the dimensionality of the data, clustering similar data points, or separating signals from noise.
# Overall, NMF is a technique that helps us find lower-rank non-negative matrices that, when multiplied together, approximate the original data matrix. This approximation is achieved through an iterative process that minimizes the difference between the original data and the reconstructed data. The non-negativity constraint ensures that the factorization captures meaningful and interpretable patterns in the data.
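# To make the multiplicative update rules above concrete, here is a tiny worked example
# (illustrative only) that factorizes a small non-negative matrix with d = 2 components.
import numpy as np

V = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0], [7.0, 8.0, 9.0]])
d = 2
rng = np.random.default_rng(0)
W = rng.random((3, d))
H = rng.random((d, 3))
for _ in range(500):
    H *= (W.T @ V) / (W.T @ W @ H + 1e-9)  # multiplicative update for H
    W *= (V @ H.T) / (W @ H @ H.T + 1e-9)  # multiplicative update for W
print("Reconstruction error ||V - WH||:", np.linalg.norm(V - W @ H))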
from sklearn.datasets import load_wine
data = load_wine()
df = pd.DataFrame(data=data["data"], columns=data["feature_names"])
df["target"] = data["target"]
df
import numpy as np
def nmf_dimensionality_reduction(X, num_components, num_iterations):
num_samples, num_features = X.shape
# Initialize W and H with random positive values
W = np.random.rand(num_samples, num_components)
H = np.random.rand(num_components, num_features)
for iteration in range(num_iterations):
# Update W
numerator_w = X @ H.T
denominator_w = W @ H @ H.T
W = W * np.divide(numerator_w, denominator_w)
# Update H
numerator_h = W.T @ X
denominator_h = W.T @ W @ H
H = H * np.divide(numerator_h, denominator_h)
# Reduce dimensionality by representing data using the coefficients in H
reduced_data = W
return reduced_data
# Example usage
# Assuming X is your dataset with shape (num_samples, num_features)
num_components = 3
num_iterations = 100
reduced_data = nmf_dimensionality_reduction(
    df.iloc[:, 0:13], num_components, num_iterations
)  # pass only the 13 feature columns; the target label should not enter the factorization
reduced_data
reduced_data["target"] = df["target"]
reduced_data
import plotly.express as px
fig = px.scatter_3d(
x=reduced_data.iloc[:, 0],
y=reduced_data.iloc[:, 1],
z=reduced_data.iloc[:, 2],
color=reduced_data["target"].astype("str"),
)
fig.show()
# IMPLEMENTING NMF ON THE MNIST DATASET USING THE SKLEARN LIBRARY
x.shape
y.shape
# visualizing
import matplotlib.pyplot as plt
n = 5
plt.figure(figsize=(6.75, 1.5))
for i in range(n):
ax = plt.subplot(1, n, i + 1)
plt.imshow(x.iloc[i].values.reshape(28, 28), cmap="binary")
ax.axis("off")
plt.show()
from sklearn.decomposition import NMF
# Taking d=9
nmf_model = NMF(n_components=9, init="random", random_state=0)
# W = transformed data matrix, x = original feature matrix
W = nmf_model.fit_transform(x)
# H = factorization matrix
H = nmf_model.components_
nmf_model.reconstruction_err_ # V-V'
# Here error is high as no of dimensions are 9
print("V_shape:", x.shape)
print("W_shape:", W.shape)
print("H_shape", nmf_model.components_.shape)
# visualize it
image_data_nmf_recovered = nmf_model.inverse_transform(
W
) # getting original from the transformed one
n = 5
plt.figure(figsize=(6.75, 1.5))
for i in range(n):
ax = plt.subplot(1, n, i + 1)
plt.imshow(image_data_nmf_recovered[i, :].reshape(28, 28), cmap="binary")
ax.axis("off")
plt.show()
# increase the number of components and watch the reconstruction error fall (note: fitting NMF for every value from 10 to 779 is very slow; in practice a coarser grid would be used):
for i in range(10, 780):
nmf_model = NMF(n_components=i, init="random", random_state=0)
# W = transformed data matrix, x = original feature matrix
W = nmf_model.fit_transform(x)
# H = factorization matrix
H = nmf_model.components_
print("For Components:-", i, " error is:- ", nmf_model.reconstruction_err_) # V-V'
nmf_model = NMF(n_components=100, init="random", random_state=0)
# W = transformed data matrix, x = original feature matrix
W = nmf_model.fit_transform(x)
# H = factorization matrix
H = nmf_model.components_
print("For Components:- 100 error is:- ", nmf_model.reconstruction_err_) # V-V'
nmf_model = NMF(n_components=700, init="random", random_state=0)
# W = transformed data matrix, x = original feature matrix
W = nmf_model.fit_transform(x)
# H = factorization matrix
H = nmf_model.components_
print("For Components:- 700 error is:- ", nmf_model.reconstruction_err_) # V-V'
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/672/129672950.ipynb
| null | null |
[{"Id": 129672950, "ScriptId": 38561200, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7431797, "CreationDate": "05/15/2023 16:25:25", "VersionNumber": 2.0, "Title": "Dimensionality Reduction", "EvaluationDate": "05/15/2023", "IsChange": false, "TotalLines": 547.0, "LinesInsertedFromPrevious": 0.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 547.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# **Dimensionality Reduction **
# ## **Dimensionality Reduction**
# Dimenionality reduction for me is the process of simplifying complex data by
# reducing the number of features or variables while retaining the important information.
# If I simplyfy it more, I can say it is a means to look at same picture from
# different dimensions, and then select the dimension that gives the perfect view where all the data is there and can be easily identified.
# ---
# Let me explain, this in more simpler terms, through a daily life example.
# Suppose you have a 1000 photographs and you want them to classify them and prepare different sets for each. You are classifying the photographs based on common themes or visual patterns. But there is another way in which you can group on the basis of categories or themes, making it easier to analyze and understand.
# ---
# In a similar manner, dimensionality reduction techniques take a complex dataset with numerous features and transform it into a lower-dimensional representation.
# It simplifies the data, making it more manageable and allowing for better visualization, analysis, and decision-making.
# ---
# In this notebook I am going to cover some techniques for dimensionality reduction with intution behind it:-
# 1. PCA
# 2. LDA
# 3. NMF
# # PCA (Principal Component Analysis)
# I am adding two links which will give you very good intution behind this algorithms.
# 1.https://stats.stackexchange.com/questions/2691/making-sense-of-principal-component-analysis-eigenvectors-eigenvalues/140579#140579
# 2.https://www.youtube.com/watch?v=iRbsBi5W0-c
#
# Before moving forward there are certain terms which I would like to explain certain terms such as Eigen values, Eigen vectors and covariance matrix.
# For that refer:-
# ->https://byjus.com/maths/eigen-values/
# ->https://byjus.com/covariance-matrix-formula/#:~:text=Covariance%20Matrix%20is%20a%20measure,matrix%20and%20variance%2Dcovariance%20matrix.
# # Preparing our own PCA Class
# Steps:-
# 1. Mean Centering
# 2. Find the Covariance Matrix among all the columns
# 3. Find eigen values/eigen Vectors by eigen decomposing covariance matrix
# 4. Now transform all the data points.
import numpy as np
import pandas as pd
np.random.seed(23)
# creating data for group 1
v1 = np.array([0, 0, 0])
cov_mat1 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
sample1 = np.random.multivariate_normal(v1, cov_mat1, 20)
df = pd.DataFrame(sample1, columns=["f1", "f2", "f3"])
df["target"] = 1
# creating data for group 2
v2 = np.array([1, 1, 1])
cov_mat2 = np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
sample1 = np.random.multivariate_normal(v2, cov_mat2, 20)
df1 = pd.DataFrame(sample1, columns=["f1", "f2", "f3"])
df1["target"] = 0
df = df.append(df1, ignore_index=True)
df = df.sample(40)
import plotly.express as px
fig = px.scatter_3d(
df, x=df["f1"], y=df["f2"], z=df["f3"], color=df["target"].astype("str")
)
fig.show()
# step 1: Mean Centering
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
df.iloc[:, 0:3] = scaler.fit_transform(df.iloc[:, 0:3])
# step2:-creating covariance matrix
cov_mat = np.cov([df.iloc[:, 0], df.iloc[:, 1], df.iloc[:, 2]])
print("Covariance Matrix", cov_mat)
# step 3 :- get the eigen values and eigen matrix- doing eigen decomposing on cov matrix
eig_values, eig_vectors = np.linalg.eig(cov_mat)
eig_values # since we have 3 features we will have 3 eigen values
eig_vectors # we have 3 eigen vectors as 3 values of eigen values are there for all 3 features
pc = eig_vectors[0:2]
# transforing using 2 principal components
trsf_df = np.dot(df.iloc[:, 0:3], pc.T)
df2 = pd.DataFrame(trsf_df, columns=["PC1", "PC2"])
df2["target"] = df["target"].values
df2
import plotly.express as px
fig = px.scatter(df2, x=df2["PC1"], y=df2["PC2"], color=df2["target"].astype("str"))
fig.show()
# #Implementing PCA using MNIST DATASET
from google.colab import drive
drive.mount("/content/gdrive")
m_df = pd.read_csv("train.csv")
m_df
m_df = m_df.fillna(0)
import matplotlib.pyplot as plt
plt.imshow(m_df.iloc[13912, 1:].values.reshape(28, 28))
x = m_df.iloc[:, 1:]
y = m_df.iloc[:, 0]
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.2, random_state=42
)
x_train.shape
from sklearn.neighbors import KNeighborsClassifier
knn = KNeighborsClassifier()
knn.fit(x_train, y_train)
y_pred = knn.predict(x_test)
from sklearn.metrics import accuracy_score
accuracy_score(y_test, y_pred)
# APPLYING PCA
#
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
x_train1 = scaler.fit_transform(x_train)
x_test1 = scaler.transform(x_test)
from sklearn.decomposition import PCA
pca = PCA(n_components=100)
x_train1 = pca.fit_transform(x_train1)
x_test1 = pca.transform(x_test1)
x_train1.shape
knn1 = KNeighborsClassifier()
knn1.fit(x_train1, y_train)
y_pred1 = knn1.predict(x_test1)
accuracy_score(y_test, y_pred1)
# plotting it in 2 D and 3 D
pca = PCA(n_components=2)
x_train1 = pca.fit_transform(x_train1)
x_test1 = pca.transform(x_test1)
import plotly.express as px
fig = px.scatter(x=x_train1[:, 0], y=x_train1[:, 1], color=y_train.astype("str"))
fig.show()
x_train1 = scaler.fit_transform(x_train)
x_test1 = scaler.transform(x_test)
pca = PCA(n_components=3)
x_train1 = pca.fit_transform(x_train1)
x_test1 = pca.transform(x_test1)
import plotly.express as px
fig = px.scatter_3d(
x=x_train1[:, 0], y=x_train1[:, 1], z=x_train1[:, 2], color=y_train.astype("str")
)
fig.show()
pca.explained_variance_ # eigen_values
pca.components_ # eigen_vectors
# finding optimum no of PCA
x_train1 = scaler.fit_transform(x_train)
x_test1 = scaler.transform(x_test)
pca = PCA(n_components=None)
x_train1 = pca.fit_transform(x_train1)
x_test1 = pca.transform(x_test1)
pca.explained_variance_ratio_
pca.explained_variance_ratio_.shape
np.cumsum(pca.explained_variance_ratio_)
plt.plot(np.cumsum(pca.explained_variance_ratio_))
# explained variance
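# One way to act on the cumulative curve above: keep the smallest number of components that reaches a chosen variance threshold (the 0.95 used here is an arbitrary example, and pca is the full-rank fit from the cell above).
cum_var = np.cumsum(pca.explained_variance_ratio_)
n_components_95 = np.argmax(cum_var >= 0.95) + 1
print("components needed for 95% of the variance:", n_components_95)
# sklearn can also do this directly: PCA(n_components=0.95) keeps exactly that many components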
# # When not to use PCA
# While Principal Component Analysis (PCA) is a powerful dimensionality reduction technique, there are situations where it may not be suitable or may not provide optimal results. Here are some cases when PCA may not be the best choice:
# Non-Linear Relationships: PCA assumes a linear relationship between variables. If the underlying relationships in the data are non-linear, PCA may not capture the essential structures effectively. In such cases, nonlinear dimensionality reduction techniques, such as manifold learning methods (e.g., t-SNE, Isomap), may be more appropriate.
# Preserving Interpretability: PCA creates linear combinations of the original features to form the principal components. While this transformation is useful for dimensionality reduction, it often results in a loss of interpretability. If preserving the interpretability of the original features is crucial for your analysis, alternative techniques that retain the original features, such as feature selection or feature extraction methods with explicit interpretations (e.g., factor analysis), might be more suitable.
# Sparse or Sparse-Semantic Data: PCA works best when the data exhibits dense and continuous patterns. If the data is sparse, meaning most of the values are zeros or missing, PCA may not perform well. In such cases, specialized techniques for handling sparse data, such as sparse PCA or non-negative matrix factorization (NMF), could be more appropriate.
# Outliers: PCA is sensitive to outliers in the data. Outliers can disproportionately influence the calculation of principal components, potentially leading to a distorted representation of the data. If the presence of outliers is a concern, it is advisable to either preprocess the data to handle outliers or consider robust dimensionality reduction techniques that are less affected by outliers.
# Retaining Specific Features: PCA aims to capture the most significant variance in the data but may not prioritize preserving specific features that are important for your analysis. If you have domain knowledge and specific variables that you want to retain in the dimensionality reduction process, other methods such as feature selection or domain-specific techniques may be more appropriate.
# Large Datasets: PCA can be computationally expensive for very large datasets. As the number of variables or samples increases, the computational and memory requirements of PCA grow. In such cases, approximate or incremental PCA methods can be used to handle large-scale datasets.
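# For the large-dataset point above, scikit-learn's IncrementalPCA fits in mini-batches instead of holding everything in memory at once. A minimal sketch, reusing the MNIST split and scaler from earlier (the component count and batch size are arbitrary examples):
from sklearn.decomposition import IncrementalPCA
x_big = scaler.fit_transform(x_train)
ipca = IncrementalPCA(n_components=50, batch_size=1000)
x_big_ipca = ipca.fit_transform(x_big)  # the data is processed in mini-batches internally (partial_fit can also be called chunk by chunk)
print(x_big_ipca.shape)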
# # LDA (Linear Discriminant Analysis)
# Linear Discriminant Analysis (LDA) is a commonly used dimensionality reduction technique in machine learning, applicable to two-class and multi-class classification problems. It is also known as Normal Discriminant Analysis (NDA) or Discriminant Function Analysis (DFA).
# It works well if the data is linearly separable.
# It is a kind of linear transformation technique.
# It takes the class labels into account as well, which makes it a supervised algorithm.
# # Let us understand this by using the simple example:-
# 1. Imagine you have a bunch of pictures of different animals, like cats, dogs, and birds. Each picture has many features, such as the color, size, and shape of the animal.
# 2. Now, you want to find a way to represent these pictures using fewer features, so you can understand the differences between the animals better. This is where dimensionality reduction comes in.
# 3. LDA is a method that helps us reduce the number of features while still keeping the important information that helps us distinguish between different classes, like cats, dogs, and birds.
# 4. First, we need to gather some examples of the animals we want to classify, like pictures of cats, dogs, and birds. We also need to know which animal each picture belongs to (the class labels).
# 5. Next, we look at the features of the animals in the pictures, such as their color, size, and shape. LDA tries to find a new set of features that are good at separating the animals into their correct classes.
# 6. To do this, LDA calculates the average (mean) feature values for each class. For example, it finds the average color, size, and shape of all the cat pictures, and does the same for dogs and birds.
# 7. LDA also calculates the spread or variance of the feature values within each class. This tells us how much the feature values differ within each animal class. It helps LDA understand the differences between individual animals of the same class.
# 8. Now, LDA wants to find a set of features that makes the spread between different classes as large as possible, while keeping the spread within each class as small as possible. This means the new features should help us separate animals of different classes, but be similar for animals of the same class.
# 9. LDA achieves this by finding a direction (line) in the feature space that maximizes the differences between classes and minimizes the differences within each class. It's like drawing a line that best separates the animals based on their features.
# 10. Once LDA finds this line, it uses it to project or transform the original pictures onto a lower-dimensional space. This means we reduce the number of features in each picture but still keep the important information that helps us classify the animals correctly.
# 11. Finally, we can use these lower-dimensional representations of the pictures to classify new animals. LDA has helped us reduce the complexity of the pictures while preserving the important information that distinguishes one animal from another.
#
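# To make steps 4-11 concrete before coding them by hand, here is a minimal sketch using sklearn's LinearDiscriminantAnalysis on a small made-up two-class dataset (all numbers are arbitrary):
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
import numpy as np
rng = np.random.default_rng(0)
class_a = rng.normal(loc=[0, 0], scale=1.0, size=(50, 2))  # made-up samples for class 0
class_b = rng.normal(loc=[3, 3], scale=1.0, size=(50, 2))  # made-up samples for class 1
X_toy = np.vstack([class_a, class_b])
y_toy = np.array([0] * 50 + [1] * 50)
lda_toy = LinearDiscriminantAnalysis(n_components=1)  # 2 classes -> at most 1 discriminant direction
X_toy_1d = lda_toy.fit_transform(X_toy, y_toy)
print(X_toy_1d.shape)  # (100, 1): every sample projected onto the single direction that best separates the classes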
# # **Let's look at the step by step implementation of LDA **
# 1. Calculate mean for each class and overall class.
# 2. Calculate within class co-variance matrix.
# 3. calculate between class covariance matrix.
# 4. calculate the eigen values and eigen matrix.
# 5. determine the LD function
#
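# In symbols (two-class case, matching the steps above): the within-class scatter is $S_W = S_1 + S_2$ with $S_c = \sum_{x \in c} (x - \mu_c)(x - \mu_c)^T$, the between-class scatter is $S_B = (\mu_1 - \mu_2)(\mu_1 - \mu_2)^T$, and LDA looks for the direction $w$ that maximizes the Fisher criterion $J(w) = \frac{w^T S_B w}{w^T S_W w}$, which turns out to be the leading eigenvector of $S_W^{-1} S_B$.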
import numpy as np
import pandas as pd
# creating data for group 1
v1 = np.array([0, 0])
cov_mat1 = np.array([[1, 0], [0, 1]])
sample1 = np.random.multivariate_normal(v1, cov_mat1, 20)
df = pd.DataFrame(sample1, columns=["f1", "f2"])
df["target"] = 1
# creating data for group 2
v2 = np.array([1, 1])
cov_mat2 = np.array([[1, 0], [0, 1]])
sample1 = np.random.multivariate_normal(v2, cov_mat2, 20)
df1 = pd.DataFrame(sample1, columns=["f1", "f2"])
df1["target"] = 0
df = pd.concat([df, df1], ignore_index=True)  # DataFrame.append was removed in pandas 2.0
df = df.sample(40)
df.head()
import plotly.express as px
fig = px.scatter(df, x=df["f1"], y=df["f2"], color=df["target"].astype("str"))
fig.show()
# step 1: Mean Centering
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
df.iloc[:, 0:2] = scaler.fit_transform(df.iloc[:, 0:2])
# step 2: create the within class scatter matrix s1
# scatter matrix class1
c1 = df[(df["target"] == 0)]
c1_features = c1[["f1", "f2"]].values
c1_mean = np.mean(c1_features, axis=0)
c1_f = c1_features - c1_mean
num_rows, num_cols = c1_f.shape
result_sum = np.zeros((num_cols, num_cols))
for i in range(num_rows):
row = c1_f[i]
r = np.outer(row.T, row)
result_sum += r
s1 = result_sum / c1.shape[0]
# scatter matrix class2
c2 = df[(df["target"] == 0)]
c2_features = c2[["f1", "f2"]].values
c2_mean = np.mean(c2_features, axis=0)
c2_f = c2_features - c2_mean
num_rows, num_cols = c2_f.shape
result_sum = np.zeros((num_cols, num_cols))
for i in range(num_rows):
row = c2_f[i]
r = np.outer(row.T, row)
result_sum += r
s2 = result_sum / c2.shape[0]
# scatter matrix within class
sw = s1 + s2
# scatter matrix between class: outer product of the difference between the class means
mean_diff = c1_mean - c2_mean
sb = np.outer(mean_diff, mean_diff)
sw
sb
# calculate eigen values and eigen vectors of inv(Sw) @ Sb, whose leading eigenvector maximizes the Fisher criterion
eig_values, eig_vectors = np.linalg.eig(np.dot(np.linalg.inv(sw), sb))
eig_values
eig_vectors
df.head()
# project onto the eigenvectors (columns), ordered by eigenvalue; with two classes only LD1 carries class information
order = np.argsort(eig_values)[::-1]
trsf_df = np.dot(df.iloc[:, 0:2], eig_vectors[:, order])
df2 = pd.DataFrame(trsf_df, columns=["LD1", "LD2"])
df2["target"] = df["target"].values
df2
# applying on MNIST dataset LDA
m_df = pd.read_csv("train.csv")
x = m_df.iloc[:, 1:]
y = m_df.iloc[:, 0]
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.2, random_state=42
)
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
lda = LDA()
x_lda = lda.fit_transform(x, y)
lda.explained_variance_ratio_
np.cumsum(lda.explained_variance_ratio_)
print(lda.classes_)
import plotly.express as px
fig = px.scatter_3d(x=x_lda[:, 0], y=x_lda[:, 1], z=x_lda[:, 2], color=y.astype("str"))
fig.show()
# # NMF(Non Negative Matrix Factorization)
# This is a very powerful algorithm with many applications: for example, recommender systems, collaborative filtering, topic modelling and dimensionality reduction.
# In Python, it can work with sparse matrix where the only restriction is that the values should be non-negative.
# The logic for Dimensionality Reduction is to take our m×n data and to decompose it into two matrices of m×features and features×n respectively. The features will be the reduced dimensions.
# Non-Negative Matrix Factorization (NMF or NNMF) is also a linear dimensionality reduction technique that can be used to reduce the dimensionality of the feature matrix.
# Unlike LDA above, NMF (like PCA) is an unsupervised technique: it can reveal hidden patterns and important relationships in the data without requiring labels.
# So such algorithms deal with unlabeled data: when training, the fit() method only needs the feature matrix X as input and does not require the label column y.
# As its name implies, non-negative matrix factorization (NMF) needs the feature matrix to be non-negative.
# Because of this non-negativity constraint, the usage of NMF is limited to data with non-negative values such as image data (pixel values always lie between 0 and 255, hence there are no negative values in image data!).
# # MATHS BEHIND NMF
# Non-negative matrix factorization (NMF) is the process of decomposing a non-negative feature matrix, V (nxp) into a product of two non-negative matrices called W (nxd) and H (dxp). All three matrices should contain non-negative elements.
# The product of W and H matrices only gives an approximation to the matrix V. So, you should expect some information loss when applying NMF. Let's look into simpler version of it.
# 1. Imagine you have a matrix X with numbers arranged in rows and columns. Each number in the matrix represents a value of some kind. The goal of NMF is to find two new matrices, W and H, that can be multiplied together to approximate the original matrix X.
# 2. Matrix W contains basis vectors or prototypes, which are like building blocks for the data. Matrix H represents the coefficients or weights associated with those basis vectors.
# 3. To get the factorization, we want to minimize the difference between the original matrix X and the reconstructed matrix WH. We do this by finding the best values for W and H. This is achieved by an iterative process that minimizes a cost function, which measures the difference between X and WH.
# 4. One common cost function is the Euclidean distance, which calculates the squared difference between each element of X and its corresponding element in WH. The goal is to make this difference as small as possible.
# 5. To ensure that the factorization is non-negative, we constrain both W and H to be non-negative matrices. This constraint is important because it allows NMF to capture additive and non-negative parts-based representations of the data, making the factorization more interpretable.
# 6. To optimize the factorization, NMF uses an iterative algorithm called multiplicative update rules. This algorithm starts by randomly initializing the values of W and H, and then iteratively updates these values to minimize the cost function.
# 7. In each iteration, we update the values of W and H based on the relative contributions of the data X and the current approximation WH. These updates scale the current values of W and H in a way that reduces the difference between X and WH.
# 8. We continue these updates until we reach a point where the cost function no longer decreases significantly or a maximum number of iterations is reached. At this point, we have found the optimal values for W and H, and the factorization is complete.
# 9. By decomposing the original matrix X into W and H, NMF allows us to extract meaningful features or patterns from the data. These features can be used for various purposes, such as reducing the dimensionality of the data, clustering similar data points, or separating signals from noise.
# Overall, NMF is a technique that helps us find lower-rank non-negative matrices that, when multiplied together, approximate the original data matrix. This approximation is achieved through an iterative process that minimizes the difference between the original data and the reconstructed data. The non-negativity constraint ensures that the factorization captures meaningful and interpretable patterns in the data.
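# Concretely, for the Euclidean cost $\lVert V - WH \rVert_F^2$ the multiplicative updates mentioned in steps 6-7 are $W \leftarrow W \odot \frac{V H^T}{W H H^T}$ and $H \leftarrow H \odot \frac{W^T V}{W^T W H}$ (element-wise multiplication and division); this is exactly what the from-scratch function below implements.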
from sklearn.datasets import load_wine
data = load_wine()
df = pd.DataFrame(data=data["data"], columns=data["feature_names"])
df["target"] = data["target"]
df
import numpy as np
def nmf_dimensionality_reduction(X, num_components, num_iterations):
    X = np.asarray(X, dtype=float)  # work on a plain array so the updates below stay purely element-wise
    num_samples, num_features = X.shape
# Initialize W and H with random positive values
W = np.random.rand(num_samples, num_components)
H = np.random.rand(num_components, num_features)
for iteration in range(num_iterations):
# Update W
numerator_w = X @ H.T
denominator_w = W @ H @ H.T
W = W * np.divide(numerator_w, denominator_w)
# Update H
numerator_h = W.T @ X
denominator_h = W.T @ W @ H
H = H * np.divide(numerator_h, denominator_h)
    # The rows of W are the reduced (num_components-dimensional) representation of the samples;
    # H holds the learned components. Return W as a DataFrame for the plotting cells below.
    reduced_data = pd.DataFrame(W)
    return reduced_data
# Example usage
# Assuming X is your dataset with shape (num_samples, num_features)
num_components = 3
num_iterations = 100
reduced_data = nmf_dimensionality_reduction(
    df.iloc[:, 0:13], num_components, num_iterations  # only the 13 feature columns, not the target
)
reduced_data
reduced_data["target"] = df["target"]
reduced_data
import plotly.express as px
fig = px.scatter_3d(
x=reduced_data.iloc[:, 0],
y=reduced_data.iloc[:, 1],
z=reduced_data.iloc[:, 2],
color=reduced_data["target"].astype("str"),
)
fig.show()
# IMPLEMENTING MNIST DATASET USING SKLEARN LIBRARY NMF
x.shape
y.shape
# visualizing
import matplotlib.pyplot as plt
n = 5
plt.figure(figsize=(6.75, 1.5))
for i in range(n):
ax = plt.subplot(1, n, i + 1)
plt.imshow(x.iloc[i].values.reshape(28, 28), cmap="binary")
ax.axis("off")
plt.show()
from sklearn.decomposition import NMF
# Taking d=9
nmf_model = NMF(n_components=9, init="random", random_state=0)
# W = transformed data matrix, x = original feature matrix
W = nmf_model.fit_transform(x)
# H = factorization matrix
H = nmf_model.components_
nmf_model.reconstruction_err_ # V-V'
# Here error is high as no of dimensions are 9
print("V_shape:", x.shape)
print("W_shape:", W.shape)
print("H_shape", nmf_model.components_.shape)
# visualize it
image_data_nmf_recovered = nmf_model.inverse_transform(
W
) # getting original from the transformed one
n = 5
plt.figure(figsize=(6.75, 1.5))
for i in range(n):
ax = plt.subplot(1, n, i + 1)
plt.imshow(image_data_nmf_recovered[i, :].reshape(28, 28), cmap="binary")
ax.axis("off")
plt.show()
# increasing the number of components and watching the reconstruction error fall (note: this loop fits 770 separate NMF models, which is extremely slow):
for i in range(10, 780):
nmf_model = NMF(n_components=i, init="random", random_state=0)
# W = transformed data matrix, x = original feature matrix
W = nmf_model.fit_transform(x)
# H = factorization matrix
H = nmf_model.components_
print("For Components:-", i, " error is:- ", nmf_model.reconstruction_err_) # V-V'
nmf_model = NMF(n_components=100, init="random", random_state=0)
# W = transformed data matrix, x = original feature matrix
W = nmf_model.fit_transform(x)
# H = factorization matrix
H = nmf_model.components_
print("For Components:- 100 error is:- ", nmf_model.reconstruction_err_) # V-V'
nmf_model = NMF(n_components=700, init="random", random_state=0)
# W = transformed data matrix, x = original feature matrix
W = nmf_model.fit_transform(x)
# H = factorization matrix
H = nmf_model.components_
print("For Components:- 700 error is:- ", nmf_model.reconstruction_err_) # V-V'
| false | 0 | 6,575 | 0 | 6,575 | 6,575 |
||
129672448
|
<jupyter_start><jupyter_text>Video Game Sales
This dataset contains a list of video games with sales greater than 100,000 copies. It was generated by a scrape of [vgchartz.com][1].
Fields include
* Rank - Ranking of overall sales
* Name - The games name
* Platform - Platform of the games release (i.e. PC,PS4, etc.)
* Year - Year of the game's release
* Genre - Genre of the game
* Publisher - Publisher of the game
* NA_Sales - Sales in North America (in millions)
* EU_Sales - Sales in Europe (in millions)
* JP_Sales - Sales in Japan (in millions)
* Other_Sales - Sales in the rest of the world (in millions)
* Global_Sales - Total worldwide sales.
The script to scrape the data is available at https://github.com/GregorUT/vgchartzScrape.
It is based on BeautifulSoup using Python.
There are 16,598 records. 2 records were dropped due to incomplete information.
[1]: http://www.vgchartz.com/
Kaggle dataset identifier: videogamesales
<jupyter_code>import pandas as pd
df = pd.read_csv('videogamesales/vgsales.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 16598 entries, 0 to 16597
Data columns (total 11 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Rank 16598 non-null int64
1 Name 16598 non-null object
2 Platform 16598 non-null object
3 Year 16327 non-null float64
4 Genre 16598 non-null object
5 Publisher 16540 non-null object
6 NA_Sales 16598 non-null float64
7 EU_Sales 16598 non-null float64
8 JP_Sales 16598 non-null float64
9 Other_Sales 16598 non-null float64
10 Global_Sales 16598 non-null float64
dtypes: float64(6), int64(1), object(4)
memory usage: 1.4+ MB
<jupyter_text>Examples:
{
"Rank": 1,
"Name": "Wii Sports",
"Platform": "Wii",
"Year": 2006,
"Genre": "Sports",
"Publisher": "Nintendo",
"NA_Sales": 41.49,
"EU_Sales": 29.02,
"JP_Sales": 3.77,
"Other_Sales": 8.46,
"Global_Sales": 82.74
}
{
"Rank": 2,
"Name": "Super Mario Bros.",
"Platform": "NES",
"Year": 1985,
"Genre": "Platform",
"Publisher": "Nintendo",
"NA_Sales": 29.08,
"EU_Sales": 3.58,
"JP_Sales": 6.8100000000000005,
"Other_Sales": 0.77,
"Global_Sales": 40.24
}
{
"Rank": 3,
"Name": "Mario Kart Wii",
"Platform": "Wii",
"Year": 2008,
"Genre": "Racing",
"Publisher": "Nintendo",
"NA_Sales": 15.85,
"EU_Sales": 12.88,
"JP_Sales": 3.79,
"Other_Sales": 3.31,
"Global_Sales": 35.82
}
{
"Rank": 4,
"Name": "Wii Sports Resort",
"Platform": "Wii",
"Year": 2009,
"Genre": "Sports",
"Publisher": "Nintendo",
"NA_Sales": 15.75,
"EU_Sales": 11.01,
"JP_Sales": 3.2800000000000002,
"Other_Sales": 2.96,
"Global_Sales": 33.0
}
<jupyter_script># **Importing pandas and numpy to perform DA**
import numpy as np
import pandas as pd
# **Downloading Data using pandas read_csv function**
data = pd.read_csv("/kaggle/input/videogamesales/vgsales.csv")
# **Checking the info about the data**
data.info()
# there are total 11 Columns and 16598 Rows
# total datatypes are: int, object, float
# there are 2 columns with null cells. Year & Publisher
# **Describing the File**
data.describe()
# **Counting total number of category of games**
#
data["Genre"].value_counts()
# there are total 12 types of games
# Top 3 max category are Action, Sports & Misc
# **Checking Null cells in Year Column**
nulldata = data["Year"].isnull()
# Storing the value in nulldata variable to check the null cells
data[nulldata]
# Checking the mean value to fill the null value
data["Year"].mean().round()
# **Filling the null in Year Column**
data["Year"].fillna(2006, inplace=True)
# **Rechecking whether null is filled with given value or not**
data.info()
# the null cells are filled with the specified value in the Year column
# **Checking the Publisher Columns Null cells**
p = data["Publisher"].isnull()
data[p]
# there are null cells in Publisher Column
# **Filling the null Cells**
data["Publisher"].fillna("Unknown", inplace=True)
# Filled the null cells with value "Unknown" as there are some cells containing Unknown.
# **ReChecking the info of data**
data.info()
data.head()
# **Checking duplicate rows**
dupe = data.duplicated(subset=["Name", "Platform", "Year", "Genre", "Publisher"])
# storing the duplicates in variable dupe
data[dupe]
# There are 2 rows with duplicate records
# **For correct analyzing we have to delete the duplicate rows**
data.drop_duplicates(
subset=["Name", "Platform", "Year", "Genre", "Publisher"],
ignore_index=True,
inplace=True,
)
data.info()
# from 16598 Rows now it is 16596 rows
# Duplicate rows have been deleted
# **For better understanding lets customize the index and columns**
data.set_index("Rank", inplace=True)
data.head()
# Rank Column is the new Index
# **Will Replace "_" with a "Space" in column names**
data.columns = data.columns.str.replace("_", " ")
data.head()
# "_" is been replaced with " "
# **Sorting the sales columns in Descending Order**
data.sort_values("NA Sales", ascending=False, inplace=True)
data.sort_values("EU Sales", ascending=False, inplace=True)
data.sort_values("JP Sales", ascending=False, inplace=True)
data.sort_values("Other Sales", ascending=False, inplace=True)
data.sort_values("Global Sales", ascending=False, inplace=True)
data[0:20]
# Note: each sort_values call replaces the previous ordering, so the final table is sorted by Global Sales (descending)
# **Calculating Sum, Mean, Median & std for NA Sales, EU Sales, JP Sales, Other Sales & Global Sales columns**
data["NA Sales"].sum()
data["EU Sales"].sum()
data["Global Sales"].mean()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/672/129672448.ipynb
|
videogamesales
|
gregorut
|
[{"Id": 129672448, "ScriptId": 38558928, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14773018, "CreationDate": "05/15/2023 16:20:42", "VersionNumber": 1.0, "Title": "notebookb51334f45b", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 109.0, "LinesInsertedFromPrevious": 109.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185985934, "KernelVersionId": 129672448, "SourceDatasetVersionId": 618}]
|
[{"Id": 618, "DatasetId": 284, "DatasourceVersionId": 618, "CreatorUserId": 462330, "LicenseName": "Unknown", "CreationDate": "10/26/2016 09:10:49", "VersionNumber": 2.0, "Title": "Video Game Sales", "Slug": "videogamesales", "Subtitle": "Analyze sales data from more than 16,500 games.", "Description": "This dataset contains a list of video games with sales greater than 100,000 copies. It was generated by a scrape of [vgchartz.com][1].\n\nFields include\n\n* Rank - Ranking of overall sales\n\n* Name - The games name\n\n* Platform - Platform of the games release (i.e. PC,PS4, etc.)\n\n* Year - Year of the game's release\n\n* Genre - Genre of the game\n\n* Publisher - Publisher of the game\n\n* NA_Sales - Sales in North America (in millions)\n\n* EU_Sales - Sales in Europe (in millions)\n\n* JP_Sales - Sales in Japan (in millions)\n\n* Other_Sales - Sales in the rest of the world (in millions)\n\n* Global_Sales - Total worldwide sales.\n\nThe script to scrape the data is available at https://github.com/GregorUT/vgchartzScrape.\nIt is based on BeautifulSoup using Python.\nThere are 16,598 records. 2 records were dropped due to incomplete information.\n\n\n [1]: http://www.vgchartz.com/", "VersionNotes": "Cleaned up formating", "TotalCompressedBytes": 1355781.0, "TotalUncompressedBytes": 1355781.0}]
|
[{"Id": 284, "CreatorUserId": 462330, "OwnerUserId": 462330.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 618.0, "CurrentDatasourceVersionId": 618.0, "ForumId": 1788, "Type": 2, "CreationDate": "10/26/2016 08:17:30", "LastActivityDate": "02/06/2018", "TotalViews": 1798828, "TotalDownloads": 471172, "TotalVotes": 5485, "TotalKernels": 1480}]
|
[{"Id": 462330, "UserName": "gregorut", "DisplayName": "GregorySmith", "RegisterDate": "11/09/2015", "PerformanceTier": 1}]
|
# **Importing pandas and numpy to perform DA**
import numpy as np
import pandas as pd
# **Downloading Data using pandas read_csv function**
data = pd.read_csv("/kaggle/input/videogamesales/vgsales.csv")
# **Checking the info about the data**
data.info()
# there are total 11 Columns and 16598 Rows
# total datatypes are: int, object, float
# there are 2 columns with null cells. Year & Publisher
# **Describing the File**
data.describe()
# **Counting total number of category of games**
#
data["Genre"].value_counts()
# there are total 12 types of games
# Top 3 max category are Action, Sports & Misc
# **Checking Null cells in Year Column**
nulldata = data["Year"].isnull()
# Storing the value in nulldata variable to check the null cells
data[nulldata]
# Checking the mean value to fill the null value
data["Year"].mean().round()
# **Filling the null in Year Column**
data["Year"].fillna(2006, inplace=True)
# **Rechecking whether null is filled with given value or not**
data.info()
# the null cells is filled with the mentioned value in Year Column
# **Checking the Publisher Columns Null cells**
p = data["Publisher"].isnull()
data[p]
# there are null cells in Publisher Column
# **Filling the null Cells**
data["Publisher"].fillna("Unknown", inplace=True)
# Filled the null cells with value "Unknown" as there are some cells containing Unknown.
# **ReChecking the info of data**
data.info()
data.head()
# **Checking duplicate rows**
dupe = data.duplicated(subset=["Name", "Platform", "Year", "Genre", "Publisher"])
# storing the duplicates in valiable dupe
data[dupe]
# There are 2 rows with duplicate records
# **For correct analyzing we have to delete the duplicate rows**
data.drop_duplicates(
subset=["Name", "Platform", "Year", "Genre", "Publisher"],
ignore_index=True,
inplace=True,
)
data.info()
# from 16598 Rows now it is 16596 rows
# Duplicate rows are been deleted
# **For better understanding lets customize the index and columns**
data.set_index("Rank", inplace=True)
data.head()
# Rank Column is the new Index
# **Will Replace "_" with a "Space" in column names**
data.columns = data.columns.str.replace("_", " ")
data.head()
# "_" is been replaced with " "
# **Sorting the sales columns in Decending Order**
data.sort_values("NA Sales", ascending=False, inplace=True)
data.sort_values("EU Sales", ascending=False, inplace=True)
data.sort_values("JP Sales", ascending=False, inplace=True)
data.sort_values("Other Sales", ascending=False, inplace=True)
data.sort_values("Global Sales", ascending=False, inplace=True)
data[0:20]
# Sorting is Successful in Descending Order
# **Calculating Sum, Mean, Median & std for NA Sales, EU Sales, JP Sales, Other Sales & Global Sales columns**
data["NA Sales"].sum()
data["EU Sales"].sum()
data["Global Sales"].mean()
|
[{"videogamesales/vgsales.csv": {"column_names": "[\"Rank\", \"Name\", \"Platform\", \"Year\", \"Genre\", \"Publisher\", \"NA_Sales\", \"EU_Sales\", \"JP_Sales\", \"Other_Sales\", \"Global_Sales\"]", "column_data_types": "{\"Rank\": \"int64\", \"Name\": \"object\", \"Platform\": \"object\", \"Year\": \"float64\", \"Genre\": \"object\", \"Publisher\": \"object\", \"NA_Sales\": \"float64\", \"EU_Sales\": \"float64\", \"JP_Sales\": \"float64\", \"Other_Sales\": \"float64\", \"Global_Sales\": \"float64\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 16598 entries, 0 to 16597\nData columns (total 11 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 Rank 16598 non-null int64 \n 1 Name 16598 non-null object \n 2 Platform 16598 non-null object \n 3 Year 16327 non-null float64\n 4 Genre 16598 non-null object \n 5 Publisher 16540 non-null object \n 6 NA_Sales 16598 non-null float64\n 7 EU_Sales 16598 non-null float64\n 8 JP_Sales 16598 non-null float64\n 9 Other_Sales 16598 non-null float64\n 10 Global_Sales 16598 non-null float64\ndtypes: float64(6), int64(1), object(4)\nmemory usage: 1.4+ MB\n", "summary": "{\"Rank\": {\"count\": 16598.0, \"mean\": 8300.605253645017, \"std\": 4791.853932896403, \"min\": 1.0, \"25%\": 4151.25, \"50%\": 8300.5, \"75%\": 12449.75, \"max\": 16600.0}, \"Year\": {\"count\": 16327.0, \"mean\": 2006.4064433147546, \"std\": 5.828981114712805, \"min\": 1980.0, \"25%\": 2003.0, \"50%\": 2007.0, \"75%\": 2010.0, \"max\": 2020.0}, \"NA_Sales\": {\"count\": 16598.0, \"mean\": 0.26466742981082064, \"std\": 0.8166830292988796, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.08, \"75%\": 0.24, \"max\": 41.49}, \"EU_Sales\": {\"count\": 16598.0, \"mean\": 0.14665200626581515, \"std\": 0.5053512312869116, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.02, \"75%\": 0.11, \"max\": 29.02}, \"JP_Sales\": {\"count\": 16598.0, \"mean\": 0.077781660441017, \"std\": 0.30929064808220297, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.0, \"75%\": 0.04, \"max\": 10.22}, \"Other_Sales\": {\"count\": 16598.0, \"mean\": 0.0480630196409206, \"std\": 0.18858840291271461, \"min\": 0.0, \"25%\": 0.0, \"50%\": 0.01, \"75%\": 0.04, \"max\": 10.57}, \"Global_Sales\": {\"count\": 16598.0, \"mean\": 0.5374406555006628, \"std\": 1.5550279355699124, \"min\": 0.01, \"25%\": 0.06, \"50%\": 0.17, \"75%\": 0.47, \"max\": 82.74}}", "examples": "{\"Rank\":{\"0\":1,\"1\":2,\"2\":3,\"3\":4},\"Name\":{\"0\":\"Wii Sports\",\"1\":\"Super Mario Bros.\",\"2\":\"Mario Kart Wii\",\"3\":\"Wii Sports Resort\"},\"Platform\":{\"0\":\"Wii\",\"1\":\"NES\",\"2\":\"Wii\",\"3\":\"Wii\"},\"Year\":{\"0\":2006.0,\"1\":1985.0,\"2\":2008.0,\"3\":2009.0},\"Genre\":{\"0\":\"Sports\",\"1\":\"Platform\",\"2\":\"Racing\",\"3\":\"Sports\"},\"Publisher\":{\"0\":\"Nintendo\",\"1\":\"Nintendo\",\"2\":\"Nintendo\",\"3\":\"Nintendo\"},\"NA_Sales\":{\"0\":41.49,\"1\":29.08,\"2\":15.85,\"3\":15.75},\"EU_Sales\":{\"0\":29.02,\"1\":3.58,\"2\":12.88,\"3\":11.01},\"JP_Sales\":{\"0\":3.77,\"1\":6.81,\"2\":3.79,\"3\":3.28},\"Other_Sales\":{\"0\":8.46,\"1\":0.77,\"2\":3.31,\"3\":2.96},\"Global_Sales\":{\"0\":82.74,\"1\":40.24,\"2\":35.82,\"3\":33.0}}"}}]
| true | 1 |
<start_data_description><data_path>videogamesales/vgsales.csv:
<column_names>
['Rank', 'Name', 'Platform', 'Year', 'Genre', 'Publisher', 'NA_Sales', 'EU_Sales', 'JP_Sales', 'Other_Sales', 'Global_Sales']
<column_types>
{'Rank': 'int64', 'Name': 'object', 'Platform': 'object', 'Year': 'float64', 'Genre': 'object', 'Publisher': 'object', 'NA_Sales': 'float64', 'EU_Sales': 'float64', 'JP_Sales': 'float64', 'Other_Sales': 'float64', 'Global_Sales': 'float64'}
<dataframe_Summary>
{'Rank': {'count': 16598.0, 'mean': 8300.605253645017, 'std': 4791.853932896403, 'min': 1.0, '25%': 4151.25, '50%': 8300.5, '75%': 12449.75, 'max': 16600.0}, 'Year': {'count': 16327.0, 'mean': 2006.4064433147546, 'std': 5.828981114712805, 'min': 1980.0, '25%': 2003.0, '50%': 2007.0, '75%': 2010.0, 'max': 2020.0}, 'NA_Sales': {'count': 16598.0, 'mean': 0.26466742981082064, 'std': 0.8166830292988796, 'min': 0.0, '25%': 0.0, '50%': 0.08, '75%': 0.24, 'max': 41.49}, 'EU_Sales': {'count': 16598.0, 'mean': 0.14665200626581515, 'std': 0.5053512312869116, 'min': 0.0, '25%': 0.0, '50%': 0.02, '75%': 0.11, 'max': 29.02}, 'JP_Sales': {'count': 16598.0, 'mean': 0.077781660441017, 'std': 0.30929064808220297, 'min': 0.0, '25%': 0.0, '50%': 0.0, '75%': 0.04, 'max': 10.22}, 'Other_Sales': {'count': 16598.0, 'mean': 0.0480630196409206, 'std': 0.18858840291271461, 'min': 0.0, '25%': 0.0, '50%': 0.01, '75%': 0.04, 'max': 10.57}, 'Global_Sales': {'count': 16598.0, 'mean': 0.5374406555006628, 'std': 1.5550279355699124, 'min': 0.01, '25%': 0.06, '50%': 0.17, '75%': 0.47, 'max': 82.74}}
<dataframe_info>
RangeIndex: 16598 entries, 0 to 16597
Data columns (total 11 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Rank 16598 non-null int64
1 Name 16598 non-null object
2 Platform 16598 non-null object
3 Year 16327 non-null float64
4 Genre 16598 non-null object
5 Publisher 16540 non-null object
6 NA_Sales 16598 non-null float64
7 EU_Sales 16598 non-null float64
8 JP_Sales 16598 non-null float64
9 Other_Sales 16598 non-null float64
10 Global_Sales 16598 non-null float64
dtypes: float64(6), int64(1), object(4)
memory usage: 1.4+ MB
<some_examples>
{'Rank': {'0': 1, '1': 2, '2': 3, '3': 4}, 'Name': {'0': 'Wii Sports', '1': 'Super Mario Bros.', '2': 'Mario Kart Wii', '3': 'Wii Sports Resort'}, 'Platform': {'0': 'Wii', '1': 'NES', '2': 'Wii', '3': 'Wii'}, 'Year': {'0': 2006.0, '1': 1985.0, '2': 2008.0, '3': 2009.0}, 'Genre': {'0': 'Sports', '1': 'Platform', '2': 'Racing', '3': 'Sports'}, 'Publisher': {'0': 'Nintendo', '1': 'Nintendo', '2': 'Nintendo', '3': 'Nintendo'}, 'NA_Sales': {'0': 41.49, '1': 29.08, '2': 15.85, '3': 15.75}, 'EU_Sales': {'0': 29.02, '1': 3.58, '2': 12.88, '3': 11.01}, 'JP_Sales': {'0': 3.77, '1': 6.81, '2': 3.79, '3': 3.28}, 'Other_Sales': {'0': 8.46, '1': 0.77, '2': 3.31, '3': 2.96}, 'Global_Sales': {'0': 82.74, '1': 40.24, '2': 35.82, '3': 33.0}}
<end_description>
| 815 | 0 | 1,928 | 815 |
129791626
|
<jupyter_start><jupyter_text>Road_Surface_Classification
Kaggle dataset identifier: road-surface-classification
<jupyter_script>import os
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import cv2
import random  # used below for shuffling and sampling the image file lists
import matplotlib.pyplot as plt  # used for the accuracy/loss curves at the end
dir_path = "/kaggle/input/road-surface-classification/Input_data"
labels = os.listdir("/kaggle/input/road-surface-classification/Input_data")
labels = [
"ice",
"water_asphalt_severe",
"dry_asphalt_severe",
"fresh_snow",
"wet_asphalt_severe",
"melted_snow",
]
"""
import os
import random
import shutil
dataset_dir="/kaggle/input/road-surface-classification/Input_data"
new_dataset_dir="/kaggle/working/mithila"
num_images = 1000
for label in os.listdir(dataset_dir):
label_dir = os.path.join(dataset_dir, label)
images = os.listdir(label_dir)
random.shuffle(images)
selected_images = images[:num_images]
new_label_dir = os.path.join(new_dataset_dir, label)
os.makedirs(new_label_dir, exist_ok = True)
for image in selected_images:
src = os.path.join(label_dir, image)
dst = os.path.join(new_label_dir, image)
shutil.copy(src, dst)import os
import random
import shutil
dataset_dir="/kaggle/input/road-surface-classification/Input_data"
new_dataset_dir="/kaggle/working/mithila"
num_images = 1000
for label in os.listdir(dataset_dir):
label_dir = os.path.join(dataset_dir, label)
images = os.listdir(label_dir)
random.shuffle(images)
selected_images = images[:num_images]
new_label_dir = os.path.join(new_dataset_dir, label)
os.makedirs(new_label_dir, exist_ok = True)
for image in selected_images:
src = os.path.join(label_dir, image)
dst = os.path.join(new_label_dir, image)
shutil.copy(src, dst)"""
len(os.listdir("/kaggle/working/mithila"))
X = []
images = []
path = "/kaggle/input/road-surface-classification/Input_data/dry_asphalt_severe"
for image in os.listdir(path):
images.append(image)
random.shuffle(images)
dry_sample = random.sample(images, 1000)
print(dry_sample)
X.append(dry_sample)
X
len(dry_sample)
images2 = []
path2 = "/kaggle/input/road-surface-classification/Input_data/fresh_snow"
for image in os.listdir(path2):
images2.append(image)
random.shuffle(images2)
fresh_snow_sample = random.sample(images2, 1000)
print(fresh_snow_sample)
X.append(fresh_snow_sample)
X
len(fresh_snow_sample)
images3 = []
path3 = "/kaggle/input/road-surface-classification/Input_data/ice"
for image in os.listdir(path3):
images3.append(image)
random.shuffle(images3)
ice_sample = random.sample(images3, 1000)
print(ice_sample)
X.append(ice_sample)
len(ice_sample)
images4 = []
path4 = "/kaggle/input/road-surface-classification/Input_data/melted_snow"
for image in os.listdir(path4):
images4.append(image)
random.shuffle(images4)
melted_snow_sample = random.sample(images4, 1000)
print(melted_snow_sample)
X.append(melted_snow_sample)
len(melted_snow_sample)
images5 = []
path5 = "/kaggle/input/road-surface-classification/Input_data/water_asphalt_severe"
for image in os.listdir(path5):
images5.append(image)
random.shuffle(images5)
water_sample = random.sample(images5, 1000)
print(water_sample)
len(water_sample)
images6 = []
path6 = "/kaggle/input/road-surface-classification/Input_data/wet_asphalt_severe"
for image in os.listdir(path6):
images6.append(image)
random.shuffle(images6)
wet_sample = random.sample(images6, 1000)
print(wet_sample)
X.append(wet_sample)
len(wet_sample)
os.mkdir("/kaggle/working/dry")
X.append(water_sample)
X
len(X)
import shutil
shutil.rmtree("/kaggle/working/dry")
data = []
for label in labels:
    path = os.path.join(dir_path, label)
    for img in os.listdir(path):
        img_array = cv2.imread(os.path.join(path, img))
        resized_array = cv2.resize(img_array, (100, 100))
        data.append([resized_array, label])  # pair each image with its own class label (not the whole label list)
len(data)  # total number of images loaded across all classes
X = []
y = []
for feature, label in data:
X.append(feature)
y.append(label)
from sklearn.preprocessing import LabelBinarizer

label_bin = LabelBinarizer()  # one-hot encode the six class names (one label per image)
y = label_bin.fit_transform(y)
X = np.array(X)
y = np.array(y)
from sklearn.model_selection import train_test_split
seed = 42
test_size = 0.30
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=seed, test_size=test_size
)
X_test
X.shape
#!pip install keras==2.4.3
#!pip install tensorflow==2.3.0
from tensorflow.keras.layers import (
Input,
Lambda,
Dense,
Flatten,
Conv2D,
MaxPooling2D,
Dropout,
)
from tensorflow.keras.models import Model
from tensorflow.keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
import numpy as np
train_generator = ImageDataGenerator(
rescale=1 / 255, zoom_range=0.2, horizontal_flip=True, rotation_range=30
)
test_generator = ImageDataGenerator(rescale=1 / 255)
train_generator = train_generator.flow(np.array(X_train), y_train, shuffle=False)
test_generator = test_generator.flow(np.array(X_test), y_test, shuffle=False)
# # **MODEL**
model = Sequential()
model.add(Conv2D(256, (3, 3), activation="relu", input_shape=X_train.shape[1:]))
model.add(MaxPooling2D(2, 2))
model.add(Conv2D(128, (3, 3), activation="relu"))
model.add(MaxPooling2D(2, 2))
model.add(Conv2D(64, (3, 3), activation="relu"))
model.add(MaxPooling2D(2, 2))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(MaxPooling2D(2, 2))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(64, activation="relu"))
model.add(Dense(len(labels), activation="softmax"))  # one output unit per class (6 classes)
model.compile(loss="categorical_crossentropy", metrics=["accuracy"], optimizer="adam")
model.summary()
history = model.fit(
train_generator,
epochs=5,
validation_data=test_generator,
shuffle=True,
validation_steps=len(test_generator),
)
accuracy = history.history["accuracy"]
val_accuracy = history.history["val_accuracy"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs = range(len(accuracy))
plt.plot(epochs, accuracy, "b", label="trainning accuracy")
plt.plot(epochs, val_accuracy, "r", label="validation accuracy")
plt.legend()
plt.show()
plt.plot(epochs, loss, "b", label="trainning loss")
plt.plot(epochs, val_loss, "r", label="validation loss")
plt.legend()
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/791/129791626.ipynb
|
road-surface-classification
|
aswathipt
|
[{"Id": 129791626, "ScriptId": 38380867, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14523325, "CreationDate": "05/16/2023 13:45:57", "VersionNumber": 5.0, "Title": "road_classify", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 243.0, "LinesInsertedFromPrevious": 100.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 143.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 186158999, "KernelVersionId": 129791626, "SourceDatasetVersionId": 5645305}]
|
[{"Id": 5645305, "DatasetId": 3244694, "DatasourceVersionId": 5720636, "CreatorUserId": 14026222, "LicenseName": "Unknown", "CreationDate": "05/09/2023 16:33:25", "VersionNumber": 1.0, "Title": "Road_Surface_Classification", "Slug": "road-surface-classification", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3244694, "CreatorUserId": 14026222, "OwnerUserId": 14026222.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5645305.0, "CurrentDatasourceVersionId": 5720636.0, "ForumId": 3309986, "Type": 2, "CreationDate": "05/09/2023 16:33:25", "LastActivityDate": "05/09/2023", "TotalViews": 94, "TotalDownloads": 5, "TotalVotes": 0, "TotalKernels": 1}]
|
[{"Id": 14026222, "UserName": "aswathipt", "DisplayName": "Aswathi PT", "RegisterDate": "03/06/2023", "PerformanceTier": 0}]
|
import os
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import cv2
dir_path = "/kaggle/input/road-surface-classification/Input_data"
labels = os.listdir("/kaggle/input/road-surface-classification/Input_data")
labels = [
"ice",
"water_asphalt_severe",
"dry_asphalt_severe",
"fresh_snow",
"wet_asphalt_severe",
"melted_snow",
]
"""
import os
import random
import shutil
dataset_dir="/kaggle/input/road-surface-classification/Input_data"
new_dataset_dir="/kaggle/working/mithila"
num_images = 1000
for label in os.listdir(dataset_dir):
label_dir = os.path.join(dataset_dir, label)
images = os.listdir(label_dir)
random.shuffle(images)
selected_images = images[:num_images]
new_label_dir = os.path.join(new_dataset_dir, label)
os.makedirs(new_label_dir, exist_ok = True)
for image in selected_images:
src = os.path.join(label_dir, image)
dst = os.path.join(new_label_dir, image)
shutil.copy(src, dst)import os
import random
import shutil
dataset_dir="/kaggle/input/road-surface-classification/Input_data"
new_dataset_dir="/kaggle/working/mithila"
num_images = 1000
for label in os.listdir(dataset_dir):
label_dir = os.path.join(dataset_dir, label)
images = os.listdir(label_dir)
random.shuffle(images)
selected_images = images[:num_images]
new_label_dir = os.path.join(new_dataset_dir, label)
os.makedirs(new_label_dir, exist_ok = True)
for image in selected_images:
src = os.path.join(label_dir, image)
dst = os.path.join(new_label_dir, image)
shutil.copy(src, dst)"""
len(os.listdir("/kaggle/working/mithila"))
X = []
images = []
path = "/kaggle/input/road-surface-classification/Input_data/dry_asphalt_severe"
for image in os.listdir(path):
images.append(image)
random.shuffle(images)
dry_sample = random.sample(images, 1000)
print(dry_sample)
X.append(dry_sample)
X
len(dry_sample)
images2 = []
path2 = "/kaggle/input/road-surface-classification/Input_data/fresh_snow"
for image in os.listdir(path2):
images2.append(image)
random.shuffle(images2)
fresh_snow_sample = random.sample(images2, 1000)
print(fresh_snow_sample)
X.append(fresh_snow_sample)
X
len(fresh_snow)
images3 = []
path3 = "/kaggle/input/road-surface-classification/Input_data/ice"
for image in os.listdir(path3):
images3.append(image)
random.shuffle(images3)
ice_sample = random.sample(images3, 1000)
print(ice_sample)
X.append(ice_sample)
len(ice)
images4 = []
path4 = "/kaggle/input/road-surface-classification/Input_data/melted_snow"
for image in os.listdir(path4):
images4.append(image)
random.shuffle(images4)
melted_snow_sample = random.sample(images4, 1000)
print(melted_snow_sample)
X.append(melted_snow_sample)
len(melted_snow)
images5 = []
path5 = "/kaggle/input/road-surface-classification/Input_data/water_asphalt_severe"
for image in os.listdir(path5):
images5.append(image)
random.shuffle(images5)
water_sample = random.sample(images5, 1000)
print(water_sample)
len(water_asphalt_severe)
images6 = []
path6 = "/kaggle/input/road-surface-classification/Input_data/wet_asphalt_severe"
for image in os.listdir(path6):
images6.append(image)
random.shuffle(images6)
wet_sample = random.sample(images6, 1000)
print(wet_sample)
X.append(wet_sample)
len(wet_asphalt_severe)
os.mkdir("/kaggle/working/dry")
X.append(water_sample)
X
len(X)
import shutil
shutil.rmtree("/kaggle/working/dry")
data = []
for label in labels:
path = os.path.join(dir_path, label)
for img in os.listdir(path):
img_array = cv2.imread(os.path.join(path, img))
resized_array = cv2.resize(img_array, (100, 100))
data.append([resized_array, labels])
len(ice)
X = []
y = []
for feature, label in data:
X.append(feature)
y.append(label)
from sklearn.preprocessing import MultiLabelBinarizer
label_bin = MultiLabelBinarizer()
y = label_bin.fit_transform(y)
X = np.array(X)
y = np.array(y)
from sklearn.model_selection import train_test_split
seed = 42
test_size = 0.30
X_train, X_test, y_train, y_test = train_test_split(
X, y, random_state=seed, test_size=test_size
)
X_test
X.shape
#!pip install keras==2.4.3
#!pip install tensorflow==2.3.0
from tensorflow.keras.layers import (
Input,
Lambda,
Dense,
Flatten,
Conv2D,
MaxPooling2D,
Dropout,
)
from tensorflow.keras.models import Model
from tensorflow.keras.models import Sequential
from keras.preprocessing.image import ImageDataGenerator
import tensorflow as tf
import numpy as np
train_generator = ImageDataGenerator(
rescale=1 / 255, zoom_range=0.2, horizontal_flip=True, rotation_range=30
)
test_generator = ImageDataGenerator(rescale=1 / 255)
train_generator = train_generator.flow(np.array(X_train), y_train, shuffle=False)
test_generator = test_generator.flow(np.array(X_test), y_test, shuffle=False)
# # **MODEL******
model = Sequential()
model.add(Conv2D(256, (3, 3), activation="relu", input_shape=X_train.shape[1:]))
model.add(MaxPooling2D(2, 2))
model.add(Conv2D(128, (3, 3), activation="relu"))
model.add(MaxPooling2D(2, 2))
model.add(Conv2D(64, (3, 3), activation="relu"))
model.add(MaxPooling2D(2, 2))
model.add(Conv2D(32, (3, 3), activation="relu"))
model.add(MaxPooling2D(2, 2))
model.add(Flatten())
model.add(Dropout(0.5))
model.add(Dense(64, activation="relu"))
model.add(Dense(4, activation="softmax"))
model.compile(loss="categorical_crossentropy", metrics=["accuracy"], optimizer="adam")
model.summary()
history = model.fit(
train_generator,
epochs=5,
validation_data=test_generator,
shuffle=True,
validation_steps=len(test_generator),
)
accuracy = history.history["accuracy"]
val_accuracy = history.history["val_accuracy"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs = range(len(accuracy))
plt.plot(epochs, accuracy, "b", label="trainning accuracy")
plt.plot(epochs, val_accuracy, "r", label="validation accuracy")
plt.legend()
plt.show()
plt.plot(epochs, loss, "b", label="trainning loss")
plt.plot(epochs, val_loss, "r", label="validation loss")
plt.legend()
plt.show()
| false | 0 | 2,130 | 0 | 2,154 | 2,130 |
||
129605932
|
<jupyter_start><jupyter_text>Ferrari and Tesla. Share prices (2015-2023)
# Share prices from October 2015 till April 2023 of two famous manufacturers of cool sports cars

## Ferrari
Ferrari is an Italian luxury sports car manufacturer based in Maranello, Italy. Founded by Enzo Ferrari (1898–1988) in 1939 from the Alfa Romeo racing division as Auto Avio Costruzioni, the company built its first car in 1940, and produced its first Ferrari-badged car in 1947.

## Tesla
Tesla, Inc. is an American multinational automotive and clean energy company headquartered in Austin, Texas. Tesla designs and manufactures electric vehicles (electric cars and trucks), battery energy storage from home to grid-scale, solar panels and solar roof tiles, and related products and services. Tesla is one of the world's most valuable companies and is, as of 2023, the world's most valuable automaker. In 2021, the company had the most worldwide sales of battery electric vehicles and plug-in electric vehicles, capturing 21% of the battery-electric (purely electric) market and 14% of the plug-in market (which includes plug-in hybrids). Through its subsidiary Tesla Energy, the company develops and is a major installer of photovoltaic systems in the United States. Tesla Energy is also one of the largest global suppliers of battery energy storage systems, with 6.5 gigawatt-hours (GWh) installed in 2022.

Kaggle dataset identifier: ferrari-and-tesla-share-prices-2015-2023
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df_f = pd.read_csv("/kaggle/input/ferrari-and-tesla-share-prices-2015-2023/Ferrari.csv")
df_t = pd.read_csv("/kaggle/input/ferrari-and-tesla-share-prices-2015-2023/Tesla.csv")
df_f.head()
# Convert_to Datetime
df_f["Date"] = pd.to_datetime(df_f["Date"], format="%Y-%m-%d")
df_t["Date"] = pd.to_datetime(df_t["Date"], format="%Y-%m-%d")
# New Column
df_f["name"] = "Ferrari"
df_t["name"] = "Tesla"
# Concat
new_df = pd.concat([df_f, df_t], axis=0)
new_df.sample(6)
import plotly.express as px
fig = px.line(
new_df,
x="Date",
y="High",
color="name",
color_discrete_sequence=["red", "blue"],
hover_data=["Low", "Close", "Adj Close", "Volume"],
title="Compare High Tesla & Ferrari",
)
fig.show()
# Predicting Tesla's High price with a GRU
from tensorflow.keras import Sequential
from tensorflow.keras.layers import LSTM, GRU, Dense, Dropout
df_t.set_index("Date", inplace=True)
df = df_t["High"]
print("Sample:", df.shape)
print("Train set:", df[:"2021"].shape)
print("Test set:", df["2021":].shape)
# Normalize
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler()
data = df.values.reshape(-1, 1)
data_sc = sc.fit_transform(data)
data_sc[:5]
# Function to convert the series into supervised (X, y) windows
def convert_matrix(data, step=1):
x, y = [], []
for i in range(len(data) - step):
d = i + step
x.append(data[i:d,])
y.append(data[d,])
return np.array(x), np.array(y)
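# Quick sanity check of the windowing logic on a tiny made-up array (values are arbitrary): each row of x is a window of `step` past values and y is the value that follows it
demo = np.arange(6).reshape(-1, 1)
demo_x, demo_y = convert_matrix(demo, step=2)
print(demo_x.shape, demo_y.shape)  # (4, 2, 1) (4, 1)
print(demo_x[0].ravel(), "->", demo_y[0])  # [0 1] -> [2]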
n_train = 1560
train, test = data_sc[:n_train], data_sc[n_train:]
print("Before Coversion Train:", train.shape)
print("Before Coversion Test:", test.shape)
step = 60 # Step back data 60 Day
x_train, y_train = convert_matrix(train, step)
x_test, y_test = convert_matrix(test, step)
print("After Conversion Train:", x_train.shape, x_test.shape)
print("After Conversion Test:", y_train.shape, y_test.shape)
# model
model = Sequential()
model.add(
GRU(units=50, return_sequences=True, input_shape=(step, 1), activation="relu")
)
model.add(Dropout(0.2))
model.add(GRU(units=40, activation="relu"))
model.add(Dropout(0.2))
model.add(Dense(units=1))
model.compile(
loss="mean_squared_error",
optimizer="rmsprop",
)
model.summary()
history = model.fit(x_train, y_train, epochs=50, batch_size=32, verbose=1)
# Visualize Loss
plt.plot(history.history["loss"], label="Train")
plt.legend()
plt.show()
# Mean_square_error & R2 Score
from sklearn.metrics import mean_squared_error, r2_score
testpredict = model.predict(x_test)
testpredict_inv = sc.inverse_transform(testpredict)
y_test_inv = sc.inverse_transform(y_test)
r2_test = r2_score(y_test_inv, testpredict_inv)
rmse_test = np.sqrt(mean_squared_error(y_test_inv, testpredict_inv))
print("R2 Test: {:.3f}".format(r2_test))
print("RMSE Test: {:.3f}".format(rmse_test))
plt.figure(figsize=(12, 6))
plt.plot(y_test_inv, lw=1.3, label="Dataset (Test)")
plt.plot(testpredict_inv, lw=2.4, color="r", label="Prediction")
plt.title("Dataset vs Prediction (GRU)")
plt.legend()
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/605/129605932.ipynb
|
ferrari-and-tesla-share-prices-2015-2023
|
kapturovalexander
|
[{"Id": 129605932, "ScriptId": 38539870, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14597751, "CreationDate": "05/15/2023 07:34:36", "VersionNumber": 1.0, "Title": "Prediction Tesla with GRU", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 122.0, "LinesInsertedFromPrevious": 122.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
|
[{"Id": 185844671, "KernelVersionId": 129605932, "SourceDatasetVersionId": 5634610}]
|
[{"Id": 5634610, "DatasetId": 3158061, "DatasourceVersionId": 5709885, "CreatorUserId": 10074224, "LicenseName": "CC0: Public Domain", "CreationDate": "05/08/2023 15:32:59", "VersionNumber": 6.0, "Title": "Ferrari and Tesla. Share prices (2015-2023)", "Slug": "ferrari-and-tesla-share-prices-2015-2023", "Subtitle": "Ferrari and Tesla. Share prices (October 2015 - April 2023)", "Description": "# Share prices from October 2015 till April 2023 of two famous manufacturers of cool sports cars\n\n\n## Ferrari\nFerrari is an Italian luxury sports car manufacturer based in Maranello, Italy. Founded by Enzo Ferrari (1898\u20131988) in 1939 from the Alfa Romeo racing division as Auto Avio Costruzioni, the company built its first car in 1940, and produced its first Ferrari-badged car in 1947.\n\n\n## Tesla\nTesla, Inc. is an American multinational automotive and clean energy company headquartered in Austin, Texas. Tesla designs and manufactures electric vehicles (electric cars and trucks), battery energy storage from home to grid-scale, solar panels and solar roof tiles, and related products and services. Tesla is one of the world's most valuable companies and is, as of 2023, the world's most valuable automaker. In 2021, the company had the most worldwide sales of battery electric vehicles and plug-in electric vehicles, capturing 21% of the battery-electric (purely electric) market and 14% of the plug-in market (which includes plug-in hybrids). Through its subsidiary Tesla Energy, the company develops and is a major installer of photovoltaic systems in the United States. Tesla Energy is also one of the largest global suppliers of battery energy storage systems, with 6.5 gigawatt-hours (GWh) installed in 2022.\n", "VersionNotes": "Data Update 2023-05-08", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3158061, "CreatorUserId": 10074224, "OwnerUserId": 10074224.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5995108.0, "CurrentDatasourceVersionId": 6072999.0, "ForumId": 3222109, "Type": 2, "CreationDate": "04/20/2023 11:21:25", "LastActivityDate": "04/20/2023", "TotalViews": 5689, "TotalDownloads": 1031, "TotalVotes": 37, "TotalKernels": 6}]
|
[{"Id": 10074224, "UserName": "kapturovalexander", "DisplayName": "Alexander Kapturov", "RegisterDate": "03/28/2022", "PerformanceTier": 2}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df_f = pd.read_csv("/kaggle/input/ferrari-and-tesla-share-prices-2015-2023/Ferrari.csv")
df_t = pd.read_csv("/kaggle/input/ferrari-and-tesla-share-prices-2015-2023/Tesla.csv")
df_f.head()
# Convert_to Datetime
df_f["Date"] = pd.to_datetime(df_f["Date"], format="%Y-%m-%d")
df_t["Date"] = pd.to_datetime(df_t["Date"], format="%Y-%m-%d")
# New Column
df_f["name"] = "Ferrari"
df_t["name"] = "Tesla"
# Concat
new_df = pd.concat([df_f, df_t], axis=0)
new_df.sample(6)
import plotly.express as px
fig = px.line(
new_df,
x="Date",
y="High",
color="name",
color_discrete_sequence=["red", "blue"],
hover_data=["Low", "Close", "Adj Close", "Volume"],
title="Compare High Tesla & Ferrari",
)
fig.show()
# Predicting Tesla's High price with a GRU
from tensorflow.keras import Sequential
from tensorflow.keras.layers import LSTM, GRU, Dense, Dropout
df_t.set_index("Date", inplace=True)
df = df_t["High"]
print("Sample:", df.shape)
print("Train set:", df[:"2021"].shape)
print("Test set:", df["2021":].shape)
# Normalize
from sklearn.preprocessing import MinMaxScaler
sc = MinMaxScaler()
data = df.values.reshape(-1, 1)
data_sc = sc.fit_transform(data)
data_sc[:5]
# Function to convert the series into supervised-learning matrices
def convert_matrix(data, step=1):
x, y = [], []
for i in range(len(data) - step):
d = i + step
x.append(data[i:d,])
y.append(data[d,])
return np.array(x), np.array(y)
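# A quick sanity check of convert_matrix on toy data (the _toy_* names below are
# just illustrative, not part of the original pipeline): with step=2 the series
# [1, 2, 3, 4] should give x = [[1, 2], [2, 3]] and y = [3, 4].
_toy_x, _toy_y = convert_matrix(np.arange(1, 5).reshape(-1, 1), step=2)
print("toy x:", _toy_x.squeeze().tolist(), "| toy y:", _toy_y.squeeze().tolist())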
n_train = 1560
train, test = data_sc[:n_train], data_sc[n_train:]
print("Before Coversion Train:", train.shape)
print("Before Coversion Test:", test.shape)
step = 60 # Step back data 60 Day
x_train, y_train = convert_matrix(train, step)
x_test, y_test = convert_matrix(test, step)
print("After Conversion Train:", x_train.shape, x_test.shape)
print("After Conversion Test:", y_train.shape, y_test.shape)
# model
model = Sequential()
model.add(
GRU(units=50, return_sequences=True, input_shape=(step, 1), activation="relu")
)
model.add(Dropout(0.2))
model.add(GRU(units=40, activation="relu"))
model.add(Dropout(0.2))
model.add(Dense(units=1))
model.compile(
loss="mean_squared_error",
optimizer="rmsprop",
)
model.summary()
history = model.fit(x_train, y_train, epochs=50, batch_size=32, verbose=1)
# Visualize Loss
plt.plot(history.history["loss"], label="Train")
plt.legend()
plt.show()
# Mean squared error & R2 score
from sklearn.metrics import mean_squared_error, r2_score
testpredict = model.predict(x_test)
testpredict_inv = sc.inverse_transform(testpredict)
y_test_inv = sc.inverse_transform(y_test)
r2_test = r2_score(y_test_inv, testpredict_inv)
rmse_test = np.sqrt(mean_squared_error(y_test_inv, testpredict_inv))
print("R2 Test: {:.3f}".format(r2_test))
print("RMSE Test: {:.3f}".format(rmse_test))
plt.figure(figsize=(12, 6))
plt.plot(y_test_inv, lw=1.3, label="Dataset (Test)")
plt.plot(testpredict_inv, lw=2.4, color="r", label="Prediction")
plt.title("Dataset vs Prediction (GRU)")
plt.legend()
plt.show()
| false | 2 | 1,292 | 3 | 2,063 | 1,292 |
||
129605069
|
# # Cyclistic bike share
# This is a case study for the capstone project at the end of the google data analytics professional certificate course.
# # Scenario
# You are a junior data analyst working in the marketing analyst team at Cyclistic, a bike-share company in Chicago. The director of marketing believes the company’s future success depends on maximizing the number of annual memberships. Therefore, your team wants to understand how casual riders and annual members use Cyclistic bikes differently. From these insights, your team will design a new marketing strategy to convert casual riders into annual members. But first, Cyclistic executives must approve your recommendations, so they must be backed up with compelling data insights and professional data visualizations.
# ****The scenario is a made up one but the data comes from a real bike share company.***
# # Data source
# The data was downloaded from the following website ->
# https://divvy-tripdata.s3.amazonaws.com/index.html
# There are many other files for different time periods going as far back as 2015. For this analysis we will only be using the newest data available for the past year. The data is constantly updated, but this analysis will be using the data from April 2022 to March 2023.
# # Goals
# The main goal of this analysis is to discover if there are any differences in behavior between casual riders and annual members. I will then provide suggestions that will lead to an increase in annual memberships.
# To achieve this goal, we must look at the specific differences listed below.
# **See how many of the rides are by casual vs member riders**
# * Total number of casual rides vs member rides.
# **Compare the length of the rides**
# * Calculate the min, max, mean and median ride length for the two types of riders.
# * Mean and median time by user type.
# **Compare the differences based on the day of the week**
# * Mean and median ride time based on day of the week.
# * Number of rides based on the day of the week.
# **Compare the differences based on the month**
# * Mean and median ride length based on the month.
# * Number of rides based on the month.
# **Compare the differences based on the type of bike**
# * Number of bike rides based on type of bike.
# * Mean and Median ride time based on type of bike.
# * number of rides for each bike type by each user type.
# * Mean and median ride time for each type of bike by each user type.
# **Compare the most popular stations**
# * find the top ten most popular stations to start and end a ride. (this will be the total of casuals and members)
# * The percentage split of these popular stations by user type.
# * top ten stations for casual riders.
# * top ten stations for member riders.
# **Compare the most popular routes**
# * Find the top ten most popular routes. (this will be the total of casuals and members)
# * The percentage split of these popular routes by user type.
# * Top ten routes for casual riders.
# * Top ten routes for member riders.
# *A hash mark # will be placed next to the parts that have been completed
#
# This R environment comes with many helpful analytics packages installed
# It is defined by the kaggle/rstats Docker image: https://github.com/kaggle/docker-rstats
# For example, here's a helpful package to load
library(tidyverse) # metapackage of all tidyverse packages
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
list.files(path="../input")
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/605/129605069.ipynb
| null | null |
[{"Id": 129605069, "ScriptId": 38536754, "ParentScriptVersionId": NaN, "ScriptLanguageId": 12, "AuthorUserId": 13571263, "CreationDate": "05/15/2023 07:27:10", "VersionNumber": 2.0, "Title": "notebook487deaf8aa", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 70.0, "LinesInsertedFromPrevious": 38.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 32.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# # Cyclistic bike share
# This is a case study for the capstone project at the end of the google data analytics professional certificate course.
# # Scenario
# You are a junior data analyst working in the marketing analyst team at Cyclistic, a bike-share company in Chicago. The director of marketing believes the company’s future success depends on maximizing the number of annual memberships. Therefore, your team wants to understand how casual riders and annual members use Cyclistic bikes differently. From these insights, your team will design a new marketing strategy to convert casual riders into annual members. But first, Cyclistic executives must approve your recommendations, so they must be backed up with compelling data insights and professional data visualizations.
# ****The scenario is a made up one but the data comes from a real bike share company.***
# # Data source
# The data was downloaded from the following website ->
# https://divvy-tripdata.s3.amazonaws.com/index.html
# There are many other files for different time periods going as far back as 2015. For this analysis we will only be using the newest data available for the past year. The data is constantly updated, but this analysis will be using the data from April 2022 to March 2023.
# # Goals
# The main goal of this analysis is to discover if there are any differences in behavior between casual riders and annual members. I will then provide suggestions that will lead to an increase in annual memberships.
# To achieve this goal, we must look at the specific differences listed below.
# **See how many of the rides are by casual vs member riders**
# * Total number of casual rides vs member rides.
# **Compare the length of the rides**
# * Calculate the min, max, mean and median ride length for the two types of riders.
# * Mean and median time by user type.
# **Compare the differences based on the day of the week**
# * Mean and median ride time based on day of the week.
# * Number of rides based on the day of the week.
# **Compare the differences based on the month**
# * Mean and median ride length based on the month.
# * Number of rides based on the month.
# **Compare the differences based on the type of bike**
# * Number of bike rides based on type of bike.
# * Mean and Median ride time based on type of bike.
# * number of rides for each bike type by each user type.
# * Mean and median ride time for each type of bike by each user type.
# **Compare the most popular stations**
# * find the top ten most popular stations to start and end a ride. (this will be the total of casuals and members)
# * The percentage split of these popular stations by user type.
# * top ten stations for casual riders.
# * top ten stations for member riders.
# **Compare the most popular routes**
# * Find the top ten most popular routes. (this will be the total of casuals and members)
# * The percentage split of these popular routes by user type.
# * Top ten routes for casual riders.
# * Top ten routes for member riders.
# *A hash mark # will be placed next to the parts that have been completed
#
# This R environment comes with many helpful analytics packages installed
# It is defined by the kaggle/rstats Docker image: https://github.com/kaggle/docker-rstats
# For example, here's a helpful package to load
library(tidyverse) # metapackage of all tidyverse packages
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
list.files(path="../input")
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
| false | 0 | 964 | 0 | 964 | 964 |
||
129605855
|
import os
import pandas as pd
import torch
import random
import numpy as np
import torch.nn as nn
from sklearn import preprocessing
import torchvision
import torchaudio # importing torchaudio library for audio processing
from tqdm import tqdm
import torch.nn.functional as F
import torch.nn.init as init
import torch.optim as optim
import matplotlib.pyplot as plt
from torchvision import transforms
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import Dataset, DataLoader, random_split
from types import SimpleNamespace
cfg = SimpleNamespace()
# paths
cfg.data_folder = ""
cfg.name = "ftt1"
cfg.data_dir = "../input/birdclef-2023/"
cfg.train_data_folder = cfg.data_dir + "train_audio/"
cfg.val_data_folder = cfg.data_dir + "train_audio/"
cfg.test_data_folder = cfg.data_dir + "test_soundscapes/soundscape_29201.ogg"
cfg.birds = np.array(
[
"abethr1",
"abhori1",
"abythr1",
"afbfly1",
"afdfly1",
"afecuc1",
"affeag1",
"afgfly1",
"afghor1",
"afmdov1",
"afpfly1",
"afpkin1",
"afpwag1",
"afrgos1",
"afrgrp1",
"afrjac1",
"afrthr1",
"amesun2",
"augbuz1",
"bagwea1",
"barswa",
"bawhor2",
"bawman1",
"bcbeat1",
"beasun2",
"bkctch1",
"bkfruw1",
"blacra1",
"blacuc1",
"blakit1",
"blaplo1",
"blbpuf2",
"blcapa2",
"blfbus1",
"blhgon1",
"blhher1",
"blksaw1",
"blnmou1",
"blnwea1",
"bltapa1",
"bltbar1",
"bltori1",
"blwlap1",
"brcale1",
"brcsta1",
"brctch1",
"brcwea1",
"brican1",
"brobab1",
"broman1",
"brosun1",
"brrwhe3",
"brtcha1",
"brubru1",
"brwwar1",
"bswdov1",
"btweye2",
"bubwar2",
"butapa1",
"cabgre1",
"carcha1",
"carwoo1",
"categr",
"ccbeat1",
"chespa1",
"chewea1",
"chibat1",
"chtapa3",
"chucis1",
"cibwar1",
"cohmar1",
"colsun2",
"combul2",
"combuz1",
"comsan",
"crefra2",
"crheag1",
"crohor1",
"darbar1",
"darter3",
"didcuc1",
"dotbar1",
"dutdov1",
"easmog1",
"eaywag1",
"edcsun3",
"egygoo",
"equaka1",
"eswdov1",
"eubeat1",
"fatrav1",
"fatwid1",
"fislov1",
"fotdro5",
"gabgos2",
"gargan",
"gbesta1",
"gnbcam2",
"gnhsun1",
"gobbun1",
"gobsta5",
"gobwea1",
"golher1",
"grbcam1",
"grccra1",
"grecor",
"greegr",
"grewoo2",
"grwpyt1",
"gryapa1",
"grywrw1",
"gybfis1",
"gycwar3",
"gyhbus1",
"gyhkin1",
"gyhneg1",
"gyhspa1",
"gytbar1",
"hadibi1",
"hamerk1",
"hartur1",
"helgui",
"hipbab1",
"hoopoe",
"huncis1",
"hunsun2",
"joygre1",
"kerspa2",
"klacuc1",
"kvbsun1",
"laudov1",
"lawgol",
"lesmaw1",
"lessts1",
"libeat1",
"litegr",
"litswi1",
"litwea1",
"loceag1",
"lotcor1",
"lotlap1",
"luebus1",
"mabeat1",
"macshr1",
"malkin1",
"marsto1",
"marsun2",
"mcptit1",
"meypar1",
"moccha1",
"mouwag1",
"ndcsun2",
"nobfly1",
"norbro1",
"norcro1",
"norfis1",
"norpuf1",
"nubwoo1",
"pabspa1",
"palfly2",
"palpri1",
"piecro1",
"piekin1",
"pitwhy",
"purgre2",
"pygbat1",
"quailf1",
"ratcis1",
"raybar1",
"rbsrob1",
"rebfir2",
"rebhor1",
"reboxp1",
"reccor",
"reccuc1",
"reedov1",
"refbar2",
"refcro1",
"reftin1",
"refwar2",
"rehblu1",
"rehwea1",
"reisee2",
"rerswa1",
"rewsta1",
"rindov",
"rocmar2",
"rostur1",
"ruegls1",
"rufcha2",
"sacibi2",
"sccsun2",
"scrcha1",
"scthon1",
"shesta1",
"sichor1",
"sincis1",
"slbgre1",
"slcbou1",
"sltnig1",
"sobfly1",
"somgre1",
"somtit4",
"soucit1",
"soufis1",
"spemou2",
"spepig1",
"spewea1",
"spfbar1",
"spfwea1",
"spmthr1",
"spwlap1",
"squher1",
"strher",
"strsee1",
"stusta1",
"subbus1",
"supsta1",
"tacsun1",
"tafpri1",
"tamdov1",
"thrnig1",
"trobou1",
"varsun2",
"vibsta2",
"vilwea1",
"vimwea1",
"walsta1",
"wbgbir1",
"wbrcha2",
"wbswea1",
"wfbeat1",
"whbcan1",
"whbcou1",
"whbcro2",
"whbtit5",
"whbwea1",
"whbwhe3",
"whcpri2",
"whctur2",
"wheslf1",
"whhsaw1",
"whihel1",
"whrshr1",
"witswa1",
"wlwwar",
"wookin1",
"woosan",
"wtbeat1",
"yebapa1",
"yebbar1",
"yebduc1",
"yebere1",
"yebgre1",
"yebsto1",
"yeccan1",
"yefcan",
"yelbis1",
"yenspu1",
"yertin1",
"yesbar1",
"yespet1",
"yetgre1",
"yewgre1",
]
)
cfg.n_classes = len(cfg.birds)
cfg.train_df = "../input/birdclef-2023/train_metadata.csv"
class AudioUtil:
@staticmethod
def open(audio_file):
sig, sr = torchaudio.load(audio_file)
return (sig, sr)
@staticmethod
def rechannel(aud, new_channel):
sig, sr = aud
if sig.shape[0] == new_channel:
# Nothing to do
return aud
if new_channel == 1:
# Convert from stereo to mono by selecting only the first channel
resig = sig[:1, :]
else:
# Convert from mono to stereo by duplicating the first channel
resig = torch.cat([sig, sig, sig])
return (resig, sr)
@staticmethod
def resample(aud, newsr):
sig, sr = aud
if sr == newsr:
# Nothing to do
return aud
num_channels = sig.shape[0]
# Resample first channel
resig = torchaudio.transforms.Resample(sr, newsr)(sig[:1, :])
if num_channels > 1:
# Resample the second channel and merge both channels
retwo = torchaudio.transforms.Resample(sr, newsr)(sig[1:, :])
resig = torch.cat([resig, retwo])
return (resig, newsr)
@staticmethod
def pad_trunc(aud, max_ms):
sig, sr = aud
num_rows, sig_len = sig.shape
max_len = sr // 1000 * max_ms
if sig_len > max_len:
# Truncate the signal to the given length
sig = sig[:, :max_len]
elif sig_len < max_len:
# Length of padding to add at the beginning and end of the signal
pad_begin_len = random.randint(0, max_len - sig_len)
pad_end_len = max_len - sig_len - pad_begin_len
# Pad with 0s
pad_begin = torch.zeros((num_rows, pad_begin_len))
pad_end = torch.zeros((num_rows, pad_end_len))
sig = torch.cat((pad_begin, sig, pad_end), 1)
return (sig, sr)
@staticmethod
def time_shift(aud, shift_limit):
sig, sr = aud
_, sig_len = sig.shape
shift_amt = int(random.random() * shift_limit * sig_len)
        return (sig.roll(shift_amt, dims=1), sr)  # roll along the time axis only
@staticmethod
def spectro_gram(aud, n_mels=64, n_fft=1024, hop_len=None):
sig, sr = aud
top_db = 80
# spec has shape [channel, n_mels, time], where channel is mono, stereo etc
spec = torchaudio.transforms.MelSpectrogram(
sr, n_fft=n_fft, hop_length=hop_len, n_mels=n_mels
)(sig)
# Convert to decibels
spec = torchaudio.transforms.AmplitudeToDB(top_db=top_db)(spec)
return spec
@staticmethod
def spectro_augment(spec, max_mask_pct=0.1, n_freq_masks=1, n_time_masks=1):
_, n_mels, n_steps = spec.shape
mask_value = spec.mean()
aug_spec = spec
freq_mask_param = max_mask_pct * n_mels
for _ in range(n_freq_masks):
aug_spec = torchaudio.transforms.FrequencyMasking(freq_mask_param)(
aug_spec, mask_value
)
time_mask_param = max_mask_pct * n_steps
for _ in range(n_time_masks):
aug_spec = torchaudio.transforms.TimeMasking(time_mask_param)(
aug_spec, mask_value
)
return aug_spec
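# preprocessData chains the AudioUtil steps above: resample to 32 kHz, expand to
# 3 channels, pad/trim to a fixed length, apply a random time shift, build a mel
# spectrogram in dB, apply SpecAugment masking, then standardise the spectrogram.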
def preprocessData(aud):
duration = 8000
sr = 32000
channel = 3
shift_pct = 0.4
reaud = AudioUtil.resample(aud, sr)
rechan = AudioUtil.rechannel(reaud, channel)
dur_aud = AudioUtil.pad_trunc(rechan, duration)
shift_aud = AudioUtil.time_shift(dur_aud, shift_pct)
sgram = AudioUtil.spectro_gram(shift_aud, n_mels=64, n_fft=1024, hop_len=None)
aug_sgram = AudioUtil.spectro_augment(
sgram, max_mask_pct=0.1, n_freq_masks=2, n_time_masks=2
)
aug_sgram_m, aug_sgram_s = aug_sgram.mean(), aug_sgram.std()
aug_sgram = (aug_sgram - aug_sgram_m) / aug_sgram_s
return aug_sgram
# Define a data class to handle the metadata file
class RawData:
__train_data_path = cfg.train_data_folder
__train_df = pd.read_csv(cfg.train_df)
__labels_t = None
__audio_names = None
__le = preprocessing.LabelEncoder()
@staticmethod
def labels_t():
if RawData.__labels_t is None:
labels = RawData.__train_df["primary_label"]
# RawData.__le = preprocessing.LabelEncoder()
RawData.__labels_t = RawData.__le.fit_transform(labels)
# RawData.__labels_t = torch.as_tensor(targets)
return RawData.__labels_t
@staticmethod
def audio_names():
if RawData.__audio_names is None:
RawData.__audio_names = RawData.__train_df["filename"]
return RawData.__audio_names
@staticmethod
def re_transf(data):
return RawData.__le.inverse_transform(data)
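# MakeFrame splits a (channels, samples) waveform into non-overlapping chunks of
# `duration` seconds using unfold; clips shorter than one chunk are returned whole
# with an added leading dimension.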
def MakeFrame(audio, duration=5, sr=32000):
frame_length = int(duration * sr)
frame_step = int(duration * sr)
if frame_length > audio.shape[1]:
return audio.unsqueeze(0)
# audio = torch.tensor(audio)
else:
chunks = audio.unfold(1, frame_length, frame_step).transpose(0, 1)
return chunks
import shutil
# data_path = '/kaggle/working/'
save_data_path = "/kaggle/working/data/"
if os.path.exists(save_data_path):
shutil.rmtree(save_data_path)
t_data_path = save_data_path + "data/"
os.mkdir(save_data_path)
os.mkdir(t_data_path)
for tit in cfg.birds[:132]:
os.mkdir(t_data_path + tit)
cfg.birds[132]
train_df = pd.read_csv(cfg.train_df)
n = len(train_df)
# k=int(n/2)
lists = []
for i in range(n):
print(i, end="\r")
data = train_df.iloc[i]
if data["primary_label"] == "lesmaw1":
print(i)
break
# print(data['primary_label'])
loadpath = cfg.train_data_folder + data["filename"]
aud, sr = torchaudio.load(loadpath)
# print(aud.shape)
signal = preprocessData((aud, sr))
savepath = t_data_path + data["filename"][:-4] + ".pth"
torch.save(signal, savepath)
lists.append([data["primary_label"], data["filename"]])
# break
"""train_df = pd.read_csv(cfg.train_df)
lists = []
for i in range(len(train_df)):
data = train_df.iloc[i]
#print(data['primary_label'])
loadpath = cfg.train_data_folder+ data['filename']
aud,sr = torchaudio.load(loadpath)
#print(aud.shape)
frames=MakeFrame(aud)
#print(frames.shape)
i=5
for frame in frames:
signal=preprocessData((frame,sr))
savepath = save_data_path+ data['filename'][:-4]+'_%d'%i+'.pth'
torch.save(signal,savepath)
lists.append([data['primary_label'],savepath])
i+=5
#break"""
pd_lists = pd.DataFrame(lists)
pd_lists.to_csv(save_data_path + "lists.csv", index=False)
import os
import zipfile
import datetime
def file2zip(packagePath, zipPath):
"""
:param packagePath: 文件夹路径
:param zipPath: 压缩包路径
:return:
"""
    zf = zipfile.ZipFile(zipPath, "w", zipfile.ZIP_DEFLATED)
    for path, dirNames, fileNames in os.walk(packagePath):
        fpath = path.replace(packagePath, "")
        for name in fileNames:
            fullName = os.path.join(path, name)
            # build the archive-relative name with the OS path separator rather
            # than a hard-coded backslash, so the zip unpacks cleanly on Linux
            name = os.path.join(fpath, name)
            zf.write(fullName, name)
    zf.close()
# Folder to compress
# packagePath = '/kaggle/working/'
zipPath = save_data_path + "output.zip"
if os.path.exists(zipPath):
os.remove(zipPath)
file2zip(t_data_path, zipPath)
print("打包完成")
print(datetime.datetime.utcnow())
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/605/129605855.ipynb
| null | null |
[{"Id": 129605855, "ScriptId": 38509583, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12624499, "CreationDate": "05/15/2023 07:33:53", "VersionNumber": 1.0, "Title": "notedta_1", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 323.0, "LinesInsertedFromPrevious": 323.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import os
import pandas as pd
import torch
import random
import numpy as np
import torch.nn as nn
from sklearn import preprocessing
import torchvision
import torchaudio # importing torchaudio library for audio processing
from tqdm import tqdm
import torch.nn.functional as F
import torch.nn.init as init
import torch.optim as optim
import matplotlib.pyplot as plt
from torchvision import transforms
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torch.utils.data import Dataset, DataLoader, random_split
from types import SimpleNamespace
cfg = SimpleNamespace()
# paths
cfg.data_folder = ""
cfg.name = "ftt1"
cfg.data_dir = "../input/birdclef-2023/"
cfg.train_data_folder = cfg.data_dir + "train_audio/"
cfg.val_data_folder = cfg.data_dir + "train_audio/"
cfg.test_data_folder = cfg.data_dir + "test_soundscapes/soundscape_29201.ogg"
cfg.birds = np.array(
[
"abethr1",
"abhori1",
"abythr1",
"afbfly1",
"afdfly1",
"afecuc1",
"affeag1",
"afgfly1",
"afghor1",
"afmdov1",
"afpfly1",
"afpkin1",
"afpwag1",
"afrgos1",
"afrgrp1",
"afrjac1",
"afrthr1",
"amesun2",
"augbuz1",
"bagwea1",
"barswa",
"bawhor2",
"bawman1",
"bcbeat1",
"beasun2",
"bkctch1",
"bkfruw1",
"blacra1",
"blacuc1",
"blakit1",
"blaplo1",
"blbpuf2",
"blcapa2",
"blfbus1",
"blhgon1",
"blhher1",
"blksaw1",
"blnmou1",
"blnwea1",
"bltapa1",
"bltbar1",
"bltori1",
"blwlap1",
"brcale1",
"brcsta1",
"brctch1",
"brcwea1",
"brican1",
"brobab1",
"broman1",
"brosun1",
"brrwhe3",
"brtcha1",
"brubru1",
"brwwar1",
"bswdov1",
"btweye2",
"bubwar2",
"butapa1",
"cabgre1",
"carcha1",
"carwoo1",
"categr",
"ccbeat1",
"chespa1",
"chewea1",
"chibat1",
"chtapa3",
"chucis1",
"cibwar1",
"cohmar1",
"colsun2",
"combul2",
"combuz1",
"comsan",
"crefra2",
"crheag1",
"crohor1",
"darbar1",
"darter3",
"didcuc1",
"dotbar1",
"dutdov1",
"easmog1",
"eaywag1",
"edcsun3",
"egygoo",
"equaka1",
"eswdov1",
"eubeat1",
"fatrav1",
"fatwid1",
"fislov1",
"fotdro5",
"gabgos2",
"gargan",
"gbesta1",
"gnbcam2",
"gnhsun1",
"gobbun1",
"gobsta5",
"gobwea1",
"golher1",
"grbcam1",
"grccra1",
"grecor",
"greegr",
"grewoo2",
"grwpyt1",
"gryapa1",
"grywrw1",
"gybfis1",
"gycwar3",
"gyhbus1",
"gyhkin1",
"gyhneg1",
"gyhspa1",
"gytbar1",
"hadibi1",
"hamerk1",
"hartur1",
"helgui",
"hipbab1",
"hoopoe",
"huncis1",
"hunsun2",
"joygre1",
"kerspa2",
"klacuc1",
"kvbsun1",
"laudov1",
"lawgol",
"lesmaw1",
"lessts1",
"libeat1",
"litegr",
"litswi1",
"litwea1",
"loceag1",
"lotcor1",
"lotlap1",
"luebus1",
"mabeat1",
"macshr1",
"malkin1",
"marsto1",
"marsun2",
"mcptit1",
"meypar1",
"moccha1",
"mouwag1",
"ndcsun2",
"nobfly1",
"norbro1",
"norcro1",
"norfis1",
"norpuf1",
"nubwoo1",
"pabspa1",
"palfly2",
"palpri1",
"piecro1",
"piekin1",
"pitwhy",
"purgre2",
"pygbat1",
"quailf1",
"ratcis1",
"raybar1",
"rbsrob1",
"rebfir2",
"rebhor1",
"reboxp1",
"reccor",
"reccuc1",
"reedov1",
"refbar2",
"refcro1",
"reftin1",
"refwar2",
"rehblu1",
"rehwea1",
"reisee2",
"rerswa1",
"rewsta1",
"rindov",
"rocmar2",
"rostur1",
"ruegls1",
"rufcha2",
"sacibi2",
"sccsun2",
"scrcha1",
"scthon1",
"shesta1",
"sichor1",
"sincis1",
"slbgre1",
"slcbou1",
"sltnig1",
"sobfly1",
"somgre1",
"somtit4",
"soucit1",
"soufis1",
"spemou2",
"spepig1",
"spewea1",
"spfbar1",
"spfwea1",
"spmthr1",
"spwlap1",
"squher1",
"strher",
"strsee1",
"stusta1",
"subbus1",
"supsta1",
"tacsun1",
"tafpri1",
"tamdov1",
"thrnig1",
"trobou1",
"varsun2",
"vibsta2",
"vilwea1",
"vimwea1",
"walsta1",
"wbgbir1",
"wbrcha2",
"wbswea1",
"wfbeat1",
"whbcan1",
"whbcou1",
"whbcro2",
"whbtit5",
"whbwea1",
"whbwhe3",
"whcpri2",
"whctur2",
"wheslf1",
"whhsaw1",
"whihel1",
"whrshr1",
"witswa1",
"wlwwar",
"wookin1",
"woosan",
"wtbeat1",
"yebapa1",
"yebbar1",
"yebduc1",
"yebere1",
"yebgre1",
"yebsto1",
"yeccan1",
"yefcan",
"yelbis1",
"yenspu1",
"yertin1",
"yesbar1",
"yespet1",
"yetgre1",
"yewgre1",
]
)
cfg.n_classes = len(cfg.birds)
cfg.train_df = "../input/birdclef-2023/train_metadata.csv"
class AudioUtil:
@staticmethod
def open(audio_file):
sig, sr = torchaudio.load(audio_file)
return (sig, sr)
@staticmethod
def rechannel(aud, new_channel):
sig, sr = aud
if sig.shape[0] == new_channel:
# Nothing to do
return aud
if new_channel == 1:
# Convert from stereo to mono by selecting only the first channel
resig = sig[:1, :]
else:
# Convert from mono to stereo by duplicating the first channel
resig = torch.cat([sig, sig, sig])
return (resig, sr)
@staticmethod
def resample(aud, newsr):
sig, sr = aud
if sr == newsr:
# Nothing to do
return aud
num_channels = sig.shape[0]
# Resample first channel
resig = torchaudio.transforms.Resample(sr, newsr)(sig[:1, :])
if num_channels > 1:
# Resample the second channel and merge both channels
retwo = torchaudio.transforms.Resample(sr, newsr)(sig[1:, :])
resig = torch.cat([resig, retwo])
return (resig, newsr)
@staticmethod
def pad_trunc(aud, max_ms):
sig, sr = aud
num_rows, sig_len = sig.shape
max_len = sr // 1000 * max_ms
if sig_len > max_len:
# Truncate the signal to the given length
sig = sig[:, :max_len]
elif sig_len < max_len:
# Length of padding to add at the beginning and end of the signal
pad_begin_len = random.randint(0, max_len - sig_len)
pad_end_len = max_len - sig_len - pad_begin_len
# Pad with 0s
pad_begin = torch.zeros((num_rows, pad_begin_len))
pad_end = torch.zeros((num_rows, pad_end_len))
sig = torch.cat((pad_begin, sig, pad_end), 1)
return (sig, sr)
@staticmethod
def time_shift(aud, shift_limit):
sig, sr = aud
_, sig_len = sig.shape
shift_amt = int(random.random() * shift_limit * sig_len)
        return (sig.roll(shift_amt, dims=1), sr)  # roll along the time axis only
@staticmethod
def spectro_gram(aud, n_mels=64, n_fft=1024, hop_len=None):
sig, sr = aud
top_db = 80
# spec has shape [channel, n_mels, time], where channel is mono, stereo etc
spec = torchaudio.transforms.MelSpectrogram(
sr, n_fft=n_fft, hop_length=hop_len, n_mels=n_mels
)(sig)
# Convert to decibels
spec = torchaudio.transforms.AmplitudeToDB(top_db=top_db)(spec)
return spec
@staticmethod
def spectro_augment(spec, max_mask_pct=0.1, n_freq_masks=1, n_time_masks=1):
_, n_mels, n_steps = spec.shape
mask_value = spec.mean()
aug_spec = spec
freq_mask_param = max_mask_pct * n_mels
for _ in range(n_freq_masks):
aug_spec = torchaudio.transforms.FrequencyMasking(freq_mask_param)(
aug_spec, mask_value
)
time_mask_param = max_mask_pct * n_steps
for _ in range(n_time_masks):
aug_spec = torchaudio.transforms.TimeMasking(time_mask_param)(
aug_spec, mask_value
)
return aug_spec
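# preprocessData chains the AudioUtil steps above: resample to 32 kHz, expand to
# 3 channels, pad/trim to a fixed length, apply a random time shift, build a mel
# spectrogram in dB, apply SpecAugment masking, then standardise the spectrogram.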
def preprocessData(aud):
duration = 8000
sr = 32000
channel = 3
shift_pct = 0.4
reaud = AudioUtil.resample(aud, sr)
rechan = AudioUtil.rechannel(reaud, channel)
dur_aud = AudioUtil.pad_trunc(rechan, duration)
shift_aud = AudioUtil.time_shift(dur_aud, shift_pct)
sgram = AudioUtil.spectro_gram(shift_aud, n_mels=64, n_fft=1024, hop_len=None)
aug_sgram = AudioUtil.spectro_augment(
sgram, max_mask_pct=0.1, n_freq_masks=2, n_time_masks=2
)
aug_sgram_m, aug_sgram_s = aug_sgram.mean(), aug_sgram.std()
aug_sgram = (aug_sgram - aug_sgram_m) / aug_sgram_s
return aug_sgram
# Define a data class to handle the metadata file
class RawData:
__train_data_path = cfg.train_data_folder
__train_df = pd.read_csv(cfg.train_df)
__labels_t = None
__audio_names = None
__le = preprocessing.LabelEncoder()
@staticmethod
def labels_t():
if RawData.__labels_t is None:
labels = RawData.__train_df["primary_label"]
# RawData.__le = preprocessing.LabelEncoder()
RawData.__labels_t = RawData.__le.fit_transform(labels)
# RawData.__labels_t = torch.as_tensor(targets)
return RawData.__labels_t
@staticmethod
def audio_names():
if RawData.__audio_names is None:
RawData.__audio_names = RawData.__train_df["filename"]
return RawData.__audio_names
@staticmethod
def re_transf(data):
return RawData.__le.inverse_transform(data)
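# MakeFrame splits a (channels, samples) waveform into non-overlapping chunks of
# `duration` seconds using unfold; clips shorter than one chunk are returned whole
# with an added leading dimension.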
def MakeFrame(audio, duration=5, sr=32000):
frame_length = int(duration * sr)
frame_step = int(duration * sr)
if frame_length > audio.shape[1]:
return audio.unsqueeze(0)
# audio = torch.tensor(audio)
else:
chunks = audio.unfold(1, frame_length, frame_step).transpose(0, 1)
return chunks
import shutil
# data_path = '/kaggle/working/'
save_data_path = "/kaggle/working/data/"
if os.path.exists(save_data_path):
shutil.rmtree(save_data_path)
t_data_path = save_data_path + "data/"
os.mkdir(save_data_path)
os.mkdir(t_data_path)
for tit in cfg.birds[:132]:
os.mkdir(t_data_path + tit)
cfg.birds[132]
train_df = pd.read_csv(cfg.train_df)
n = len(train_df)
# k=int(n/2)
lists = []
for i in range(n):
print(i, end="\r")
data = train_df.iloc[i]
if data["primary_label"] == "lesmaw1":
print(i)
break
# print(data['primary_label'])
loadpath = cfg.train_data_folder + data["filename"]
aud, sr = torchaudio.load(loadpath)
# print(aud.shape)
signal = preprocessData((aud, sr))
savepath = t_data_path + data["filename"][:-4] + ".pth"
torch.save(signal, savepath)
lists.append([data["primary_label"], data["filename"]])
# break
"""train_df = pd.read_csv(cfg.train_df)
lists = []
for i in range(len(train_df)):
data = train_df.iloc[i]
#print(data['primary_label'])
loadpath = cfg.train_data_folder+ data['filename']
aud,sr = torchaudio.load(loadpath)
#print(aud.shape)
frames=MakeFrame(aud)
#print(frames.shape)
i=5
for frame in frames:
signal=preprocessData((frame,sr))
savepath = save_data_path+ data['filename'][:-4]+'_%d'%i+'.pth'
torch.save(signal,savepath)
lists.append([data['primary_label'],savepath])
i+=5
#break"""
pd_lists = pd.DataFrame(lists)
pd_lists.to_csv(save_data_path + "lists.csv", index=False)
import os
import zipfile
import datetime
def file2zip(packagePath, zipPath):
"""
:param packagePath: 文件夹路径
:param zipPath: 压缩包路径
:return:
"""
    zf = zipfile.ZipFile(zipPath, "w", zipfile.ZIP_DEFLATED)
    for path, dirNames, fileNames in os.walk(packagePath):
        fpath = path.replace(packagePath, "")
        for name in fileNames:
            fullName = os.path.join(path, name)
            # build the archive-relative name with the OS path separator rather
            # than a hard-coded backslash, so the zip unpacks cleanly on Linux
            name = os.path.join(fpath, name)
            zf.write(fullName, name)
    zf.close()
# Folder to compress
# packagePath = '/kaggle/working/'
zipPath = save_data_path + "output.zip"
if os.path.exists(zipPath):
os.remove(zipPath)
file2zip(t_data_path, zipPath)
print("打包完成")
print(datetime.datetime.utcnow())
| false | 0 | 4,329 | 0 | 4,329 | 4,329 |
||
129338274
|
# EDA Simplified:<span style="color:
# #b6d553;"> ICR Identifying Age-Related Conditions
# (BETA, English/Mandarin Version)
# 简化探索性数据分析:<span style="color:
# #b6d553;"> 体外细胞研究 识别与年龄相关的状况
# (测试版,中英文版)
# Introduction (介绍)
# In 2022, we went into data analysis on most health-related conditions, from Parkinson's disease to cancer disease as well as strokes in the brain. Until we head into this competition, we realized that most of the health issues came with aging, including the ones we visualized previously. Even worse, aging can be a risk factor for large numbers of complications and diseases. As a result, the ML models we salvaged like XGBoost or Random Forest were used to predict medical conditions from each patient, but it isn't good enough to perform well in predicting such medical issues in each patient. However, people can improve machine learning to improve on detecting medical conditions with measurements of anonymous characteristics. And for getting started on participating in the competition hosted by InVitro Cell Research, let's warp into our data analysis on detecting age-related conditions!
# 2022 年,我们对大多数与健康相关的疾病进行了数据分析,从帕金森病到癌症,再到脑中风。在我们参加这场比赛之前,我们意识到大多数健康问题都伴随着衰老,包括我们之前设想的那些。更糟糕的是,衰老可能是大量并发症和疾病的危险因素。因此,我们挽救的 ML 模型(如 XGBoost 或随机森林)用于预测每位患者的医疗状况,但在预测每位患者的此类医疗问题方面表现不佳。然而,人们可以改进机器学习,以通过测量匿名特征来改进检测医疗状况。为了开始参加由体外细胞研究主办的比赛,让我们进入我们关于检测与年龄相关的状况的数据分析!
# Imports and Data Setup (导入和数据设置)
# To start our data analysis, we first import the pandas module as pd for using data science and then import the numpy module as np for integrating linear algebra into our notebook. Lastly, we import the altair module as alt and plotly module's express module as px for plotting the graphs from a specific data source.
# 为了开始我们的数据分析,我们:
# 1. 将 `pandas` 模块导入为 `pd` 以使用数据科学,然后将 `numpy` 模块导入为 `np` 以将线性代数集成到我们的笔记本中。
# 2. 将 `altair` 模块作为 `alt` 导入,并将 `plotly` 模块的 `express` 模块作为 `px` 导入,以绘制来自特定数据源的图形。
# Data Science and Linear Algebra (数据科学和线性代数)
import pandas as pd
import numpy as np
# Plotting Graphs (绘制图表)
import altair as alt
import plotly.express as px
# After we import the modules that are important for our data analysis, we characterize two dataframes: train_df and greeks_df, to read out the train and greeks CSV files with the pd module's read_csv function. Thenceforth, we display the first five rows of our two newly-created dataframes with the head function.
# 在我们导入对我们的数据分析很重要的模块之后,我们描述了两个数据帧:`train_df` 和 `greeks_df`,以使用 pd 模块的 `read_csv` 函数读取 `train` 和 `greeks` *CSV* 文件。此后,我们使用 `head` 函数显示两个新创建的数据框的前五行。
train_df = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv")
greeks_df = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/greeks.csv")
train_df.head()
greeks_df.head()
# Basic Analysis of the Dataframes (数据框的基本分析)
# Now that we generated our two dataframes, let's take a basic look at the greeks_df and train_df dataframes! First, let's find the overall number of data in both dataframes with the len function.
# 现在我们生成了两个数据帧,让我们基本了解一下 `greeks_df` 和 `train_df` 数据帧!首先,让我们使用 `len` 函数找出两个数据框中的数据总数。
len(greeks_df)
len(train_df)
# How interesting. Following running the two code cells above, we counted 617 data entities in both greeks_df and train_df dataframes. In other words, the same number of data entities we noticed from the greeks_df and the train_df dataframes implied to us that they contain separate data about each patient ID by Greek measurement or characteristic.
# 多么有趣。在运行上面的两个代码单元之后,我们计算了 `greeks_df` 和 `train_df` 数据帧中的 **617** 个数据实体。换句话说,我们从 `greeks_df` 和 `train_df` 数据帧中注意到的相同数量的数据实体向我们暗示,它们包含有关每个患者标识符的独立数据(按希腊度量或特征)。
# Now let's find the number of missing values in both dataframes! We simply call the isna function on the greeks_df and train_df dataframes to flag any missing values and then add them up with the sum function.
# 现在让我们找出两个数据框中当前数据的数量!我们只需在 `greeks_df` 和 `train_df` 数据帧中使用 `isna` 函数来搜索任何缺失值,然后将缺失值的总数与 `sum` 函数相加。
greeks_df.isna().sum()
train_df.isna().sum()
# While the greeks_df dataframe has no missing values, we spotted 60 missing values in the BQ and EL columns, 3 in the CC column, 2 in CB and FS, and 1 in the DU, FC, FL, and GL columns, tallying a total 134 missing values in the train_df dataframe. Additionally, the number of missing values we found in some columns from the train_df dataframe hinted to us that there are some unknown data on some health characteristics of some patients.
# 虽然 `greeks_df` 数据框没有缺失值,但我们在 `BQ` 和 `EL` 列中发现了 **60** 个缺失值,在 `CC` 列中有 **3** 个,在 `CB` 和 `FS` 中有 **2** 个,在 `DU`、`FC`、`FL` 和 `GL` 列中有 **1** 个,总计`train_df` 数据帧中有 **134** 个缺失值。此外,我们在 `train_df` 数据框的某些列中发现的缺失值数量向我们暗示,某些患者的某些健康特征存在一些未知数据。
# Lastly, for our basic dataframe analysis, let's find the number of columns in the greeks_df and the train_df dataframes! All we need to do is to plug the shape attribute into the two specified dataframes, setting the last index as 1.
# 最后,对于我们的基本数据帧分析,让我们找出 `greeks_df` 和 `train_df` 数据帧中的列数!我们需要做的就是将 `shape` 属性插入到两个指定的数据框中,将最后一个索引设置为 **1**。
greeks_df.shape[1]
train_df.shape[1]
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/338/129338274.ipynb
| null | null |
[{"Id": 129338274, "ScriptId": 38447300, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3894504, "CreationDate": "05/12/2023 23:35:20", "VersionNumber": 1.0, "Title": "EDA Simplified: ICR Age-Rel Conditions (B, EN/ZH)", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 86.0, "LinesInsertedFromPrevious": 86.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
| null | null | null | null |
# EDA Simplified:<span style="color:
# #b6d553;"> ICR Identifying Age-Related Conditions
# (BETA, English/Mandarin Version)
# 简化探索性数据分析:<span style="color:
# #b6d553;"> 体外细胞研究 识别与年龄相关的状况
# (测试版,中英文版)
# Introduction (介绍)
# In 2022, we went into data analysis on most health-related conditions, from Parkinson's disease to cancer disease as well as strokes in the brain. Until we head into this competition, we realized that most of the health issues came with aging, including the ones we visualized previously. Even worse, aging can be a risk factor for large numbers of complications and diseases. As a result, the ML models we salvaged like XGBoost or Random Forest were used to predict medical conditions from each patient, but it isn't good enough to perform well in predicting such medical issues in each patient. However, people can improve machine learning to improve on detecting medical conditions with measurements of anonymous characteristics. And for getting started on participating in the competition hosted by InVitro Cell Research, let's warp into our data analysis on detecting age-related conditions!
# 2022 年,我们对大多数与健康相关的疾病进行了数据分析,从帕金森病到癌症,再到脑中风。在我们参加这场比赛之前,我们意识到大多数健康问题都伴随着衰老,包括我们之前设想的那些。更糟糕的是,衰老可能是大量并发症和疾病的危险因素。因此,我们挽救的 ML 模型(如 XGBoost 或随机森林)用于预测每位患者的医疗状况,但在预测每位患者的此类医疗问题方面表现不佳。然而,人们可以改进机器学习,以通过测量匿名特征来改进检测医疗状况。为了开始参加由体外细胞研究主办的比赛,让我们进入我们关于检测与年龄相关的状况的数据分析!
# Imports and Data Setup (导入和数据设置)
# To start our data analysis, we first import the pandas module as pd for using data science and then import the numpy module as np for integrating linear algebra into our notebook. Lastly, we import the altair module as alt and plotly module's express module as px for plotting the graphs from a specific data source.
# 为了开始我们的数据分析,我们:
# 1. 将 `pandas` 模块导入为 `pd` 以使用数据科学,然后将 `numpy` 模块导入为 `np` 以将线性代数集成到我们的笔记本中。
# 2. 将 `altair` 模块作为 `alt` 导入,并将 `plotly` 模块的 `express` 模块作为 `px` 导入,以绘制来自特定数据源的图形。
# Data Science and Linear Algebra (数据科学和线性代数)
import pandas as pd
import numpy as np
# Plotting Graphs (绘制图表)
import altair as alt
import plotly.express as px
# After we import the modules that are important for our data analysis, we characterize two dataframes: train_df and greeks_df, to read out the train and greeks CSV files with the pd module's read_csv function. Thenceforth, we display the first five rows of our two newly-created dataframes with the head function.
# 在我们导入对我们的数据分析很重要的模块之后,我们描述了两个数据帧:`train_df` 和 `greeks_df`,以使用 pd 模块的 `read_csv` 函数读取 `train` 和 `greeks` *CSV* 文件。此后,我们使用 `head` 函数显示两个新创建的数据框的前五行。
train_df = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv")
greeks_df = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/greeks.csv")
train_df.head()
greeks_df.head()
# Basic Analysis of the Dataframes (数据框的基本分析)
# Now that we generated our two dataframes, let's take a basic look at the greeks_df and train_df dataframes! First, let's find the overall number of data in both dataframes with the len function.
# 现在我们生成了两个数据帧,让我们基本了解一下 `greeks_df` 和 `train_df` 数据帧!首先,让我们使用 `len` 函数找出两个数据框中的数据总数。
len(greeks_df)
len(train_df)
# How interesting. Following running the two code cells above, we counted 617 data entities in both greeks_df and train_df dataframes. In other words, the same number of data entities we noticed from the greeks_df and the train_df dataframes implied to us that they contain separate data about each patient ID by Greek measurement or characteristic.
# 多么有趣。在运行上面的两个代码单元之后,我们计算了 `greeks_df` 和 `train_df` 数据帧中的 **617** 个数据实体。换句话说,我们从 `greeks_df` 和 `train_df` 数据帧中注意到的相同数量的数据实体向我们暗示,它们包含有关每个患者标识符的独立数据(按希腊度量或特征)。
# Now let's find the number of missing values in both dataframes! We simply call the isna function on the greeks_df and train_df dataframes to flag any missing values and then add them up with the sum function.
# 现在让我们找出两个数据框中当前数据的数量!我们只需在 `greeks_df` 和 `train_df` 数据帧中使用 `isna` 函数来搜索任何缺失值,然后将缺失值的总数与 `sum` 函数相加。
greeks_df.isna().sum()
train_df.isna().sum()
# While the greeks_df dataframe has no missing values, we spotted 60 missing values in the BQ and EL columns, 3 in the CC column, 2 in CB and FS, and 1 in the DU, FC, FL, and GL columns, tallying a total 134 missing values in the train_df dataframe. Additionally, the number of missing values we found in some columns from the train_df dataframe hinted to us that there are some unknown data on some health characteristics of some patients.
# 虽然 `greeks_df` 数据框没有缺失值,但我们在 `BQ` 和 `EL` 列中发现了 **60** 个缺失值,在 `CC` 列中有 **3** 个,在 `CB` 和 `FS` 中有 **2** 个,在 `DU`、`FC`、`FL` 和 `GL` 列中有 **1** 个,总计`train_df` 数据帧中有 **134** 个缺失值。此外,我们在 `train_df` 数据框的某些列中发现的缺失值数量向我们暗示,某些患者的某些健康特征存在一些未知数据。
# Lastly, for our basic dataframe analysis, let's find the number of columns in the greeks_df and the train_df dataframes! All we need to do is to plug the shape attribute into the two specified dataframes, setting the last index as 1.
# 最后,对于我们的基本数据帧分析,让我们找出 `greeks_df` 和 `train_df` 数据帧中的列数!我们需要做的就是将 `shape` 属性插入到两个指定的数据框中,将最后一个索引设置为 **1**。
greeks_df.shape[1]
train_df.shape[1]
| false | 0 | 1,897 | 2 | 1,897 | 1,897 |
||
129338985
|
# <p style="font-family: monospace;
# font-weight: bold;
# letter-spacing: 2px;
# color: black;
# font-size: 200%;
# text-align: left;
# padding: 0px;
# border-bottom: 4px solid #FF4E4E" >Predicting Age-Related Conditions from Anonymized Health Data
#
# This is a binary classification competition where one class corresponds to the presence of one or more medical conditions while the second class corresponds to having no medical conditions. All of the features in the data are anonymized and the evaluation metric is the balanced log loss. This notebook includes the following:
# 1. Brief EDA of the training data and target variable
# 2. A baseline CatBoost model tuned using Optuna
# 3. An elastic net baseline tuned using Optuna
# 4. Feature importance metrics, including an EDA on the important features
# 5. A detailed exploration of the experimental characteristics mentioned in the data description
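# As a quick point of reference for the balanced log loss mentioned above, here is
# a minimal sketch of the metric (my own helper, `balanced_log_loss_sketch`, not the
# official competition scorer, and the toy labels/probabilities are made up): the log
# loss is averaged within each class and the two class means are then averaged, so
# both classes count equally regardless of how often they occur.
import numpy as np
def balanced_log_loss_sketch(y_true, y_prob, eps=1e-15):
    # y_true: 0/1 labels, y_prob: predicted probability of class 1
    y_true = np.asarray(y_true)
    p = np.clip(np.asarray(y_prob, dtype=float), eps, 1 - eps)
    loss_0 = -np.mean(np.log(1 - p[y_true == 0]))  # mean log loss over class 0
    loss_1 = -np.mean(np.log(p[y_true == 1]))  # mean log loss over class 1
    return (loss_0 + loss_1) / 2
print(balanced_log_loss_sketch([0, 0, 1, 1], [0.1, 0.2, 0.8, 0.7]))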
# <p style="font-family: monospace;
# font-weight: bold;
# letter-spacing: 2px;
# color: black;
# font-size: 200%;
# text-align: left;
# padding: 0px;
# border-bottom: 4px solid #FF4E4E" >Table of Contents
# * [Feature Definitions](#section-zero)
# * [Data and Cohort Characteristics](#section-one)
# * [Target Variable Exploration](#section-two)
# * [Baseline Models - CatBoost and Elastic Net](#section-three)
# * [Feature Importance](#section-four)
# * [EDA on Important Features](#section-five)
# * [Conclusions and Future Work](#section-six)
# Installs
# Imports
import optuna
import plotly
import numpy as np
import polars as pl
import statistics as stats
import plotly.figure_factory as ff
from lets_plot import *
from lets_plot.bistro.corr import *
from sklearn import model_selection
from lets_plot.mapping import as_discrete
from catboost import Pool, CatBoostClassifier
# So the plots look nice
LetsPlot.setup_html()
plotly.offline.init_notebook_mode(connected=True)
#
# <p style="font-family: monospace;
# font-weight: bold;
# letter-spacing: 2px;
# color: black;
# font-size: 200%;
# text-align: left;
# padding: 0px;
# border-bottom: 4px solid #FF4E4E" >Feature Definitions
# Table variable names and descriptions
id_desc = "Unique identifier for each observation"
ab_gl_desc = "56 anonymized health characteristics. All are numeric except for EJ"
class_desc = "Binary target: 1 indicates the presence of 1+ medical conditions"
alpha_desc = "Age related condition, only available for training"
bgd_desc = "Three experimental characteristics - only available for training"
epsilon_desc = "The date the data was collected for this subject"
vars_table = [
["Variable", "Description"],
["ID", id_desc],
["AB - GL", ab_gl_desc],
["Class", class_desc],
["Alpha", alpha_desc],
["Beta, Gamma, Delta", bgd_desc],
["Epsilon", epsilon_desc],
]
colorscale = [[0, "#FF4141"], [0.05, "#FF7A7A"], [1, "#FFD8D8"]]
ff.create_table(vars_table, colorscale=colorscale, height_constant=15)
#
# <p style="font-family: monospace;
# font-weight: bold;
# letter-spacing: 2px;
# color: black;
# font-size: 200%;
# text-align: left;
# padding: 0px;
# border-bottom: 4px solid #FF4E4E" >Data and Cohort Characteristics
# Read in the data
df_train = pl.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv")
df_test = pl.read_csv("/kaggle/input/icr-identify-age-related-conditions/test.csv")
df_greek = pl.read_csv("/kaggle/input/icr-identify-age-related-conditions/greeks.csv")
df_sub = pl.read_csv(
"/kaggle/input/icr-identify-age-related-conditions/sample_submission.csv"
)
print("\033[1m" + "Training set characteristics:" + "\033[0m")
df_train.glimpse()
print("\033[1m" + "Greek data set characteristics:" + "\033[0m")
df_greek.glimpse()
print("\033[1m" + "Test set characteristics:" + "\033[0m")
df_test.glimpse()
print("\033[1m" + "Missingness in the training set:" + "\033[0m")
df_train.null_count()
print("\033[1m" + "Missingness in the greek set:" + "\033[0m")
df_greek.null_count()
print("\033[1m" + "Missingness in the test set:" + "\033[0m")
df_test.null_count()
print("\033[1m" + "Duplicate IDs in the training set" + "\033[0m")
df_train.get_column("Id").is_duplicated().sum()
print("\033[1m" + "Duplicate IDs in the greek set" + "\033[0m")
df_greek.get_column("Id").is_duplicated().sum()
print("\033[1m" + "Duplicate IDs in the test set" + "\033[0m")
df_test.get_column("Id").is_duplicated().sum()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/338/129338985.ipynb
| null | null |
[{"Id": 129338985, "ScriptId": 38455273, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 6329446, "CreationDate": "05/12/2023 23:51:41", "VersionNumber": 3.0, "Title": "ICR EDA + Multiple Baselines", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 195.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 194.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# <p style="font-family: monospace;
# font-weight: bold;
# letter-spacing: 2px;
# color: black;
# font-size: 200%;
# text-align: left;
# padding: 0px;
# border-bottom: 4px solid #FF4E4E" >Predicting Age-Related Conditions from Anonymized Health Data
#
# This is a binary classification competition where one class corresponds to the presence of one or more medical conditions while the second class corresponds to having no medical conditions. All of the features in the data are anonymized and the evaluation metric is the balanced log loss. This notebook includes the following:
# 1. Brief EDA of the training data and target variable
# 2. A baseline CatBoost model tuned using Optuna
# 3. An elastic net baseline tuned using Optuna
# 4. Feature importance metrics, including an EDA on the important features
# 5. A detailed exploration of the experimental characteristics mentioned in the data description
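# As a quick point of reference for the balanced log loss mentioned above, here is
# a minimal sketch of the metric (my own helper, `balanced_log_loss_sketch`, not the
# official competition scorer, and the toy labels/probabilities are made up): the log
# loss is averaged within each class and the two class means are then averaged, so
# both classes count equally regardless of how often they occur.
import numpy as np
def balanced_log_loss_sketch(y_true, y_prob, eps=1e-15):
    # y_true: 0/1 labels, y_prob: predicted probability of class 1
    y_true = np.asarray(y_true)
    p = np.clip(np.asarray(y_prob, dtype=float), eps, 1 - eps)
    loss_0 = -np.mean(np.log(1 - p[y_true == 0]))  # mean log loss over class 0
    loss_1 = -np.mean(np.log(p[y_true == 1]))  # mean log loss over class 1
    return (loss_0 + loss_1) / 2
print(balanced_log_loss_sketch([0, 0, 1, 1], [0.1, 0.2, 0.8, 0.7]))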
# <p style="font-family: monospace;
# font-weight: bold;
# letter-spacing: 2px;
# color: black;
# font-size: 200%;
# text-align: left;
# padding: 0px;
# border-bottom: 4px solid #FF4E4E" >Table of Contents
# * [Feature Definitions](#section-zero)
# * [Data and Cohort Characteristics](#section-one)
# * [Target Variable Exploration](#section-two)
# * [Baseline Models - CatBoost and Elastic Net](#section-three)
# * [Feature Importance](#section-four)
# * [EDA on Important Features](#section-five)
# * [Conclusions and Future Work](#section-six)
# Installs
# Imports
import optuna
import plotly
import numpy as np
import polars as pl
import statistics as stats
import plotly.figure_factory as ff
from lets_plot import *
from lets_plot.bistro.corr import *
from sklearn import model_selection
from lets_plot.mapping import as_discrete
from catboost import Pool, CatBoostClassifier
# So the plots look nice
LetsPlot.setup_html()
plotly.offline.init_notebook_mode(connected=True)
#
# <p style="font-family: monospace;
# font-weight: bold;
# letter-spacing: 2px;
# color: black;
# font-size: 200%;
# text-align: left;
# padding: 0px;
# border-bottom: 4px solid #FF4E4E" >Feature Definitions
# Table variable names and descriptions
id_desc = "Unique identifier for each observation"
ab_gl_desc = "56 anonymized health characteristics. All are numeric except for EJ"
class_desc = "Binary target: 1 indicates the presence of 1+ medical conditions"
alpha_desc = "Age related condition, only available for training"
bgd_desc = "Three experimental characteristics - only available for training"
epsilon_desc = "The date the data was collected for this subject"
vars_table = [
["Variable", "Description"],
["ID", id_desc],
["AB - GL", ab_gl_desc],
["Class", class_desc],
["Alpha", alpha_desc],
["Beta, Gamma, Delta", bgd_desc],
["Epsilon", epsilon_desc],
]
colorscale = [[0, "#FF4141"], [0.05, "#FF7A7A"], [1, "#FFD8D8"]]
ff.create_table(vars_table, colorscale=colorscale, height_constant=15)
#
# <p style="font-family: monospace;
# font-weight: bold;
# letter-spacing: 2px;
# color: black;
# font-size: 200%;
# text-align: left;
# padding: 0px;
# border-bottom: 4px solid #FF4E4E" >Data and Cohort Characteristics
# Read in the data
df_train = pl.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv")
df_test = pl.read_csv("/kaggle/input/icr-identify-age-related-conditions/test.csv")
df_greek = pl.read_csv("/kaggle/input/icr-identify-age-related-conditions/greeks.csv")
df_sub = pl.read_csv(
"/kaggle/input/icr-identify-age-related-conditions/sample_submission.csv"
)
print("\033[1m" + "Training set characteristics:" + "\033[0m")
df_train.glimpse()
print("\033[1m" + "Greek data set characteristics:" + "\033[0m")
df_greek.glimpse()
print("\033[1m" + "Test set characteristics:" + "\033[0m")
df_test.glimpse()
print("\033[1m" + "Missingness in the training set:" + "\033[0m")
df_train.null_count()
print("\033[1m" + "Missingness in the greek set:" + "\033[0m")
df_greek.null_count()
print("\033[1m" + "Missingness in the test set:" + "\033[0m")
df_test.null_count()
print("\033[1m" + "Duplicate IDs in the training set" + "\033[0m")
df_train.get_column("Id").is_duplicated().sum()
print("\033[1m" + "Duplicate IDs in the greek set" + "\033[0m")
df_greek.get_column("Id").is_duplicated().sum()
print("\033[1m" + "Duplicate IDs in the test set" + "\033[0m")
df_test.get_column("Id").is_duplicated().sum()
| false | 0 | 1,451 | 0 | 1,451 | 1,451 |
||
129242937
|
# packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import json
# file overview
# # Arxiv Metadata (JSON file)
# import JSON data
file_name = "../input/2023-kaggle-ai-report/arxiv_metadata_20230510.json"
dict_arxiv = []
for line in open(file_name, "r"):
dict_arxiv.append(json.loads(line))
# show entry
dict_arxiv[0]
# convert to data frame
df_arxiv = pd.DataFrame.from_dict(dict_arxiv)
# clean up
del dict_arxiv
# preview
df_arxiv.head()
# structure
df_arxiv.info(show_counts=True)
# categories - show top 50 only
df_arxiv.categories.value_counts()[0:50]
# make available for download as CSV
df_arxiv.to_csv("df_arxiv.csv", index=False)
# filter e.g. for Computer Science / AI
df_select = df_arxiv[df_arxiv.categories == "cs.AI"].reset_index()
df_select
# # Kaggle Writeups
# load data
df_writeups = pd.read_csv("../input/2023-kaggle-ai-report/kaggle_writeups_20230510.csv")
# preview
df_writeups.head()
# structure
df_writeups.info()
# show top 50 competitions by count
df_writeups["Title of Competition"].value_counts()[0:50]
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/242/129242937.ipynb
| null | null |
[{"Id": 129242937, "ScriptId": 38424616, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1330628, "CreationDate": "05/12/2023 06:04:48", "VersionNumber": 3.0, "Title": "\ud83d\udcc2Kaggle AI Report - Access to data", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 59.0, "LinesInsertedFromPrevious": 11.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 48.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import json
# file overview
# # Arxiv Metadata (JSON file)
# import JSON data
file_name = "../input/2023-kaggle-ai-report/arxiv_metadata_20230510.json"
dict_arxiv = []
for line in open(file_name, "r"):
dict_arxiv.append(json.loads(line))
# show entry
dict_arxiv[0]
# convert to data frame
df_arxiv = pd.DataFrame.from_dict(dict_arxiv)
# clean up
del dict_arxiv
# preview
df_arxiv.head()
# structure
df_arxiv.info(show_counts=True)
# categories - show top 50 only
df_arxiv.categories.value_counts()[0:50]
# make available for download as CSV
df_arxiv.to_csv("df_arxiv.csv", index=False)
# filter e.g. for Computer Science / AI
df_select = df_arxiv[df_arxiv.categories == "cs.AI"].reset_index()
df_select
# # Kaggle Writeups
# load data
df_writeups = pd.read_csv("../input/2023-kaggle-ai-report/kaggle_writeups_20230510.csv")
# preview
df_writeups.head()
# structure
df_writeups.info()
# show top 50 competitions by count
df_writeups["Title of Competition"].value_counts()[0:50]
| false | 0 | 387 | 0 | 387 | 387 |
||
129242156
|
import cv2
import numpy as np
import matplotlib.pyplot as plt
path = "/kaggle/input/happy-whale-and-dolphin/train_images/00021adfb725ed.jpg"
img = cv2.imread(path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, dsize=(100, 100))
plt.imshow(img)
plt.show()
print(img.shape)
H = img.shape[0]
W = img.shape[1]
print(H, W)
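# Rough background removal: take the colour of a corner pixel (assumed to be
# background) and black out every pixel whose R, G and B values all lie within
# +/-20% of that corner colour. The same is repeated below for a second corner.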
P = [0, 0]
p0 = P[0]
p1 = P[1]
a0 = img[p0, p1, 0] * 0.8
a1 = img[p0, p1, 0] * 1.2
b0 = img[p0, p1, 1] * 0.8
b1 = img[p0, p1, 1] * 1.2
c0 = img[p0, p1, 2] * 0.8
c1 = img[p0, p1, 2] * 1.2
print(a0, a1, b0, b1, c0, c1)
for h in range(H):
for w in range(W):
# if (img[h,w,:]==col0).all():
if a0 < img[h, w, 0] < a1 and b0 < img[h, w, 1] < b1 and c0 < img[h, w, 2] < c1:
img[h, w, :] = np.array([0, 0, 0])
# print(h,w)
P = [99, 0]
p0 = P[0]
p1 = P[1]
a0 = img[p0, p1, 0] * 0.8
a1 = img[p0, p1, 0] * 1.2
b0 = img[p0, p1, 1] * 0.8
b1 = img[p0, p1, 1] * 1.2
c0 = img[p0, p1, 2] * 0.8
c1 = img[p0, p1, 2] * 1.2
print(a0, a1, b0, b1, c0, c1)
for h in range(H):
for w in range(W):
# if (img[h,w,:]==col0).all():
if a0 < img[h, w, 0] < a1 and b0 < img[h, w, 1] < b1 and c0 < img[h, w, 2] < c1:
img[h, w, :] = np.array([0, 0, 0])
# print(h,w)
plt.imshow(img)
plt.show()
print(img.shape)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/242/129242156.ipynb
| null | null |
[{"Id": 129242156, "ScriptId": 38422870, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2648923, "CreationDate": "05/12/2023 05:55:36", "VersionNumber": 2.0, "Title": "notebookb19363831b", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 61.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 60.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import cv2
import numpy as np
import matplotlib.pyplot as plt
path = "/kaggle/input/happy-whale-and-dolphin/train_images/00021adfb725ed.jpg"
img = cv2.imread(path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
img = cv2.resize(img, dsize=(100, 100))
plt.imshow(img)
plt.show()
print(img.shape)
H = img.shape[0]
W = img.shape[1]
print(H, W)
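# Rough background removal: take the colour of a corner pixel (assumed to be
# background) and black out every pixel whose R, G and B values all lie within
# +/-20% of that corner colour. The same is repeated below for a second corner.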
P = [0, 0]
p0 = P[0]
p1 = P[1]
a0 = img[p0, p1, 0] * 0.8
a1 = img[p0, p1, 0] * 1.2
b0 = img[p0, p1, 1] * 0.8
b1 = img[p0, p1, 1] * 1.2
c0 = img[p0, p1, 2] * 0.8
c1 = img[p0, p1, 2] * 1.2
print(a0, a1, b0, b1, c0, c1)
for h in range(H):
for w in range(W):
# if (img[h,w,:]==col0).all():
if a0 < img[h, w, 0] < a1 and b0 < img[h, w, 1] < b1 and c0 < img[h, w, 2] < c1:
img[h, w, :] = np.array([0, 0, 0])
# print(h,w)
P = [99, 0]
p0 = P[0]
p1 = P[1]
a0 = img[p0, p1, 0] * 0.8
a1 = img[p0, p1, 0] * 1.2
b0 = img[p0, p1, 1] * 0.8
b1 = img[p0, p1, 1] * 1.2
c0 = img[p0, p1, 2] * 0.8
c1 = img[p0, p1, 2] * 1.2
print(a0, a1, b0, b1, c0, c1)
for h in range(H):
for w in range(W):
# if (img[h,w,:]==col0).all():
if a0 < img[h, w, 0] < a1 and b0 < img[h, w, 1] < b1 and c0 < img[h, w, 2] < c1:
img[h, w, :] = np.array([0, 0, 0])
# print(h,w)
plt.imshow(img)
plt.show()
print(img.shape)
| false | 0 | 724 | 0 | 724 | 724 |
||
129242190
|
# packages
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import json
# file overview
# # Arxiv Metadata (JSON file)
# import JSON data
file_name = "../input/2023-kaggle-ai-report/arxiv_metadata_20230510.json"
dict_arxiv = []
with open(file_name, "r") as f:
    for line in f:
        dict_arxiv.append(json.loads(line))
# show entry
dict_arxiv[0]
# convert to data frame
df_arxiv = pd.DataFrame.from_dict(dict_arxiv)
# clean up
del dict_arxiv
# preview
df_arxiv.head()
# structure
df_arxiv.info(show_counts=True)
# make available as CSV
df_arxiv.to_csv("df_arxiv.csv", index=False)
# # Kaggle Writeups
# load data
df_writeups = pd.read_csv("../input/2023-kaggle-ai-report/kaggle_writeups_20230510.csv")
# preview
df_writeups.head()
# structure
df_writeups.info()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/242/129242190.ipynb
| null | null |
[{"Id": 129242190, "ScriptId": 38424616, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 1330628, "CreationDate": "05/12/2023 05:56:02", "VersionNumber": 2.0, "Title": "Kaggle AI Report - Access to data", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 49.0, "LinesInsertedFromPrevious": 20.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 29.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 288 | 0 | 288 | 288 |
||
129242911
|
# importing libraries
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, classification_report
# importing training data
train = pd.read_csv("/kaggle/input/titanic/train.csv")
train.head()
train.isnull().sum()
train["Cabin"].nunique()
# dropping unnecessary columns of training data (assign the result, otherwise nothing is actually dropped)
train = train.drop(["PassengerId", "Name", "Cabin", "Ticket", "Age"], axis=1)
sns.countplot(x="Survived", data=train, hue="Sex", palette="colorblind")
plt.legend(edgecolor="black")
plt.title("Count plot for survived people based on their sex")
plt.xlabel("Survived people")
plt.ylabel("Count")
plt.show()
sns.countplot(x="Pclass", data=train, hue="Sex")
plt.legend(fontsize="large", edgecolor="black")
plt.title("Count plot for class based on their sex")
plt.xlabel("Class")
plt.ylabel("Count")
plt.show()
le = LabelEncoder()
train["Sex"] = le.fit_transform(train["Sex"])
x_train = train[["Pclass", "Sex", "Fare"]]
y_train = train["Survived"]
y_train.shape
test = pd.read_csv("/kaggle/input/titanic/test.csv")
test.shape
test.head()
test.isnull().sum()
test.dropna(subset=["Fare"], inplace=True)
test["Sex"] = le.fit_transform(test["Sex"])
x_test = test[["Pclass", "Sex", "Fare"]]
x_test.head()
log = LogisticRegression()
log.fit(x_train, y_train)
test["Survived"] = log.predict(x_test)
test.head()
def replace(sex):
if sex == 1:
return "Male"
else:
return "Female"
test["Sex"] = test["Sex"].apply(replace)
sns.countplot(x="Survived", data=test, hue="Sex", palette="colorblind")
plt.legend(edgecolor="black")
plt.title("Count plot for survived people based on their sex for logistic regression")
plt.xlabel("Survived people")
plt.ylabel("Count")
plt.show()
rfc = RandomForestClassifier()
rfc.fit(x_train, y_train)
test["new_Survived"] = rfc.predict(x_test)
test.head()
sns.countplot(x="new_Survived", data=test, hue="Sex", palette="colorblind")
plt.legend(edgecolor="black")
plt.title(
"Count plot for survived people based on their sex for random forest classifier"
)
plt.xlabel("Survived people")
plt.ylabel("Count")
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/242/129242911.ipynb
| null | null |
[{"Id": 129242911, "ScriptId": 38425318, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13768211, "CreationDate": "05/12/2023 06:04:30", "VersionNumber": 1.0, "Title": "titanic", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 97.0, "LinesInsertedFromPrevious": 97.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
| null | null | null | null |
| false | 0 | 732 | 1 | 732 | 732 |
||
129235608
|
<jupyter_start><jupyter_text>Spotify and Youtube
Dataset of songs by various artists around the world; for each song the following are present:
- Several statistics of the Spotify version of the track, including the number of streams;
- The number of views of the song's official music video on YouTube.
# **Content**
It includes 26 variables for each of the songs collected from spotify. These variables are briefly described next:
- **Track**: name of the song, as visible on the Spotify platform.
- **Artist**: name of the artist.
- **Url_spotify**: the Url of the artist.
- **Album**: the album in which the song is contained on Spotify.
- **Album_type**: indicates whether the song is released on Spotify as a single or contained in an album.
- **Uri**: a spotify link used to find the song through the API.
- **Danceability**: describes how suitable a track is for dancing based on a combination of musical elements including tempo, rhythm stability, beat strength, and overall regularity. A value of 0.0 is least danceable and 1.0 is most danceable.
- **Energy**: is a measure from 0.0 to 1.0 and represents a perceptual measure of intensity and activity. Typically, energetic tracks feel fast, loud, and noisy. For example, death metal has high energy, while a Bach prelude scores low on the scale. Perceptual features contributing to this attribute include dynamic range, perceived loudness, timbre, onset rate, and general entropy.
- **Key**: the key the track is in. Integers map to pitches using standard Pitch Class notation. E.g. 0 = C, 1 = C♯/D♭, 2 = D, and so on. If no key was detected, the value is -1.
- **Loudness**: the overall loudness of a track in decibels (dB). Loudness values are averaged across the entire track and are useful for comparing relative loudness of tracks. Loudness is the quality of a sound that is the primary psychological correlate of physical strength (amplitude). Values typically range between -60 and 0 db.
- **Speechiness**: detects the presence of spoken words in a track. The more exclusively speech-like the recording (e.g. talk show, audio book, poetry), the closer to 1.0 the attribute value. Values above 0.66 describe tracks that are probably made entirely of spoken words. Values between 0.33 and 0.66 describe tracks that may contain both music and speech, either in sections or layered, including such cases as rap music. Values below 0.33 most likely represent music and other non-speech-like tracks.
- **Acousticness**: a confidence measure from 0.0 to 1.0 of whether the track is acoustic. 1.0 represents high confidence the track is acoustic.
- **Instrumentalness**: predicts whether a track contains no vocals. "Ooh" and "aah" sounds are treated as instrumental in this context. Rap or spoken word tracks are clearly "vocal". The closer the instrumentalness value is to 1.0, the greater likelihood the track contains no vocal content. Values above 0.5 are intended to represent instrumental tracks, but confidence is higher as the value approaches 1.0.
- **Liveness**: detects the presence of an audience in the recording. Higher liveness values represent an increased probability that the track was performed live. A value above 0.8 provides strong likelihood that the track is live.
- **Valence**: a measure from 0.0 to 1.0 describing the musical positiveness conveyed by a track. Tracks with high valence sound more positive (e.g. happy, cheerful, euphoric), while tracks with low valence sound more negative (e.g. sad, depressed, angry).
- **Tempo**: the overall estimated tempo of a track in beats per minute (BPM). In musical terminology, tempo is the speed or pace of a given piece and derives directly from the average beat duration.
- **Duration_ms**: the duration of the track in milliseconds.
- **Stream**: number of streams of the song on Spotify.
- **Url_youtube**: URL of the video linked to the song on YouTube, if it has any.
- **Title**: title of the videoclip on youtube.
- **Channel**: name of the channel that have published the video.
- **Views**: number of views.
- **Likes**: number of likes.
- **Comments**: number of comments.
- **Description**: description of the video on Youtube.
- **Licensed**: Indicates whether the video represents licensed content, which means that the content was uploaded to a channel linked to a YouTube content partner and then claimed by that partner.
- **official_video**: boolean value that indicates if the video found is the official video of the song.
# **Notes**
These data are heavily dependent on the time they were collected, which in this case is the 7th of February, 2023.
Kaggle dataset identifier: spotify-and-youtube
<jupyter_script>import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv("/kaggle/input/spotify-and-youtube/Spotify_Youtube.csv")
# Display the first few rows of the dataset
print(df.head())
# Check the dimensions of the dataset
print(df.shape)
# Get information about the dataset
print(df.info())
# Check summary statistics of numerical columns
print(df.describe())
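# A small hedged illustration (not part of the original notebook): the dataset notes above
# describe interpretation thresholds for Speechiness (below 0.33 mostly music, 0.33-0.66 mixed,
# above 0.66 mostly spoken word). The column name "Speechiness" is assumed from that description.
speechiness_buckets = pd.cut(
    df["Speechiness"],
    bins=[0.0, 0.33, 0.66, 1.0],
    labels=["mostly music", "music and speech", "mostly spoken word"],
    include_lowest=True,
)
print(speechiness_buckets.value_counts())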
# Histogram of danceability
plt.figure(figsize=(8, 6))
sns.histplot(df["Danceability"], bins=20, kde=True)
plt.title("Distribution of Danceability")
plt.xlabel("Danceability")
plt.ylabel("Count")
plt.show()
# Histogram of energy
plt.figure(figsize=(8, 6))
sns.histplot(df["Energy"], bins=20, kde=True)
plt.title("Distribution of Energy")
plt.xlabel("Energy")
plt.ylabel("Count")
plt.show()
# Count plot of keys
plt.figure(figsize=(10, 6))
sns.countplot(x="Key", data=df)
plt.title("Count of Songs by Key")
plt.xlabel("Key")
plt.ylabel("Count")
plt.show()
# Calculate correlation matrix
correlation_matrix = df[["Danceability", "Energy"]].corr()
# Create a heatmap of the correlation matrix
plt.figure(figsize=(8, 6))
sns.heatmap(correlation_matrix, annot=True, cmap="coolwarm")
plt.title("Correlation Matrix")
plt.show()
# Count plot of album types
plt.figure(figsize=(10, 6))
sns.countplot(x="Album_type", data=df)
plt.title("Count of Songs by Album Type")
plt.xlabel("Album Type")
plt.ylabel("Count")
plt.show()
# Top 10 most frequent artists
top_artists = df["Artist"].value_counts().head(10)
# Bar plot of top 10 artists
plt.figure(figsize=(10, 6))
sns.barplot(x=top_artists.index, y=top_artists.values)
plt.title("Top 10 Artists")
plt.xlabel("Artist")
plt.ylabel("Count")
plt.xticks(rotation=45)
plt.show()
# Box plots of danceability and energy by album type
plt.figure(figsize=(10, 6))
sns.boxplot(x="Album_type", y="Danceability", data=df)
plt.title("Distribution of Danceability by Album Type")
plt.xlabel("Album Type")
plt.ylabel("Danceability")
plt.show()
plt.figure(figsize=(10, 6))
sns.boxplot(x="Album_type", y="Energy", data=df)
plt.title("Distribution of Energy by Album Type")
plt.xlabel("Album Type")
plt.ylabel("Energy")
plt.show()
# Violin plot of key distribution by danceability
plt.figure(figsize=(10, 6))
sns.violinplot(x="Key", y="Danceability", data=df)
plt.title("Key Distribution by Danceability")
plt.xlabel("Key")
plt.ylabel("Danceability")
plt.show()
# Scatter plot of danceability vs. energy
plt.figure(figsize=(8, 6))
sns.scatterplot(x="Danceability", y="Energy", data=df)
plt.title("Danceability vs. Energy")
plt.xlabel("Danceability")
plt.ylabel("Energy")
plt.show()
# Filter for top 5 artists
top_artists = df["Artist"].value_counts().head(5)
top_artists_df = df[df["Artist"].isin(top_artists.index)]
# Swarm plot of danceability and energy for top artists
plt.figure(figsize=(10, 6))
sns.swarmplot(x="Artist", y="Danceability", data=top_artists_df)
plt.title("Danceability for Top Artists")
plt.xlabel("Artist")
plt.ylabel("Danceability")
plt.xticks(rotation=45)
plt.show()
plt.figure(figsize=(10, 6))
sns.swarmplot(x="Artist", y="Energy", data=top_artists_df)
plt.title("Energy for Top Artists")
plt.xlabel("Artist")
plt.ylabel("Energy")
plt.xticks(rotation=45)
plt.show()
import pandas as pd
import numpy as np
from sklearn.cluster import KMeans
from sklearn.preprocessing import StandardScaler
from sklearn.impute import SimpleImputer
import matplotlib.pyplot as plt
# Load the dataset
df = pd.read_csv("/kaggle/input/spotify-and-youtube/Spotify_Youtube.csv")
# Select the numerical features for clustering
features = ["Danceability", "Energy"]
# Perform feature scaling
scaler = StandardScaler()
scaled_features = scaler.fit_transform(df[features])
# Impute missing values
imputer = SimpleImputer(strategy="mean")
scaled_features = imputer.fit_transform(scaled_features)
# Determine the optimal number of clusters using the elbow method
sse = []
for k in range(1, 11):
kmeans = KMeans(n_clusters=k, random_state=42)
kmeans.fit(scaled_features)
sse.append(kmeans.inertia_)
# Plot the elbow curve
plt.plot(range(1, 11), sse, marker="o")
plt.title("Elbow Curve")
plt.xlabel("Number of Clusters (k)")
plt.ylabel("Sum of Squared Distances")
plt.show()
# Choose the optimal number of clusters based on the elbow curve
k = 4
# Perform K-means clustering
kmeans = KMeans(n_clusters=k, random_state=42)
kmeans.fit(scaled_features)
# Add the cluster labels to the dataset
df["Cluster"] = kmeans.labels_
# Visualize the clusters
plt.scatter(df["Danceability"], df["Energy"], c=df["Cluster"], cmap="viridis")
plt.xlabel("Danceability")
plt.ylabel("Energy")
plt.title("K-means Clustering")
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/235/129235608.ipynb
|
spotify-and-youtube
|
salvatorerastelli
|
[{"Id": 129235608, "ScriptId": 38422786, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11098306, "CreationDate": "05/12/2023 04:31:33", "VersionNumber": 1.0, "Title": "notebooke737bed879", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 186.0, "LinesInsertedFromPrevious": 186.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185104232, "KernelVersionId": 129235608, "SourceDatasetVersionId": 5201951}]
|
[{"Id": 5201951, "DatasetId": 3025170, "DatasourceVersionId": 5274235, "CreatorUserId": 12271862, "LicenseName": "CC0: Public Domain", "CreationDate": "03/20/2023 15:43:25", "VersionNumber": 2.0, "Title": "Spotify and Youtube", "Slug": "spotify-and-youtube", "Subtitle": "Statistics for the Top 10 songs of various spotify artists and their yt video.", "Description": "Dataset of songs of various artist in the world and for each song is present:\n- Several statistics of the music version on spotify, including the number of streams;\n- Number of views of the official music video of the song on youtube.\n\n\n# **Content**\nIt includes 26 variables for each of the songs collected from spotify. These variables are briefly described next:\n- **Track**: name of the song, as visible on the Spotify platform.\n- **Artist**: name of the artist.\n- **Url_spotify**: the Url of the artist.\n- **Album**: the album in wich the song is contained on Spotify.\n- **Album_type**: indicates if the song is relesead on Spotify as a single or contained in an album.\n- **Uri**: a spotify link used to find the song through the API.\n- **Danceability**: describes how suitable a track is for dancing based on a combination of musical elements including tempo, rhythm stability, beat strength, and overall regularity. A value of 0.0 is least danceable and 1.0 is most danceable.\n- **Energy**: is a measure from 0.0 to 1.0 and represents a perceptual measure of intensity and activity. Typically, energetic tracks feel fast, loud, and noisy. For example, death metal has high energy, while a Bach prelude scores low on the scale. Perceptual features contributing to this attribute include dynamic range, perceived loudness, timbre, onset rate, and general entropy.\n- **Key**: the key the track is in. Integers map to pitches using standard Pitch Class notation. E.g. 0 = C, 1 = C\u266f/D\u266d, 2 = D, and so on. If no key was detected, the value is -1.\n- **Loudness**: the overall loudness of a track in decibels (dB). Loudness values are averaged across the entire track and are useful for comparing relative loudness of tracks. Loudness is the quality of a sound that is the primary psychological correlate of physical strength (amplitude). Values typically range between -60 and 0 db.\n- **Speechiness**: detects the presence of spoken words in a track. The more exclusively speech-like the recording (e.g. talk show, audio book, poetry), the closer to 1.0 the attribute value. Values above 0.66 describe tracks that are probably made entirely of spoken words. Values between 0.33 and 0.66 describe tracks that may contain both music and speech, either in sections or layered, including such cases as rap music. Values below 0.33 most likely represent music and other non-speech-like tracks.\n- **Acousticness**: a confidence measure from 0.0 to 1.0 of whether the track is acoustic. 1.0 represents high confidence the track is acoustic.\n- **Instrumentalness**: predicts whether a track contains no vocals. \"Ooh\" and \"aah\" sounds are treated as instrumental in this context. Rap or spoken word tracks are clearly \"vocal\". The closer the instrumentalness value is to 1.0, the greater likelihood the track contains no vocal content. Values above 0.5 are intended to represent instrumental tracks, but confidence is higher as the value approaches 1.0.\n- **Liveness**: detects the presence of an audience in the recording. Higher liveness values represent an increased probability that the track was performed live. 
A value above 0.8 provides strong likelihood that the track is live.\n- **Valence**: a measure from 0.0 to 1.0 describing the musical positiveness conveyed by a track. Tracks with high valence sound more positive (e.g. happy, cheerful, euphoric), while tracks with low valence sound more negative (e.g. sad, depressed, angry).\n- **Tempo**: the overall estimated tempo of a track in beats per minute (BPM). In musical terminology, tempo is the speed or pace of a given piece and derives directly from the average beat duration.\n- **Duration_ms**: the duration of the track in milliseconds.\n- **Stream**: number of streams of the song on Spotify.\n- **Url_youtube**: url of the video linked to the song on Youtube, if it have any.\n- **Title**: title of the videoclip on youtube.\n- **Channel**: name of the channel that have published the video.\n- **Views**: number of views.\n- **Likes**: number of likes.\n- **Comments**: number of comments.\n- **Description**: description of the video on Youtube.\n- **Licensed**: Indicates whether the video represents licensed content, which means that the content was uploaded to a channel linked to a YouTube content partner and then claimed by that partner.\n- **official_video**: boolean value that indicates if the video found is the official video of the song.\n\n# **Notes**\nThese datas are heavily dependent on the time they were collected, which is in this case the 7th of February, 2023.", "VersionNotes": "Data Update 2023/03/20", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3025170, "CreatorUserId": 12271862, "OwnerUserId": 12271862.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5201951.0, "CurrentDatasourceVersionId": 5274235.0, "ForumId": 3064429, "Type": 2, "CreationDate": "03/20/2023 15:22:42", "LastActivityDate": "03/20/2023", "TotalViews": 115230, "TotalDownloads": 17868, "TotalVotes": 494, "TotalKernels": 46}]
|
[{"Id": 12271862, "UserName": "salvatorerastelli", "DisplayName": "Salvatore Rastelli", "RegisterDate": "11/07/2022", "PerformanceTier": 0}]
|
| false | 1 | 1,531 | 0 | 2,725 | 1,531 |
||
129235002
|
# # Kaun Banega Crorepati
# create a program capable of displaying questions to the user, like KBC
# use list data type to store the questions and their correct answers.
# Display the final amount the person is taking home after playing the game.
def start():
question = [
"1: The International Literacy Day is observed on",
"2.The language of Lakshadweep. a Union Territory of India, is?",
"3.In which group of places the Kumbha Mela is held every twelve years?",
"4. Bahubali festival is related to",
"5.Which day is observed as the World Standards Day?",
"6. Which of the following was the theme of the World Red Cross and Red Crescent Day?",
"7.September 27 is celebrated every year as",
"8.Who is the author of 'Manas Ka-Hans' ?",
"9.The death anniversary of which of the following leaders is observed as Martyrs' Day?",
"10.Who is the author of the epic 'Meghdoot",
"11.'Good Friday' is observed to commemorate the event of",
"12.Who is the author of the book 'Amrit Ki Ore'?",
"13. Which of the following is observed as Sports Day every year?",
"14.World Health Day is observed on",
]
option1 = [
"Sep 8",
"Tamil",
"Ujjain. Purl; Prayag. Haridwar",
"Islam",
"June 26",
"'Dignity for all - focus on women'",
"Teachers' Day",
"Khushwant Singh",
"Smt. Indira Gandhi",
"Vishakadatta",
"birth of Jesus Christ",
"Mukesh Kumar",
"22nd April",
"Apr 7",
]
option2 = [
"Nov 28",
"Hindi",
"Prayag. Haridwar, Ujjain,. Nasik",
"Hinduism",
"Oct 14",
"Dignity for all - focus on Children",
"National Integration Day",
"Prem Chand",
"PI. Jawaharlal Nehru",
"Valmiki",
"birth of' St. Peter",
"Narendra Mohan",
"26th july",
"Mar 6",
]
option3 = [
"May 2",
"Malayalam",
"Rameshwaram. Purl, Badrinath. Dwarika",
"Buddhism",
"Nov 15",
"Focus on health for all",
"World Tourism Day",
"Jayashankar Prasad",
"Mahatma Gandhi",
"Banabhatta",
"crucification 'of Jesus Christ",
"Upendra Nath",
"29th August",
"Mar 15",
]
option4 = [
"Sep 22",
"Telugu",
"Chittakoot, Ujjain, Prayag,'Haridwar",
"Jainism",
"Dec 2",
"Nourishment for all-focus on children",
"International Literacy Day",
"Amrit Lal Nagar",
"Lal Bahadur Shastri",
"Kalidas",
"rebirth of Jesus Christ",
"Nirad C. Choudhary",
"2nd October",
"Apr 28",
]
answers = ["a", "c", "b", "d", "b", "b", "c", "d", "c", "d", "c", "b", "c", "a"]
price = [
5000,
10000,
20000,
40000,
80000,
160000,
320000,
640000,
1250000,
2500000,
5000000,
"1 crore",
"3 crore",
"7 crore",
]
for i in range(0, 14):
print(
" \n",
question[i],
"\n a) ",
option1[i],
"\n b) ",
option2[i],
"\n c) ",
option3[i],
"\n d) ",
option4[i],
)
        ans = input("\n Enter your answer: ")
        if ans == answers[i]:
            print("Your answer is correct")
            print("Congratulations!! You have won", price[i], "Rupees")
        else:
            print("Your answer is incorrect")
            print("Better luck next time!!")
            # on a wrong answer the player takes home the amount won on the previous question
            if i > 0:
                print("You take home", price[i - 1], "Rupees")
            else:
                print("You take home 0 Rupees")
            print("GAME OVER !!")
            break
print("welcome to kbc")
print("To start the game enter {1} and to Quiet the game enter {2}")
operation = input()
if operation == "1":
print("your question is ")
start()
elif operation == "2":
print("you are exit")
else:
print("invalid input!!")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/235/129235002.ipynb
| null | null |
[{"Id": 129235002, "ScriptId": 38422184, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15066417, "CreationDate": "05/12/2023 04:22:41", "VersionNumber": 1.0, "Title": "KBC_game", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 71.0, "LinesInsertedFromPrevious": 71.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 1,259 | 0 | 1,259 | 1,259 |
||
129235175
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import cv2
import tensorflow as tf
import os
from tqdm import tqdm
import json
import warnings
warnings.filterwarnings("ignore")
import seaborn as sns
train_path = "/kaggle/input/rcnn-data-preprocessing-part-2/Train/"
test_path = "/kaggle/input/rcnn-data-preprocessing-part-2/Test/"
from tensorflow.keras.preprocessing.image import ImageDataGenerator
BATCH_SIZE = 64
IMAGE_SIZE = (224, 224, 3)
train_generator = ImageDataGenerator(rescale=1.0 / 255, validation_split=0.2)
train_data = train_generator.flow_from_directory(
train_path,
target_size=(224, 224),
color_mode="rgb",
class_mode="categorical",
batch_size=BATCH_SIZE,
shuffle=True,
subset="training",
)
val_data = train_generator.flow_from_directory(
train_path,
target_size=(224, 224),
color_mode="rgb",
class_mode="categorical",
batch_size=BATCH_SIZE,
shuffle=False,
subset="validation",
)
test_generator = ImageDataGenerator(rescale=1.0 / 255)
test_data = test_generator.flow_from_directory(
test_path,
target_size=(224, 224),
color_mode="rgb",
class_mode="categorical",
shuffle=False,
batch_size=BATCH_SIZE,
)
from tensorflow.keras.applications import VGG16
from tensorflow.keras.layers import Dense, Flatten, Input, Dropout
from tensorflow.keras.models import Sequential
from tensorflow.keras import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import EarlyStopping
# # VGG16 Model
baseModel = VGG16(
weights="imagenet", include_top=False, input_tensor=Input(shape=IMAGE_SIZE)
)
headModel = baseModel.output
headModel = Flatten(name="flatten")(headModel)
headModel = Dense(4096, activation="relu")(headModel)
headModel = Dropout(0.5)(headModel)
headModel = Dense(4096, activation="relu")(headModel)
headModel = Dropout(0.5)(headModel)
headModel = Dense(3, activation="softmax")(headModel)
for layer in baseModel.layers:
layer.trainable = False
model = Model(inputs=baseModel.input, outputs=headModel)
opt = Adam(learning_rate=0.001)
model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
model.summary()
early_stop = EarlyStopping(patience=2, monitor="val_loss")
results = model.fit(
    train_data, epochs=20, validation_data=val_data, callbacks=[early_stop]
)
pd.DataFrame(model.history.history)[["accuracy", "val_accuracy"]].plot()
pd.DataFrame(model.history.history)[["loss", "val_loss"]].plot()
test_pred = model.predict(test_data)
pred_class = [np.argmax(x) for x in test_pred]
test_data.class_indices
true_class = test_data.classes
from sklearn.metrics import classification_report, confusion_matrix
print(classification_report(true_class, pred_class))
sns.heatmap(confusion_matrix(true_class, pred_class), annot=True)
mapping_class = test_data.class_indices
mapping_class = dict([(value, key) for key, value in mapping_class.items()])
images, labels = next(iter(test_data))
images = images.reshape(64, 224, 224, 3)
fig, axes = plt.subplots(4, 4, figsize=(16, 16))
for ax, img, label in zip(axes.flat, images[:16], labels[:16]):
ax.imshow(img)
true_label = mapping_class[np.argmax(label)]
pred_prob = model.predict(img.reshape(1, 224, 224, 3))
pred_label = mapping_class[np.argmax(pred_prob)]
prob_class = np.max(pred_prob) * 100
ax.set_title(f"TRUE LABEL: {true_label}", fontweight="bold", fontsize=12)
ax.set_xlabel(
f"PREDICTED LABEL: {pred_label}\nProb({pred_label}) = {(prob_class):.2f}%",
fontweight="bold",
fontsize=10,
color="blue" if true_label == pred_label else "red",
)
ax.set_xticks([])
ax.set_yticks([])
plt.tight_layout()
fig.suptitle("PREDICTION for 16 RANDOM TEST IMAGES", size=30, y=1.03, fontweight="bold")
plt.show()
misclassify_pred = np.nonzero(true_class != pred_class)[0]
fig, axes = plt.subplots(4, 4, figsize=(16, 16))
for ax, batch_num, image_num in zip(
axes.flat, misclassify_pred // BATCH_SIZE, misclassify_pred % BATCH_SIZE
):
images, labels = test_data[batch_num]
img = images[image_num]
ax.imshow(img.reshape(*IMAGE_SIZE))
    true_label = mapping_class[np.argmax(labels[image_num])]
pred_prob = model.predict(img.reshape(1, 224, 224, 3))
pred_label = mapping_class[np.argmax(pred_prob)]
prob_class = np.max(pred_prob) * 100
ax.set_title(f"TRUE LABEL: {true_label}", fontweight="bold", fontsize=12)
ax.set_xlabel(
f"PREDICTED LABEL: {pred_label}\nProb({pred_label}) = {(prob_class):.2f}%",
fontweight="bold",
fontsize=10,
color="blue" if true_label == pred_label else "red",
)
ax.set_xticks([])
ax.set_yticks([])
plt.tight_layout()
fig.suptitle(
f"MISCLASSIFIED TEST IMAGES ({len(misclassify_pred)} out of {len(true_class)})",
size=20,
y=1.03,
fontweight="bold",
)
plt.show()
model.save("RCNN_crop_weed_classification_model.h5")
from tensorflow.keras.applications import ResNet101
from tensorflow.keras.callbacks import ReduceLROnPlateau
# # RESNET101 Model
baseModel2 = ResNet101(
weights="imagenet", include_top=False, input_tensor=Input(shape=IMAGE_SIZE)
)
headModel2 = baseModel2.output
headModel2 = Flatten(name="flatten")(headModel2)
headModel2 = Dense(2048, activation="relu")(headModel2)
headModel2 = Dropout(0.5)(headModel2)
headModel2 = Dense(2048, activation="relu")(headModel2)
headModel2 = Dropout(0.5)(headModel2)
headModel2 = Dense(3, activation="softmax")(headModel2)
for layer in baseModel2.layers:
layer.trainable = False
model2 = Model(inputs=baseModel2.input, outputs=headModel2)
opt = Adam(learning_rate=0.001)
model2.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
model2.summary()
# early_stop = EarlyStopping(patience=2,monitor='val_loss')
early_stopping = EarlyStopping(
monitor="val_accuracy",
min_delta=0.00008,
patience=11,
verbose=1,
restore_best_weights=True,
)
lr_scheduler = ReduceLROnPlateau(
monitor="val_accuracy",
factor=0.5,
patience=7,
min_lr=1e-7,
verbose=1,
)
callbacks = [early_stopping, lr_scheduler]
results2 = model2.fit(
    train_data, epochs=20, validation_data=val_data, callbacks=callbacks
)
pd.DataFrame(model2.history.history)[["accuracy", "val_accuracy"]].plot()
pd.DataFrame(model2.history.history)[["loss", "val_loss"]].plot()
test_pred2 = model2.predict(test_data)
pred_class2 = [np.argmax(x) for x in test_pred2]
test_data.class_indices
true_class = test_data.classes
from sklearn.metrics import classification_report
print(classification_report(true_class, pred_class2))
# # Efficient Net
from tensorflow.keras.applications import EfficientNetB4
baseModel3 = EfficientNetB4(
weights="imagenet", include_top=False, input_tensor=Input(shape=IMAGE_SIZE)
)
headModel3 = baseModel3.output
headModel3 = Flatten(name="flatten")(headModel3)
headModel3 = Dense(2048, activation="relu")(headModel3)
headModel3 = Dropout(0.5)(headModel3)
headModel3 = Dense(2048, activation="relu")(headModel3)
headModel3 = Dropout(0.5)(headModel3)
headModel3 = Dense(3, activation="softmax")(headModel3)
for layer in baseModel3.layers:
layer.trainable = False
model3 = Model(inputs=baseModel3.input, outputs=headModel3)
opt = Adam(learning_rate=0.001)
model3.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
model3.summary()
# early_stop = EarlyStopping(patience=2,monitor='val_loss')
early_stopping = EarlyStopping(
monitor="val_accuracy",
min_delta=0.00008,
patience=11,
verbose=1,
restore_best_weights=True,
)
lr_scheduler = ReduceLROnPlateau(
monitor="val_accuracy",
factor=0.5,
patience=7,
min_lr=1e-7,
verbose=1,
)
callbacks = [early_stopping, lr_scheduler]
results3 = model3.fit(
    train_data, epochs=10, validation_data=val_data, callbacks=callbacks
)
pd.DataFrame(model3.history.history)[["accuracy", "val_accuracy"]].plot()
pd.DataFrame(model3.history.history)[["loss", "val_loss"]].plot()
test_pred3 = model3.predict(test_data)
pred_class3 = [np.argmax(x) for x in test_pred3]
test_data.class_indices
true_class = test_data.classes
print(classification_report(true_class, pred_class3))
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/235/129235175.ipynb
| null | null |
[{"Id": 129235175, "ScriptId": 36054443, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14154701, "CreationDate": "05/12/2023 04:25:12", "VersionNumber": 1.0, "Title": "Weed Detection in agricultural plants", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 285.0, "LinesInsertedFromPrevious": 285.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 2,729 | 0 | 2,729 | 2,729 |
||
129332498
|
# imports used throughout this analysis
import os
import pandas as pd

# Load the data
gold_df = pd.read_csv("/kaggle/input/gold-prices/Commodity Prices - Gold.csv")
# Repeat the process for the other commodities to find their exact filenames and then load them
# Check the data
print(gold_df.head())
# Get a list of all files in the directory
nat_gas_files = os.listdir("/kaggle/input/natural-gas")
wheat_files = os.listdir("/kaggle/input/us-wheat")
# Print the files
print("Natural Gas Files: ", nat_gas_files)
print("Wheat Files: ", wheat_files)
# Load the data
gold_df = pd.read_csv("/kaggle/input/gold-prices/Commodity Prices - Gold.csv")
nat_gas_df = pd.read_csv("/kaggle/input/natural-gas/Commodity Prices - Nat. Gas.csv")
wheat_df = pd.read_csv("/kaggle/input/us-wheat/Commodity Prices - US Wheat.csv")
# Check the data
print(gold_df.head())
print(nat_gas_df.head())
print(wheat_df.head())
print("Gold Data:")
print(gold_df.info())
print("Natural Gas Data:")
print(nat_gas_df.info())
print("Wheat Data:")
print(wheat_df.info())
# Convert 'Date' column to datetime
gold_df["Date"] = pd.to_datetime(gold_df["Date"])
nat_gas_df["Date"] = pd.to_datetime(nat_gas_df["Date"])
wheat_df["Date"] = pd.to_datetime(wheat_df["Date"])
# Set 'Date' as the index
gold_df.set_index("Date", inplace=True)
nat_gas_df.set_index("Date", inplace=True)
wheat_df.set_index("Date", inplace=True)
print(gold_df.head())
print(nat_gas_df.head())
print(wheat_df.head())
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
# Instantiate the scaler
scaler = StandardScaler()
# Fit the scaler and transform the data for each commodity
gold_df_scaled = scaler.fit_transform(gold_df[["Close"]])
nat_gas_df_scaled = scaler.fit_transform(nat_gas_df[["Close"]])
wheat_df_scaled = scaler.fit_transform(wheat_df[["Close"]])
# Plot the standardized closing prices
plt.figure(figsize=(14, 7))
plt.plot(gold_df.index, gold_df_scaled, label="Gold")
plt.plot(nat_gas_df.index, nat_gas_df_scaled, label="Natural Gas")
plt.plot(wheat_df.index, wheat_df_scaled, label="US Wheat")
plt.title("Standardized Commodity Prices Over Time")
plt.xlabel("Date")
plt.ylabel("Standardized Price")
plt.legend(loc="best")
plt.grid(True)
plt.show()
# Join all dataframes together
all_commodities = (
gold_df[["Close"]]
.join(nat_gas_df[["Close"]], lsuffix="_gold", rsuffix="_nat_gas")
.join(wheat_df[["Close"]].rename(columns={"Close": "Close_wheat"}))
)
# Calculate correlation
correlation = all_commodities.corr()
# Plot correlation
import seaborn as sns
plt.figure(figsize=(8, 6))
sns.heatmap(correlation, annot=True, cmap="coolwarm")
plt.title("Correlation Matrix of Commodity Prices")
plt.show()
import statsmodels.api as sm
# Merge dataframes on Date
merged_df = pd.merge(gold_df, nat_gas_df, on="Date", how="inner")
# Get the Close prices
x = merged_df["Close_x"]
y = merged_df["Close_y"]
# Add a constant (i.e., bias or intercept) to the predictor
X = sm.add_constant(x)
# Perform the regression
model = sm.OLS(y, X)
results = model.fit()
# Print out the detailed statistics
print(results.summary())
#
# The results shown here are from a simple linear regression model with the price of Gold as the independent variable (x) and the price of Natural Gas as the dependent variable (y).
# Here is a brief explanation of key outputs:
# R-squared: 0.230 means that about 23% of the variation in Natural Gas prices can be explained by changes in Gold prices. This isn't a very high R-squared value, which suggests that there might be other factors influencing the price of Natural Gas that aren't accounted for in this simple model.
# coef for Close_x (Gold prices): -0.0020 suggests that for each unit increase in the price of Gold, the price of Natural Gas decreases by 0.0020 units, on average.
# P>|t| for Close_x: 0.000 indicates a statistically significant relationship between the price of Gold and the price of Natural Gas at a 95% confidence level (as it's less than 0.05).
# Prob (F-statistic): 0.00 is the probability that you would get the current results if the price of Gold had no effect on Natural Gas. The current value (0.00) suggests that the relationship between Gold and Natural Gas prices is statistically significant.
# The Omnibus and Prob(Omnibus) values test the hypothesis that the residuals are normally distributed. Here, Prob(Omnibus) is 0.000, which means we can reject the null hypothesis of this test and conclude that the residuals are not normally distributed.
# The Durbin-Watson statistic tests for autocorrelation in the residuals (not homoscedasticity). Here it is very close to 0, which suggests strong positive autocorrelation in the residuals.
# Jarque-Bera (JB) and Prob(JB) also test for normality in the residuals. Here, Prob(JB) is also 0.00, indicating that the residuals are not normally distributed.
# Cond. No. checks for multicollinearity. A large condition number (like 2.64e+03 here) can indicate that there might be strong multicollinearity or other numerical problems.
# **These results suggest that although there is a statistically significant relationship between Gold and Natural Gas prices, the model's fit and the residuals' properties might not be ideal. Other factors might need to be considered, or a different model might need to be used to predict Natural Gas prices more accurately based on Gold prices.**
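# A hedged sketch (not in the original write-up): the same diagnostics discussed above can also be read
# programmatically from the fitted statsmodels results object, which is handy when comparing several models.
from statsmodels.stats.stattools import durbin_watson, jarque_bera

print("R-squared:", results.rsquared)
print("Coefficient on gold price:", results.params["Close_x"])
print("p-value on gold price:", results.pvalues["Close_x"])
print("Durbin-Watson statistic:", durbin_watson(results.resid))
jb_stat, jb_pvalue, jb_skew, jb_kurtosis = jarque_bera(results.resid)
print("Jarque-Bera p-value:", jb_pvalue)
print("Condition number:", results.condition_number)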
from statsmodels.tsa.stattools import adfuller
def adf_test(timeseries):
print("Results of Dickey-Fuller Test:")
dftest = adfuller(timeseries, autolag="AIC")
dfoutput = pd.Series(
dftest[0:4],
index=[
"Test Statistic",
"p-value",
"#Lags Used",
"Number of Observations Used",
],
)
for key, value in dftest[4].items():
dfoutput["Critical Value (%s)" % key] = value
print(dfoutput)
adf_test(gold_df["Close"])
adf_test(nat_gas_df["Close"])
adf_test(wheat_df["Close"])
# The Dickey-Fuller test is one of the statistical tests for checking stationarity. The null hypothesis of the test is that the time series is not stationary (it has some time-dependent structure). The test results consist of a test statistic and critical values for different confidence levels. If the test statistic is less than the critical value, we reject the null hypothesis and conclude that the series is stationary.
# Here are the results for your datasets:
# Gold Data: The test statistic is greater than the critical value at all confidence levels, so we fail to reject the null hypothesis. This suggests that the gold price series is not stationary and has a time-dependent structure.
# Natural Gas Data: The test statistic is less than the critical values at the 1% and 5% level, so we reject the null hypothesis. This suggests that the natural gas price series is stationary.
# Wheat Data: The test statistic is greater than the critical value at all confidence levels, so we fail to reject the null hypothesis. This suggests that the wheat price series is not stationary and has a time-dependent structure.
# We need to difference the non-stationary series (gold and wheat) before applying time series models like ARIMA.
# Difference the series
gold_df["Close_diff"] = gold_df["Close"].diff()
wheat_df["Close_diff"] = wheat_df["Close"].diff()
# Drop the NaN values
gold_df.dropna(inplace=True)
wheat_df.dropna(inplace=True)
# Plot the differenced data
gold_df["Close_diff"].plot()
plt.title("Differenced Gold series")
plt.show()
wheat_df["Close_diff"].plot()
plt.title("Differenced Wheat series")
plt.show()
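# To confirm that differencing removed the trend, the adf_test helper defined above can be re-run on the differenced columns (a quick sanity check; the dropna() calls are defensive in case any NaN values remain):
adf_test(gold_df["Close_diff"].dropna())
adf_test(wheat_df["Close_diff"].dropna())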
from statsmodels.tsa.arima.model import ARIMA
from sklearn.metrics import mean_squared_error
# fit model
model = ARIMA(
nat_gas_df["Close"], order=(5, 0, 1)
) # You may need to adjust the parameters
model_fit = model.fit()
# summary of fit model
print(model_fit.summary())
# make predictions
predictions = model_fit.predict(
start=len(nat_gas_df["Close"]), end=len(nat_gas_df["Close"]) + 10, dynamic=False
) # Predict next 10 data points
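# mean_squared_error was imported above but not yet used; a simple way to use it is a holdout evaluation of the ARIMA fit. A minimal sketch (the 90/10 split and the (5, 0, 1) order are illustrative choices, not tuned values):
train_size = int(len(nat_gas_df) * 0.9)
train = nat_gas_df["Close"].iloc[:train_size]
test = nat_gas_df["Close"].iloc[train_size:]
holdout_fit = ARIMA(train, order=(5, 0, 1)).fit()
forecast = holdout_fit.forecast(steps=len(test))  # forecast over the holdout period
mse = mean_squared_error(test, forecast)
print("Holdout MSE:", mse, "RMSE:", mse ** 0.5)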
<jupyter_start><jupyter_text>SF Salaries
One way to understand how a city government works is by looking at who it employs and how its employees are compensated. This data contains the names, job title, and compensation for San Francisco city employees on an annual basis from 2011 to 2014.
[](https://www.kaggle.com/benhamner/d/kaggle/sf-salaries/exploring-the-sf-city-salary-data)
## Exploration Ideas
To help get you started, here are some data exploration ideas:
- How have salaries changed over time between different groups of people?
- How are base pay, overtime pay, and benefits allocated between different groups?
- Is there any evidence of pay discrimination based on gender in this dataset?
- How is budget allocated based on different groups and responsibilities?
Have other ideas you're curious for someone else to explore? Post them in [this forum thread](https://www.kaggle.com/forums/f/977/sf-salaries/t/18264/sf-salaries-dataset).
## Data Description
sf-salaries-release-*.zip (downloadable via the "Download Data" link in the header above) contains a CSV table and a SQLite database (with the same data as the CSV file). Here's the [code that creates this data release](https://github.com/benhamner/sf-salaries).
The original source for this data is [here](http://transparentcalifornia.com/salaries/san-francisco/). We've taken the raw files here and combined/normalized them into a single CSV file as well as a SQLite database with an equivalently-defined table.
Kaggle dataset identifier: sf-salaries
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Import libraries >>
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# Import Data >>
df = pd.read_csv("../input/sf-salaries/Salaries.csv", low_memory=False)
df.head()
# EDA >>
df["JobTitle"].nunique()
df.info()
# we need to convert these pay columns from str to float64 so that we can apply statistics to them
"""Casting directly would raise an error because some rows contain the value
'Not Provided' in these columns, so we first replace it with NaN."""
df = df.replace("Not Provided", np.nan)
df = df.astype(
{
"BasePay": float,
"OvertimePay": float,
"OtherPay": float,
"Benefits": float,
"TotalPay": float,
"TotalPayBenefits": float,
}
)
df.dtypes
df.describe()
# what is the average BasePay ??
df["BasePay"].mean()
# what is the highest amount of the OvertimePay ??
df["OvertimePay"].max()
# what is the TotalPay of ALBERT PARDINI (inclduding benefits)??
df[df["EmployeeName"] == "ALBERT PARDINI"]["TotalPayBenefits"]
# what is the name of the highest and lowest paid person??
df[df["TotalPayBenefits"] == df["TotalPayBenefits"].max()]["EmployeeName"]
df[df["TotalPayBenefits"] == df["TotalPayBenefits"].min()]["EmployeeName"]
df[df["TotalPayBenefits"] == df["TotalPayBenefits"].min()]
# we notice that there are employees with zero or negative total pay (they effectively owe money rather than earn it)
# let's count how many of them there are
Owe_emp = df.loc[df["TotalPayBenefits"] <= 0, "TotalPayBenefits"].count()
print(Owe_emp)
# what was the average BasePay of all the employees per year??
df.groupby("Year")["BasePay"].mean().sort_values()
# what are the most popular jobs??
df["JobTitle"].value_counts().head(5)
# How many job titles were represented by only 1 person in 2013??
(df[df["Year"] == 2013]["JobTitle"].value_counts() == 1).sum()
# How many employees have the word Chief in their job title??
df["JobTitle"].str.lower().str.contains("chief").sum()
# Is there a correlation between the length of the job title string and salary??
df["num_titles"] = df["JobTitle"].apply(len)
# apply() is very useful when you want to run a function over each row or value.
df["num_titles"]
df[["num_titles", "TotalPayBenefits"]].corr()
# > there is no correlation
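# As a quick robustness check, a rank-based (Spearman) correlation can also be computed; it does not assume a linear relationship:
df[["num_titles", "TotalPayBenefits"]].corr(method="spearman")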
# what is the amount of increase in number of jobs in each year??
job_counts = df.groupby("Year")["Id"].count()
job_increase = job_counts.pct_change()
print(job_increase)
fig, ax = plt.subplots()
ax.bar(job_counts.index, job_counts.values)
ax.set_ylabel("Number of Jobs")
ax2 = ax.twinx()
# plot the year-over-year percentage change on the secondary axis
ax2.plot(job_counts.index, job_increase.values * 100, color="red", marker="o")
ax2.set_ylabel("Percentage Increase (%)")
ax.set_title("Number of Jobs and Percentage Increase by Year")
plt.show()
#
# #### visualizing the 5 number summary of TotalPayBenefits
plt.figure(figsize=(8, 3))
sns.boxplot(x=df["TotalPayBenefits"]).set_title("5 number summary of TotalPayBenefits")
# ### Insight:
# Most employees receive total pay plus benefits of roughly 100,000 per year,
# and only a very small number receive more than 400,000.
# so, what is the title of these people who take more than 400000??
high_earners = df[df["TotalPayBenefits"] > 400000]
high_earners["JobTitle"].unique()
<jupyter_start><jupyter_script>import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import make_moons, make_circles, load_iris
from sklearn.preprocessing import MinMaxScaler
def make_spirals(n_samples, noise, random_seed):
np.random.seed(random_seed)
# Generate the coordinates for the points in each spiral arm.
n = np.sqrt(np.random.rand(n_samples, 1)) * 600 * (2 * np.pi) / 360
x1 = (-np.cos(n) * n + np.random.rand(n_samples, 1) * noise) / 7
y1 = (np.sin(n) * n + np.random.rand(n_samples, 1) * noise) / 7
return (
np.vstack((np.hstack((x1, y1)), np.hstack((-x1, -y1)))),
np.hstack((np.zeros(n_samples), np.ones(n_samples))).astype(int),
)
def scale_data(data):
scaler = MinMaxScaler(feature_range=(-1, 1))
return scaler.fit_transform(data)
X_moons, y_moons = make_moons(n_samples=100, noise=0.1, random_state=42)
X_circles, y_circles = make_circles(
n_samples=100, noise=0.075, factor=0.5, random_state=42
)
X_spirals, y_spirals = make_spirals(n_samples=100, noise=0.8, random_seed=42)
datasets = {
"Moons": (scale_data(X_moons), y_moons),
"Circles": (scale_data(X_circles), y_circles),
"Spirals": (scale_data(X_spirals), y_spirals),
}
# Make it pretty
orange = "#FFA630"
blue = "#00A7E1"
gray = "#CCCCCC"
palette = [orange, blue]
from sklearn.metrics import accuracy_score
def plot_multiple_decision_boundaries(
classifier, datasets, resolution=0.015, alpha=0.09, palette="viridis", clf_name=""
):
"""
Plots a 1x3 grid of plots, displaying the decision boundaries of a given untrained classifier on three datasets.
:classifier: An untrained sklearn classifier algorithm (e.g. DecisionTreeClassifier())
:datasets: A dictionary containing three datasets of the format key='dataset name', value=[X, y]
"""
n_datasets = len(datasets)
fig, axes = plt.subplots(
1, n_datasets, figsize=(6 * n_datasets, 6)
) # Adjust the figure size as needed
for ax, (name, (X, y)) in zip(axes, datasets.items()):
# Fit the classifier
classifier.fit(X, y)
# Find training accuracy of classifier
acc = accuracy_score(y, classifier.predict(X))
# Create a mesh of points
dist_from_edge = 0.5
x_min, x_max = X[:, 0].min() - dist_from_edge, X[:, 0].max() + dist_from_edge
y_min, y_max = X[:, 1].min() - dist_from_edge, X[:, 1].max() + dist_from_edge
xx, yy = np.meshgrid(
np.arange(x_min, x_max, resolution), np.arange(y_min, y_max, resolution)
)
# Use the classifier to predict the class of each point in the mesh
Z = classifier.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# Create a dataframe with the results
df = pd.DataFrame(dict(x=xx.ravel(), y=yy.ravel(), label=Z.ravel()))
if clf_name == "":
clf_name = classifier.__class__.__name__
# Plot the results using seaborn
sns.scatterplot(
data=df,
x="x",
y="y",
hue="label",
palette=palette,
alpha=alpha,
legend=False,
ax=ax,
)
sns.scatterplot(
x=X[:, 0],
y=X[:, 1],
s=60,
hue=y,
palette=palette,
alpha=0.9,
edgecolor=None,
legend=False,
ax=ax,
)
ax.set_title(f"Decision Boundary for {name} - {clf_name}")
ax.legend(title=f"Accuracy: {acc:.2f}", loc="upper right").set_bbox_to_anchor(
(0.95, 0.95)
)
plt.tight_layout()
plt.show()
# Plot datasets
plt.figure(figsize=(16, 5))
for i, (dataset_name, (X, y)) in enumerate(datasets.items()):
plt.subplot(1, 3, i + 1)
sns.scatterplot(
x=X[:, 0], y=X[:, 1], hue=y, palette=palette, s=120, alpha=0.9, edgecolor=None
)
plt.title(dataset_name)
plt.show()
# # Logistic Regression Model:
# A logistic regression model learns the weights (coefficients) for the features (x and y) and an intercept term.
# The decision function that logistic regression learns is given by:
# $$ z = b_0 + b_1x_1 + b_2x_2 + ... + b_nx_n $$
# where $x = (x_1, x_2, ..., x_n)$ is a feature vector, and $b_0, b_1, ..., b_n$ are the parameters of the model.
# The logistic regression model makes predictions using a logistic function, often a sigmoid function, which transforms the output of the linear equation into a probability, $p$, that the positive class (1) is the correct classification:
# $$ p = \frac{1}{1 + e^{-z}} $$
# Based on this probability, the predicted class $\hat{y}$ can be determined:
# $$ \hat{y} =
# \begin{cases}
# 1 & \text{if } p \geq 0.5 \\
# 0 & \text{if } p < 0.5
# \end{cases}
# $$
from sklearn.linear_model import LogisticRegression
model = LogisticRegression()
plot_multiple_decision_boundaries(model, datasets, palette=palette)
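# Since the helper fits the classifier on each dataset in turn, the model object ends up holding the parameters from the last fit (the Spirals set here), so the learned weights and intercept can be inspected directly:
print("coefficients:", model.coef_, "intercept:", model.intercept_)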
# #### Observations:
# - Since our logistic regression model is limited to using only linear terms, our decision boundary can only be linear.
# - Since our data has a non-linear structure, you may think this spells the end for our logistic regression model, but you'd be wrong.
# ## Linear separability
# A dataset is linearly separable when a single straight line (more generally, a hyperplane) can split the two classes without any misclassification. None of the three datasets above is linearly separable, which is why a purely linear decision boundary cannot reach high accuracy on them.
# ## Polynomial features
# Polynomial features is a preprocessing transformation, and allows us to use simple classifiers like logistic regression with non-linear data.
# We might want to use a simple model as it can allow us to create a very smooth decision boundary.
# Polynomial features will create combinations of all the features, combinations of features to the nth degree.
# For example in our case where we have two terms, x and y, our simple logistic regression with no polynomial features would be defined as:
# $$ \text{logit}(p(y = 1|x)) = b_0 + b_1x + b_2y $$
# Polynomial features will take every possible combination of these features up to the nth degree.
# Were we to use 3rd degree polynomial features, our feature combinations would be:
# $$
# \begin{align*}
# \text{Constant terms}: & \quad 1 \\
# \text{Linear terms}: & \quad x, \quad y \\
# \text{Quadratic terms}: & \quad x^2, \quad y^2 \\
# \text{Interaction terms}: & \quad xy \\
# \text{Cubic terms}: & \quad x^3, \quad y^3 \\
# \text{Mixed cubic terms}: & \quad x^2y, \quad xy^2 \\
# \end{align*}
# $$
# Each term also gets its own coefficient.
# The resulting logistic regression equation is:
# $$
# \begin{align*}
# \text{logit}(p(y=1|x)) = & \quad b_0 + b_1x + b_2y + b_3x^2 + b_4y^2 + b_5xy + b_6x^3 \\
# & + b_{7}y^3 + b_{8}x^2y + b_{9}xy^2 \\
# \end{align*}
# $$
# ### Implementing polynomial features
# #### Second degree polynomial features
# We're first going to implement 2nd degree polynomial features to see how well they can fit our data, followed by 3rd degree.
# Note that as we increase polynomial feature degree, we exponentially increase the number of features.
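# To make the feature explosion concrete, here is a quick illustrative check that prints the generated feature names and counts for a few degrees (this assumes scikit-learn >= 1.0 for get_feature_names_out):
from sklearn.preprocessing import PolynomialFeatures
for d in (2, 3, 11):
    poly = PolynomialFeatures(degree=d).fit(np.zeros((1, 2)))
    names = poly.get_feature_names_out(["x", "y"])
    print(f"degree {d}: {len(names)} features")
print("degree 3 features:", list(PolynomialFeatures(3).fit(np.zeros((1, 2))).get_feature_names_out(["x", "y"])))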
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
degree = 2 # Set the degree of the polynomial features
model = make_pipeline(PolynomialFeatures(degree), LogisticRegression())
plot_multiple_decision_boundaries(model, datasets, palette=palette)
# #### Observations:
# - The accuracy on the moons dataset has slightly decreased, indicating that second degree polynomial features are of no use in classifying this dataset.
# - Polynomial features with degree 2 has fit the circle data superbly, creating an almost perfect circular decision boundary.
# - The decision boundary for the spiral data has also not changed from the simple linear decision boundary, indicating 2nd degree polynomial features are of no use here.
# #### Third degree polynomial features
# Let's see how 3rd degree features fare in this classification task. This is very similar to the example we referenced earlier.
degree = 3 # Set the degree of the polynomial features
model = make_pipeline(PolynomialFeatures(degree), LogisticRegression())
plot_multiple_decision_boundaries(model, datasets, palette=palette)
# #### Observations:
# - The moons classifier has significantly improved in accuracy, and we can clearly see a 3rd degree polynomial decision boundary separating the data now.
# - The circles boundary remains unchanged. The 2nd degree features were sufficiently complex to model it.
# - The spiral data is now classifying even worse, indicating that the extra degrees of freedom make it harder for the classifier to find a good solution.
# #### 11th degree polynomial features
# Let's turn it up to 11 to see what polynomial features are capable of doing if we really want to crank it up.
degree = 11 # Set the degree of the polynomial features
model = make_pipeline(PolynomialFeatures(degree), LogisticRegression())
plot_multiple_decision_boundaries(model, datasets, palette=palette)
# #### Observations:
# - We can see that our model is now able to fit the moons dataset very well, achieving 99% accuracy.
# - We can now see clear overfitting on the circles data, where the decision boundary is hugging points creating a more warped line, rather than a smooth continuous line.
# - It's also produced a reasonable decision boundary for our spiral data now, where before it really struggled, now it's producing something that looks fairly reasonable, though it still can't find the innermost points.
# ### Wrapping up with logistic regression and polynomial features
# There are many ways we can add features to our data which allow for us to model non-linearity, even with simple classifiers. Other examples include:
# - Trigonometric features, such as sin(x), sin(y), cos(x), cos(y) etc
# - Frequency-based features, which can be obtained using fourier or wavelet transformations for example
# - Derived features, where we can apply mathematical operations to features, like sqrt(x), log(x), sigmoid(x), etc
# Depending on your dataset and model, you can consider using these sorts of features to transform your data.
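# As a concrete illustration of the first idea above (trigonometric features), here is a minimal sketch that simply appends sin and cos of each coordinate before the logistic regression; the particular transforms are an arbitrary choice:
from sklearn.preprocessing import FunctionTransformer
def add_trig_features(X):
    # append sin and cos of each original coordinate as extra columns
    return np.hstack([X, np.sin(X), np.cos(X)])
trig_model = make_pipeline(FunctionTransformer(add_trig_features), LogisticRegression())
plot_multiple_decision_boundaries(
    trig_model, datasets, palette=palette, clf_name="LogReg + trig features"
)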
# Logistic regression has shown when combined with polynomial features, it can make an excellent classifier for the circles dataset. For our other two datasets however, it would make more sense to try a more powerful model, rather than using 11th degree polynomial feature transformations.
# # Support Vector Machines
# Support vector machines are a powerful model that learn how to distinguish between different classes by finding the hyperplane that maximizes the margin between the classes in the feature space.
# In the context of a three-dimensional dataset, an SVM works to identify a two-dimensional plane that best separates the different classes. This optimal plane is chosen such that it maximizes the distance or 'margin' between itself and the nearest data points from each class. The aim is to ensure the greatest possible separation between classes, thereby improving the model's ability to classify new data accurately.
# The decision function that an SVM with a linear kernel learns is given by:
# $$ f(x) = b_0 + b_1x_1 + b_2x_2 + ... + b_nx_n $$
# where $x = (x_1, x_2, ..., x_n)$ is a feature vector, and $b_0, b_1, ..., b_n$ are the parameters of the model. The SVM classifies a new instance x based on the sign of $f(x)$. If $f(x)$ is positive, then the predicted class $\hat{y}$ is the positive class (1), otherwise it is the negative class (0):
# $$ \hat{y} =
# \begin{cases}
# 1 & \text{if } f(x) \geq 0 \\
# 0 & \text{if } f(x) < 0
# \end{cases}
# $$
# Those of you who are actually paying attention will have noticed that this looks remarkably similar to the logit equation.
# That is because logistic regression and SVMs both create a hyperplane to try to separate points, with a few key differences:
# **Objective Function**: Logistic regression maximizes the likelihood of the data by minimizing the log loss, so it cares about how confidently every point is classified correctly. SVMs, on the other hand, do not model probabilities; they aim to find the hyperplane that maximizes the margin to the closest points of the two classes (the support vectors), using the hinge loss function.
# **Margin Maximization**: While logistic regression does not explicitly consider margins, SVMs place a great deal of importance on it. SVMs strive to achieve the largest possible margin between the decision boundary and the nearest points from each class in order to ensure more robust classification.
# **Handling of Outliers**: Logistic regression is sensitive to outliers since it tries to model the probability for each point accurately. In contrast, SVMs primarily focus on the points closest to the decision boundary (support vectors). Thus, they tend to handle outliers better as they do not overly influence the decision boundary.
# **Predictions**: Logistic regression outputs a probability that can be converted to a class prediction using a chosen threshold, often 0.5. SVMs, however, make predictions based on the sign of the function $f(x)$, that is the distance of a point from the hyperplane.
# The equation for the hyperplane created by a linear-kernel SVM is:
# $$w^T x + b = 0$$
# where $w$ is the weight vector, $x$ is the feature vector, and $b$ is the bias (intercept) term; points with $f(x) = w^T x + b > 0$ fall on one side of the plane and points with $f(x) < 0$ on the other.
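# A worked detail that the plotting code below relies on: the two margin planes are $w^T x + b = +1$ and $w^T x + b = -1$, and the distance between two parallel planes $w^T x + b = c_1$ and $w^T x + b = c_2$ is $|c_1 - c_2| / \lVert w \rVert$, so
# $$\text{margin width} = \frac{2}{\lVert w \rVert}$$
# which is why maximizing the margin is equivalent to minimizing $\lVert w \rVert$, and why the code below divides by the norm of the learned weights when drawing the margin planes.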
# Let's visualize this decision boundary in 3D.
# - The color of the points indicates the two types of iris.
# - The darker points are the support vectors.
# - The darker plane in the center is the decision boundary.
# - The lighter planes are the margins created by the boundary.
from sklearn.svm import SVC
import plotly.graph_objects as go
# Load the iris dataset
iris = load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df["target"] = iris.target
# Filter dataframe to only include target classes 0 and 1 (just 2 different classes)
df = df[df["target"].isin([0, 1])]
# Use just first three features so we can plot it in 3D
features = iris.feature_names[:3]
X_df = df[features]
y_df = df["target"]
# Train svm model
svc = SVC(kernel="linear", C=1e10)
svc.fit(X_df, y_df)
# Plot the points and decision boundary
# Get the weights and intercept from the SVM
weights = svc.coef_[0]
intercept = svc.intercept_[0]
# Define the grid
x = np.linspace(X_df.iloc[:, 0].min(), X_df.iloc[:, 0].max(), num=100)
y = np.linspace(X_df.iloc[:, 1].min(), X_df.iloc[:, 1].max(), num=100)
x, y = np.meshgrid(x, y)
# Calculate corresponding z (plane height)
z = -(weights[0] * x + weights[1] * y + intercept) / weights[2]
surface = go.Surface(
x=x,
y=y,
z=z,
surfacecolor=np.zeros(z.shape),
colorscale=[[0, gray], [1, gray]],
opacity=0.6,
showscale=False,
)
data_points = go.Scatter3d(
x=X_df.iloc[:, 0],
y=X_df.iloc[:, 1],
z=X_df.iloc[:, 2],
mode="markers",
marker=dict(color=y_df, size=7, opacity=0.9, colorscale=palette),
)
fig = go.Figure(data=[data_points, surface])
# Plot the margin
# Calculate the Euclidean norm of the weights vector
norm_weights = np.linalg.norm(weights)
# Compute the actual margin width
# In theory we should be performing 2/norm weights, but due to how sklearn's SVM boundary
# calculation works, I needed to manually adjust this value.
margin_width = 2.65 / norm_weights
# Calculate z for the positive margin
z_positive_margin = (
-(weights[0] * x + weights[1] * y + intercept - margin_width / 2) / weights[2]
)
# Calculate z for the negative margin
z_negative_margin = (
-(weights[0] * x + weights[1] * y + intercept + margin_width / 2) / weights[2]
)
# Create surfaces for the margin planes
positive_margin_surface = go.Surface(
x=x,
y=y,
z=z_positive_margin,
surfacecolor=np.ones(z_positive_margin.shape),
colorscale=[[0, "gray"], [1, "gray"]],
opacity=0.05,
showscale=False,
)
negative_margin_surface = go.Surface(
x=x,
y=y,
z=z_negative_margin,
surfacecolor=np.ones(z_negative_margin.shape),
colorscale=[[0, "gray"], [1, "gray"]],
opacity=0.05,
showscale=False,
)
# Add these new surfaces to the figure
fig.add_trace(positive_margin_surface)
fig.add_trace(negative_margin_surface)
# Plot the support vectors
# Get the support vectors
support_vectors = svc.support_vectors_
# Create a scatter plot trace for the support vectors
support_vectors_trace = go.Scatter3d(
x=support_vectors[:, 0],
y=support_vectors[:, 1],
z=support_vectors[:, 2],
mode="markers",
marker=dict(color="black", size=7, opacity=0.3),
)
# Add this new trace to the figure
fig.add_trace(support_vectors_trace)
# Configure plot settings
fig.update_layout(
scene=dict(xaxis_title="x", yaxis_title="y", zaxis_title="z"),
title="SVM Decision Boundary on 3D Iris Data",
showlegend=False,
)
fig.show()
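# To connect the SVM back to the 2-D toy datasets, the plotting helper from earlier can be reused directly. A minimal sketch comparing a linear kernel with an RBF kernel (default C and gamma, chosen purely for illustration):
plot_multiple_decision_boundaries(
    SVC(kernel="linear"), datasets, palette=palette, clf_name="SVC (linear)"
)
plot_multiple_decision_boundaries(
    SVC(kernel="rbf"), datasets, palette=palette, clf_name="SVC (RBF)"
)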
<jupyter_start><jupyter_text>Wild blueberry Yield Prediction Dataset
### Context
Blueberries are perennial flowering plants with blue or purple berries. They are classified in the section Cyanococcus within the genus Vaccinium. Vaccinium also includes cranberries, bilberries, huckleberries, and Madeira blueberries. Commercial blueberries—both wild (lowbush) and cultivated (highbush)—are all native to North America. The highbush varieties were introduced into Europe during the 1930s.
Blueberries are usually prostrate shrubs that can vary in size from 10 centimeters (4 inches) to 4 meters (13 feet) in height. In the commercial production of blueberries, the species with small, pea-size berries growing on low-level bushes are known as "lowbush blueberries" (synonymous with "wild"), while the species with larger berries growing on taller, cultivated bushes are known as "highbush blueberries". Canada is the leading producer of lowbush blueberries, while the United States produces some 40% of the world s supply of highbush blueberries.
### Content
"The dataset used for predictive modeling was generated by the Wild Blueberry Pollination Simulation Model, which is an open-source, spatially-explicit computer simulation program that enables exploration of how various factors, including plant spatial arrangement, outcrossing and self-pollination, bee species compositions and weather conditions, in isolation and combination, affect pollination efficiency and yield of the wild blueberry agroecosystem. The simulation model has been validated by the field observation and experimental data collected in Maine USA and Canadian Maritimes during the last 30 years and now is a useful tool for hypothesis testing and theory development for wild blueberry pollination researches."
| Feature | Unit | Description |
| --- | --- | --- |
| Clonesize | m2 | The average blueberry clone size in the field |
| Honeybee | bees/m2/min | Honeybee density in the field |
| Bumbles | bees/m2/min | Bumblebee density in the field |
| Andrena | bees/m2/min | Andrena bee density in the field |
| Osmia | bees/m2/min | Osmia bee density in the field |
| MaxOfUpperTRange | ℃ | The highest record of the upper band daily air temperature during the bloom season |
| MinOfUpperTRange | ℃ | The lowest record of the upper band daily air temperature |
| AverageOfUpperTRange | ℃ | The average of the upper band daily air temperature |
| MaxOfLowerTRange | ℃ | The highest record of the lower band daily air temperature |
| MinOfLowerTRange | ℃ | The lowest record of the lower band daily air temperature |
| AverageOfLowerTRange | ℃ | The average of the lower band daily air temperature |
| RainingDays | Day | The total number of days during the bloom season, each of which has precipitation larger than zero |
| AverageRainingDays | Day | The average of raining days of the entire bloom season |
Kaggle dataset identifier: wild-blueberry-yield-prediction-dataset
<jupyter_script>#
# "Blueberry Synthetic Data Analysis"
# 
# # **I. Introduction**
# * The Wild Blueberry Yield Prediction Competition is a challenge that requires participants to use a provided dataset to predict the yield of wild blueberries. The dataset used in this competition is derived from a deep learning model trained on the Wild Blueberry Yield Prediction Dataset, and it has feature distributions that are similar but not identical to the original. Participants are encouraged to explore the differences between the two datasets and evaluate whether incorporating the original dataset into their training could improve their model performance. The competition provides participants with a training dataset, a test dataset, and a sample submission file for them to submit their predictions.
# * Synthetic datasets are important for learning and practicing machine learning skills, as they provide a way to explore and experiment with different models and feature engineering ideas. Synthetic data also makes it possible to work with datasets that resemble real-world data in scenarios where collecting and sharing real data is difficult.
# # **II. Background**
# * Kaggle's Playground Series is a collection of lightweight challenges designed for the Kaggle community. These challenges are intended to help participants learn and sharpen their skills in different aspects of machine learning and data science.
# * Synthetic data is often used in these challenges, as it allows for the creation of interesting and complex datasets while avoiding privacy and security concerns. However, generating high-quality synthetic data can be a challenge, as it is important to ensure that the data is representative of real-world data and does not contain artifacts or biases.
# # **III. Data Description**
# The dataset for the competition involves predicting the yield of wild blueberries. The data was generated from a deep learning model trained on the Wild Blueberry Yield Prediction Dataset.
# The synthetic dataset has feature distributions that are similar but not identical to the original dataset. Participants are encouraged to explore the differences between the two datasets and incorporate the original dataset into their training to see if it improves model performance. The competition provides a training dataset, a test dataset, and a sample submission file for participants to submit their predictions. The goal is to strike a balance between having real-world data and ensuring test labels are not publicly available.
# | Feature | Unit | Description |
# |---------------|:-----|:--------------------|
# | Clonesize | m2 | The average blueberry clone size in the field |
# | Honeybee | bees/m2/min | Honeybee density in the field |
# | Bumbles | bees/m2/min | Bumblebee density in the field |
# | Andrena | bees/m2/min | Andrena bee density in the field |
# | Osmia | bees/m2/min | Osmia bee density in the field |
# | MaxOfUpperTRange | ℃ | The highest record of the upper band daily air temperature during the bloom season |
# | MinOfUpperTRange | ℃ | The lowest record of the upper band daily air temperature |
# | AverageOfUpperTRange | ℃ | The average of the upper band daily air temperature |
# | MaxOfLowerTRange | ℃ | The highest record of the lower band daily air temperature |
# | MinOfLowerTRange | ℃ | The lowest record of the lower band daily air temperature |
# | AverageOfLowerTRange | ℃ | The average of the lower band daily air temperature |
# | RainingDays | Day | The total number of days during the bloom season, each of which has precipitation larger than zero |
# | AverageRainingDays | Day | The average of raining days of the entire bloom season |
# # **IV. Import Modules**
import pandas as pd
import seaborn as sns
import numpy as np
import matplotlib.pyplot as plt
import plotly.express as px
import plotly.graph_objects as go
plt.rcParams["figure.figsize"] = (12, 6)
plt.style.use("fivethirtyeight")
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.tree import DecisionTreeRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.metrics import mean_absolute_error
from sklearn.linear_model import LinearRegression
import dabl
import warnings
warnings.filterwarnings("ignore")
# # **V. Load the Dataset**
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Load the original dataset into a Pandas DataFrame
orig_df = pd.read_csv(
"/kaggle/input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv"
).drop(columns="Row#")
orig_df.head().style.set_properties(
**{"background-color": "royalblue", "color": "black", "border-color": "#8b8c8c"}
)
train = pd.read_csv("/kaggle/input/playground-series-s3e14/train.csv")
train.head().style.set_properties(
**{"background-color": "lightblue", "color": "black", "border-color": "#8b8c8c"}
)
test = pd.read_csv("/kaggle/input/playground-series-s3e14/test.csv")
test.head().style.set_properties(
**{"background-color": "yellow", "color": "black", "border-color": "#8b8c8c"}
)
submission = pd.read_csv("/kaggle/input/playground-series-s3e14/sample_submission.csv")
submission.head().style.set_properties(
**{"background-color": "royalblue", "color": "black", "border-color": "#8b8c8c"}
)
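# As suggested in the Data Description, a quick (hedged) way to compare the competition data
# with the original dataset is a two-sample Kolmogorov-Smirnov test per shared numeric column.
# This sketch assumes `train` and `orig_df` are the DataFrames loaded above and that scipy is
# available in the Kaggle environment.
from scipy.stats import ks_2samp

shared_cols = [col for col in orig_df.columns if col in train.columns and col != "yield"]
ks_stats = {
    col: ks_2samp(train[col].dropna(), orig_df[col].dropna()).statistic
    for col in shared_cols
}
# Larger KS statistics indicate features whose distributions differ most between the datasets
print(pd.Series(ks_stats).sort_values(ascending=False).head(10))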
# # **VI. Exploratory Data Analysis**
# * Perform exploratory data analysis on the dataset, including visualizations and statistical analysis
# * Explore the relationship between the features and the target variable (yield)
# #### Use the `info` method to check for missing values and data types
#
orig_df.info()
train.info()
test.info()
submission.info()
print("Shape of orig_df:", orig_df.shape)
print("Shape of train:", train.shape)
print("Shape of test:", test.shape)
print("Shape of submission:", submission.shape)
print("Number of duplicates in original dataset:", orig_df.duplicated().sum())
print("Number of duplicates in training dataset:", train.duplicated().sum())
print("Number of duplicates in testing dataset:", test.duplicated().sum())
print("Number of duplicates in submission dataset:", submission.duplicated().sum())
# drop the id column as it is redundant for modeling
train = train.drop("id", axis=1)
# #### Checking the missing rows
train.isnull().sum()
orig_df.isnull().sum()
# drop_duplicates returns a new DataFrame, so assign the result back
train = train.drop_duplicates()
train.hist(layout=(5, 4), figsize=(20, 15), bins=20)
plt.show()
dabl.plot(train, target_col="yield")
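# A small (hedged) addition to complement the scatter plots below: rank features by absolute
# Pearson correlation with the target. This assumes `train` is the competition training
# DataFrame prepared above (the id column has already been dropped).
corr_with_yield = train.corr()["yield"].drop("yield").abs().sort_values(ascending=False)
print(corr_with_yield.head(10))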
fig = px.scatter(
train,
x="MaxOfUpperTRange",
y="yield",
color="yield",
color_continuous_scale="YlOrRd",
title="Yield vs. MaxOfUpperTRange",
)
fig.update_layout(
title_font_size=20, xaxis_title="MaxOfUpperTRange", yaxis_title="Yield"
)
fig.show()
fig = px.scatter(
train,
x="MinOfUpperTRange",
y="yield",
color="yield",
color_continuous_scale="YlOrRd",
title="Yield vs. MinOfUpperTRange",
)
fig.update_layout(
title_font_size=20, xaxis_title="MinOfUpperTRange", yaxis_title="Yield"
)
fig.show()
fig = px.scatter(
train,
x="AverageOfUpperTRange",
y="yield",
color="yield",
color_continuous_scale="YlOrRd",
title="Yield vs. AverageOfUpperTRange",
)
fig.update_layout(
title_font_size=20, xaxis_title="AverageOfUpperTRange", yaxis_title="Yield"
)
fig.show()
fig = px.scatter(
train,
x="MaxOfLowerTRange",
y="yield",
color="yield",
color_continuous_scale="YlOrRd",
title="Yield vs. MaxOfLowerTRange",
)
fig.update_layout(
title_font_size=20, xaxis_title="MaxOfLowerTRange", yaxis_title="Yield"
)
fig.show()
fig = px.scatter(
train,
x="MinOfLowerTRange",
y="yield",
color="yield",
color_continuous_scale="YlOrRd",
title="Yield vs. MinOfLowerTRange",
)
fig.update_layout(
title_font_size=20, xaxis_title="MinOfLowerTRange", yaxis_title="Yield"
)
fig.show()
fig = px.scatter(
train,
x="AverageOfLowerTRange",
y="yield",
color="yield",
color_continuous_scale="Viridis",
)
fig.show()
# #### Use the `describe` method to get summary statistics of the dataset
# Calculate summary statistics of the original dataset
styled_data = (
orig_df.describe()
.style.background_gradient(cmap="coolwarm")
.set_properties(**{"text-align": "center", "border": "1px solid black"})
)
# display styled data
display(styled_data)
# Calculate summary statistics of the training dataset
styled_data = (
train.describe()
.style.background_gradient(cmap="icefire")
.set_properties(**{"text-align": "center", "border": "1px solid black"})
)
# display styled data
display(styled_data)
# # **VII. Model Training and Evaluation**
# * Split the dataset into training and testing sets
# * Train and evaluate several machine learning models, such as:
# * Linear Regression
# * Decision Tree
# * Random Forest
# * Gradient Boosting
# * Neural Network
#
# * Compare the performance of the models using the Mean Absolute Error (MAE) metric
# Evaluation: submissions will be evaluated using Mean Absolute Error (MAE),
# 
# where each x_i represents the predicted target, y_i represents the ground truth and n is the number of rows in the test set.
# * Visualize the results using a Plotly scatter plot, where the x-axis is the predicted yield and the y-axis is the actual yield
# * Explore the impact of incorporating the original dataset into the training process.
# * Compare the performance of the models using the Mean Absolute Error (MAE) metric with original + synthetic data
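# A brief (hedged) illustration of the MAE formula above, computed by hand with NumPy and
# checked against scikit-learn's implementation. The y_true / y_pred values are made up for
# illustration only; they are not competition data.
import numpy as np
from sklearn.metrics import mean_absolute_error

y_true = np.array([4500.0, 5200.0, 6100.0])
y_pred = np.array([4400.0, 5500.0, 6000.0])
mae_manual = np.mean(np.abs(y_pred - y_true))  # (1/n) * sum of |x_i - y_i|
mae_sklearn = mean_absolute_error(y_true, y_pred)
print(mae_manual, mae_sklearn)  # both evaluate to 166.66...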
import pandas as pd
from sklearn.datasets import make_regression
# reload the original wild blueberry dataset (not the competition train.csv)
orig_df = pd.read_csv(
"/kaggle/input/wild-blueberry-yield-prediction-dataset/WildBlueberryPollinationSimulationData.csv"
).drop(columns="Row#")
# set the random seed for reproducibility
random_seed = 1234
# define the number of synthetic samples to generate
num_samples = 1000
# generate synthetic data using scikit-learn's make_regression function
# (note: make_regression draws features from a standard normal distribution and a random
#  linear model, so this synthetic data does not preserve the original feature distributions)
X, y = make_regression(
n_samples=num_samples,
n_features=len(orig_df.columns) - 1,
n_informative=len(orig_df.columns) - 1,
random_state=random_seed,
)
# create a DataFrame for the synthetic data
df_synth = pd.DataFrame(X, columns=orig_df.columns[:-1])
df_synth["yield"] = y
# save the synthetic data to a CSV file
df_synth.to_csv("synthetic_data.csv", index=False)
# Load synthetic dataset
df_synth = pd.read_csv("synthetic_data.csv")
df_synth.head().style.set_properties(
**{"background-color": "red", "color": "black", "border-color": "#8b8c8c"}
)
# drop_duplicates returns a new DataFrame, so assign the result back
orig_df = orig_df.drop_duplicates()
df_synth = df_synth.drop_duplicates()
df_synth.info()
df_synth.shape
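# Optional (hedged) alternative sketch: rather than make_regression, synthetic rows that
# roughly preserve the original marginal distributions can be drawn by bootstrapping each
# column of orig_df independently. Column dependencies are lost, so this is only an
# illustration and is not used by the modeling pipeline below.
rng = np.random.default_rng(random_seed)
df_boot = pd.DataFrame(
    {col: rng.choice(orig_df[col].values, size=num_samples) for col in orig_df.columns}
)
print(df_boot.describe().loc[["mean", "std"]])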
# Train and evaluate machine learning models using the synthetic dataset
# Split the dataset into training and testing sets
from sklearn.model_selection import train_test_split
X = df_synth.drop(["yield"], axis=1)
y = df_synth["yield"]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)
# Train Linear Regression model
lr = LinearRegression()
lr.fit(X_train, y_train)
# Make predictions on test set using Linear Regression model
lr_pred = lr.predict(X_test)
# Compute MAE for Linear Regression model
lr_mae = mean_absolute_error(y_test, lr_pred)
print("Linear Regression MAE:", lr_mae)
# Train Decision Tree model
dt = DecisionTreeRegressor(random_state=42)
dt.fit(X_train, y_train)
# Make predictions on test set using Decision Tree model
dt_pred = dt.predict(X_test)
# Compute MAE for Decision Tree model
dt_mae = mean_absolute_error(y_test, dt_pred)
print("Decision Tree MAE:", dt_mae)
# Train Random Forest model
rf = RandomForestRegressor(n_estimators=100, random_state=42)
rf.fit(X_train, y_train)
# Make predictions on test set using Random Forest model
rf_pred = rf.predict(X_test)
# Compute MAE for Random Forest model
rf_mae = mean_absolute_error(y_test, rf_pred)
print("MAE of Random Forest:", rf_mae)
# Train Gradient Boosting model
gb = GradientBoostingRegressor(n_estimators=100, random_state=42)
gb.fit(X_train, y_train)
# Make predictions on test set using Gradient Boosting model
gb_pred = gb.predict(X_test)
# Compute MAE for Gradient Boosting model
gb_mae = mean_absolute_error(y_test, gb_pred)
print("MAE of Gradient Boosting:", gb_mae)
# Train Neural Network model
nn = MLPRegressor(hidden_layer_sizes=(100, 50), max_iter=1000, random_state=42)
nn.fit(X_train, y_train)
# Make predictions on test set using Neural Network model
nn_pred = nn.predict(X_test)
# Compute MAE for Neural Network model
nn_mae = mean_absolute_error(y_test, nn_pred)
print("MAE of Neural Network:", nn_mae)
# Concatenate synthetic and original datasets
# (note: the make_regression features are on a very different scale than the original
#  measurements, so this concatenation mixes two unrelated distributions)
df_concat = pd.concat([df_synth, orig_df], ignore_index=True)
# Split concatenated dataset into training and test sets
X_train_concat, X_test_concat, y_train_concat, y_test_concat = train_test_split(
df_concat.drop("yield", axis=1), df_concat["yield"], test_size=0.2, random_state=42
)
from sklearn.impute import SimpleImputer
# Create an instance of SimpleImputer to replace NaN values with mean
imputer = SimpleImputer(strategy="mean")
# Fit imputer on training data and transform both training and test data
X_train_concat_imputed = imputer.fit_transform(X_train_concat)
X_test_concat_imputed = imputer.transform(X_test_concat)
# Train Linear Regression model with imputed data
lr_concat = LinearRegression()
lr_concat.fit(X_train_concat_imputed, y_train_concat)
# Make predictions on test set using Linear Regression model with imputed data
lr_pred_concat = lr_concat.predict(X_test_concat_imputed)
# Compute MAE for Linear Regression model with imputed data
lr_mae_concat = mean_absolute_error(y_test_concat, lr_pred_concat)
print(
"Mean Absolute Error (MAE) for Linear Regression model with concatenated dataset:",
lr_mae_concat,
)
# Train Decision Tree model with imputed data
dt_concat = DecisionTreeRegressor(random_state=42)
dt_concat.fit(X_train_concat_imputed, y_train_concat)
# Make predictions on test set using Decision Tree model with imputed data
dt_pred_concat = dt_concat.predict(X_test_concat_imputed)
# Compute MAE for Decision Tree model with imputed data
dt_mae_concat = mean_absolute_error(y_test_concat, dt_pred_concat)
print(
"Mean Absolute Error (MAE) for Decision Tree model with concatenated dataset:",
dt_mae_concat,
)
from sklearn.impute import SimpleImputer
# Train Random Forest model with imputed data
rf_concat = RandomForestRegressor(n_estimators=100, random_state=42)
rf_concat.fit(X_train_concat_imputed, y_train_concat)
# Make predictions on test set using Random Forest model with imputed data
rf_pred_concat = rf_concat.predict(X_test_concat_imputed)
# Compute MAE for Random Forest model with imputed data
rf_mae_concat = mean_absolute_error(y_test_concat, rf_pred_concat)
print(
"Mean Absolute Error (MAE) for Random Forest model with concatenated dataset:",
rf_mae_concat,
)
# Train Gradient Boosting model with concatenated dataset
gb_concat = GradientBoostingRegressor(n_estimators=100, random_state=42)
gb_concat.fit(X_train_concat_imputed, y_train_concat)
# Make predictions on test set using Gradient Boosting model with concatenated dataset
gb_pred_concat = gb_concat.predict(X_test_concat_imputed)
# Compute MAE for Gradient Boosting model with concatenated dataset
gb_mae_concat = mean_absolute_error(y_test_concat, gb_pred_concat)
print(
"Mean Absolute Error (MAE) for Gradient Boosting model with concatenated dataset:",
gb_mae_concat,
)
# Train Neural Network model with concatenated dataset
nn_concat = MLPRegressor(hidden_layer_sizes=(100, 50), max_iter=1000, random_state=42)
nn_concat.fit(X_train_concat_imputed, y_train_concat)
# Make predictions on test set using Neural Network model with concatenated dataset
nn_pred_concat = nn_concat.predict(X_test_concat_imputed)
# Compute MAE for Neural Network model with concatenated dataset
nn_mae_concat = mean_absolute_error(y_test_concat, nn_pred_concat)
print(
"Mean Absolute Error (MAE) for Neural Network model with concatenated dataset:",
nn_mae_concat,
)
import plotly.graph_objects as go
# Plot predicted vs. actual values for the models trained on the concatenated dataset
fig = px.scatter(
    x=y_test_concat, y=rf_pred_concat, title="Random Forest: Original + Synthetic Dataset"
)
fig.add_scatter(
    x=y_test_concat, y=gb_pred_concat, mode="markers", name="Gradient Boosting"
)
fig.add_scatter(
    x=y_test_concat, y=nn_pred_concat, mode="markers", name="Neural Network"
)
fig.add_scatter(
x=y_test_concat, y=lr_pred_concat, mode="markers", name="Linear Regression"
)
fig.add_scatter(x=y_test_concat, y=dt_pred_concat, mode="markers", name="Decision Tree")
fig.add_trace(
go.Scatter(
x=[y_test_concat.min(), y_test_concat.max()],
y=[y_test_concat.min(), y_test_concat.max()],
mode="lines",
name="Perfect Fit",
)
)
fig.show()
# Compare MAE for the models trained on the synthetic-only dataset
print("Linear Regression MAE:", mean_absolute_error(y_test, lr_pred))
print("Decision Tree MAE:", mean_absolute_error(y_test, dt_pred))
print("Random Forest MAE: ", mean_absolute_error(y_test, rf_pred))
print("Gradient Boosting MAE: ", mean_absolute_error(y_test, gb_pred))
print("Neural Network MAE: ", mean_absolute_error(y_test, nn_pred))
# Split the original data into training and test sets
# (note: this reuses the `train` and `test` names, overwriting the competition DataFrames loaded earlier)
train = orig_df.sample(frac=0.8, random_state=42)
test = orig_df.drop(train.index)
# Define features and target
features = [
"clonesize",
"honeybee",
"bumbles",
"andrena",
"osmia",
"MaxOfUpperTRange",
"MinOfUpperTRange",
"AverageOfUpperTRange",
"MaxOfLowerTRange",
"MinOfLowerTRange",
"AverageOfLowerTRange",
"RainingDays",
"AverageRainingDays",
"fruitset",
"fruitmass",
"seeds",
]
target = "yield"
# Train and evaluate models on test set
models = [
LinearRegression(),
DecisionTreeRegressor(),
RandomForestRegressor(n_estimators=100, random_state=42),
GradientBoostingRegressor(n_estimators=100, random_state=42),
MLPRegressor(random_state=42),
]
mae_scores = []
for model in models:
model.fit(train[features], train[target])
pred = model.predict(test[features])
mae = mean_absolute_error(test[target], pred)
mae_scores.append(mae)
# Train and evaluate models on the training split concatenated with the full original dataset
# (note: orig_df also contains the rows held out in `test`, so this overlap leaks test
#  information and makes the MAE scores below optimistic)
train_orig = orig_df
train_concat = pd.concat([train, train_orig], axis=0)
X_train_concat = train_concat[features]
y_train_concat = train_concat[target]
X_test_concat = test[features]
y_test_concat = test[target]
models_concat = [
LinearRegression(),
DecisionTreeRegressor(),
RandomForestRegressor(n_estimators=100, random_state=42),
GradientBoostingRegressor(n_estimators=100, random_state=42),
MLPRegressor(random_state=42),
]
mae_scores_concat = []
for model in models_concat:
model.fit(X_train_concat, y_train_concat)
pred_concat = model.predict(X_test_concat)
mae_concat = mean_absolute_error(y_test_concat, pred_concat)
mae_scores_concat.append(mae_concat)
# Visualize results
fig = px.scatter(
x=test[target],
y=models[2].predict(test[features]),
title="Random Forest: Synthetic Dataset",
)
fig.add_scatter(
x=test[target],
y=models[3].predict(test[features]),
mode="markers",
name="Gradient Boosting",
)
fig.add_trace(
go.Scatter(
x=test[target],
y=models[4].predict(test[features]),
mode="markers",
name="Neural Network",
)
)
fig.add_trace(
go.Scatter(
x=[test[target].min(), test[target].max()],
y=[test[target].min(), test[target].max()],
mode="lines",
name="Perfect Fit",
)
)
fig.show()
# Visualize results for the concatenated dataset
fig_concat = px.scatter(
x=y_test_concat,
y=models_concat[2].predict(X_test_concat),
title="Random Forest: Original + Synthetic Dataset",
)
fig_concat.add_scatter(
x=y_test_concat,
y=models_concat[3].predict(X_test_concat),
mode="markers",
name="Gradient Boosting",
)
fig_concat.add_trace(
go.Scatter(
x=y_test_concat,
y=models_concat[4].predict(X_test_concat),
mode="markers",
name="Neural Network",
)
)
fig_concat.add_trace(
go.Scatter(
x=[y_test_concat.min(), y_test_concat.max()],
y=[y_test_concat.min(), y_test_concat.max()],
mode="lines",
name="Perfect Fit",
)
)
fig_concat.show()
# ## **Compare the performance of the models using the Mean Absolute Error (MAE) metric with original + synthetic data**
# Train and evaluate models with original + synthetic data
models_concat = []
mae_concat = []
# Linear Regression
lr_concat = LinearRegression()
lr_concat.fit(X_train_concat, y_train_concat)
lr_pred_concat = lr_concat.predict(X_test_concat)
lr_mae_concat = mean_absolute_error(y_test_concat, lr_pred_concat)
models_concat.append(lr_concat)
mae_concat.append(lr_mae_concat)
# Decision Tree
dt_concat = DecisionTreeRegressor(random_state=42)
dt_concat.fit(X_train_concat, y_train_concat)
dt_pred_concat = dt_concat.predict(X_test_concat)
dt_mae_concat = mean_absolute_error(y_test_concat, dt_pred_concat)
models_concat.append(dt_concat)
mae_concat.append(dt_mae_concat)
# Random Forest
rf_concat = RandomForestRegressor(n_estimators=100, random_state=42)
rf_concat.fit(X_train_concat, y_train_concat)
rf_pred_concat = rf_concat.predict(X_test_concat)
rf_mae_concat = mean_absolute_error(y_test_concat, rf_pred_concat)
models_concat.append(rf_concat)
mae_concat.append(rf_mae_concat)
# Gradient Boosting
gb_concat = GradientBoostingRegressor(n_estimators=100, random_state=42)
gb_concat.fit(X_train_concat, y_train_concat)
gb_pred_concat = gb_concat.predict(X_test_concat)
gb_mae_concat = mean_absolute_error(y_test_concat, gb_pred_concat)
models_concat.append(gb_concat)
mae_concat.append(gb_mae_concat)
# Neural Network
nn_concat = MLPRegressor(random_state=42)
nn_concat.fit(X_train_concat, y_train_concat)
nn_pred_concat = nn_concat.predict(X_test_concat)
nn_mae_concat = mean_absolute_error(y_test_concat, nn_pred_concat)
models_concat.append(nn_concat)
mae_concat.append(nn_mae_concat)
# Print MAE for each model
for i, model in enumerate(
[
"Linear Regression",
"Decision Tree",
"Random Forest",
"Gradient Boosting",
"Neural Network",
]
):
print(model + " MAE (with original + synthetic data): " + str(mae_concat[i]))
# Plot results for original + synthetic data
fig_concat = px.scatter(
x=y_test_concat,
y=models_concat[2].predict(X_test_concat),
title="Random Forest: Original + Synthetic Dataset",
)
fig_concat.add_scatter(
x=y_test_concat,
y=models_concat[3].predict(X_test_concat),
mode="markers",
name="Gradient Boosting",
)
fig_concat.add_trace(
go.Scatter(
x=y_test_concat,
y=models_concat[4].predict(X_test_concat),
mode="markers",
name="Neural Network",
)
)
fig_concat.add_trace(
go.Scatter(
x=[y_test_concat.min(), y_test_concat.max()],
y=[y_test_concat.min(), y_test_concat.max()],
mode="lines",
name="Perfect Fit",
)
)
fig_concat.show()
# # **VIII. Submission**
# Print MAE for each model
for i, model in enumerate(
[
"Linear Regression",
"Decision Tree",
"Random Forest",
"Gradient Boosting",
"Neural Network",
]
):
print(model + " MAE (with original + synthetic data): " + str(mae_concat[i]))
# Select model with lowest MAE on the test set
best_model_concat = models_concat[np.argmin(mae_concat)]
print("Best model (with original + synthetic data): " + str(best_model_concat))
# Get the feature names used to train the model
train_X = train.drop(target, axis=1)
feature_names = train_X.columns
# Check the feature names used to train the model
print(feature_names)
# Check the column names in the submission dataset
print(submission.columns)
# The sample_submission file only contains id and yield, so reload the competition test set
# and align its columns with the feature names used to train the model
test_features = pd.read_csv("/kaggle/input/playground-series-s3e14/test.csv")
submission_X = test_features.reindex(columns=feature_names)
from sklearn.impute import SimpleImputer
# Create an imputer to replace missing values with the mean
imputer = SimpleImputer(strategy="mean")
# Fit the imputer on the training data
imputer.fit(X_train_concat)
# Transform the submission dataset using the imputer
submission_X_imputed = imputer.transform(submission_X)
# Make predictions on the submission dataset using the best model
submission_y = best_model_concat.predict(submission_X_imputed)
# Extract the submission ids
submission_ids = submission["id"].values
# Create a dataframe with the submission ids and predicted yields
submission = pd.DataFrame({"id": submission_ids, "yield": submission_y})
# Save predictions to a CSV file
submission.to_csv("submission.csv", index=False)
submission
<jupyter_start><jupyter_text>Uber Request Data.csv
### Context
This dataset is part of an assignment given by IIITB and Upgrad for their Data Science course.
### Content
This data set is a masked data set which is similar to what data analysts at Uber handle.
Kaggle dataset identifier: uber-request-data
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv("/kaggle/input/uber-request-data/Uber Request Data.csv")
df.head()
df.tail()
df.describe()
df.shape
df.info()
# Convert the Request and Drop timestamps to a uniform datetime format
df["Request timestamp"] = pd.to_datetime(df["Request timestamp"])
df["Drop timestamp"] = pd.to_datetime(df["Drop timestamp"])
df.info()
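# A hedged note: in some versions of this dataset the raw timestamps mix day-first formats
# (e.g. "dd/mm/yyyy hh:mm" and "dd-mm-yyyy hh:mm:ss"). If the parsed months above look wrong
# for the assignment period, one option is to re-parse with an explicit day-first flag:
# df["Request timestamp"] = pd.to_datetime(df["Request timestamp"], dayfirst=True)
# df["Drop timestamp"] = pd.to_datetime(df["Drop timestamp"], dayfirst=True)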
df.isnull().sum()
df.Status.value_counts()
# # 1. Which date had the most completed trips during the two-week period?
# Calculate the duration of each trip in minutes
df["trip_duration"] = (
df["Drop timestamp"] - df["Request timestamp"]
).dt.total_seconds() / 60
# Add a new column 'is_completed' that indicates whether a trip is completed or not
df["is_completed"] = df["Status"].apply(lambda x: 1 if x == "Trip Completed" else 0)
# Group the data by date and calculate the number of completed trips and the mean of trip duration on each date
completed_trips_by_date = (
df[df["is_completed"] == 1]
.groupby(pd.Grouper(key="Request timestamp", freq="1D"))
.agg({"is_completed": "sum", "trip_duration": "mean"})
)
# Find the date with the highest number of completed trips and the mean of completed trip duration on that date
max_completed_trips_date = completed_trips_by_date["is_completed"].idxmax()
max_completed_trips = completed_trips_by_date["is_completed"].max()
mean_trip_duration = completed_trips_by_date.loc[
max_completed_trips_date, "trip_duration"
]
print("The date with the most completed trips is:", max_completed_trips_date)
print("The number of completed trips on that date is:", max_completed_trips)
print("The mean of completed trip duration on that date is:", mean_trip_duration)
import matplotlib.pyplot as plt
import seaborn as sns
# Group the data by hour and calculate the number of completed trips in each hour
completed_trips_by_hour = (
df[df["is_completed"] == 1]
.groupby(pd.Grouper(key="Request timestamp", freq="1H"))
.sum()["is_completed"]
)
# Calculate the daily total of completed trips
completed_trips_by_day = completed_trips_by_hour.resample("D").sum()
# Create a line plot of the completed trips over time
sns.lineplot(x=completed_trips_by_day.index, y=completed_trips_by_day.values)
plt.xlabel("Date")
plt.ylabel("Number of Completed Trips")
plt.title("Completed Trips over Time")
plt.show()
# ### Insights:
# - The date with the most completed trips is: 7th Nov, 2016
# - The month with the second-highest number of completed trips is: Dec 2016
# - The number of completed trips on that date is: 601
# - The mean completed-trip duration on that date is: 1372.57 minutes
# # 2. What was the highest no. of completed trips within a 24 hour period?
df.columns
# Calculate the duration of each trip in minutes
df["trip_duration"] = (
df["Drop timestamp"] - df["Request timestamp"]
).dt.total_seconds() / 60
# Add a new column 'is_completed' that indicates whether a trip is completed or not
df["is_completed"] = df["Status"].apply(lambda x: 1 if x == "Trip Completed" else 0)
# Group the data by hour and calculate the number of completed trips in each hour
completed_trips_by_hour = (
df[df["is_completed"] == 1]
.groupby(pd.Grouper(key="Request timestamp", freq="1H"))
.sum()["is_completed"]
)
# Find the highest number of completed trips and the date when it occurred
max_completed_trips = completed_trips_by_hour.max()
max_completed_trips_date = completed_trips_by_hour.idxmax()
print(
"The highest number of completed trips within a 24-hour period is:",
max_completed_trips,
)
print(
"The date when the highest number of completed trips occurred is:",
max_completed_trips_date,
)
# Plot the number of completed trips by hour
completed_trips_by_hour.plot(kind="line")
# Set the plot title and axis labels
plt.title("Completed trips by hour")
plt.xlabel("Hour")
plt.ylabel("Number of completed trips")
# Show the plot
plt.show()
# ### Insights:
# - The highest number of completed trips within a single hour is 47 (the grouping above is hourly, so this is an hourly peak rather than a true 24-hour window)
# - The hour in which that peak occurred is 2016-11-07 09:00:00
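# A hedged sketch (not part of the original analysis): to answer the question with a true
# sliding 24-hour window rather than single hours, a time-based rolling sum over the hourly
# series can be used.
hourly_completed = (
    df[df["is_completed"] == 1]
    .set_index("Request timestamp")["is_completed"]
    .resample("1H")
    .sum()
)
rolling_24h = hourly_completed.rolling("24H").sum()
print(
    "Sketch - max completed trips in any sliding 24-hour window:",
    rolling_24h.max(),
    "(window ending at",
    rolling_24h.idxmax(),
    ")",
)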
# # 3. What was the highest no. of completed trips within a 24-hour period for a single driver?
import pandas as pd
# Filter the data to only include the relevant columns and completed trips
df = df[
[
"Request id",
"Pickup point",
"Driver id",
"Status",
"Request timestamp",
"Drop timestamp",
"trip_duration",
"is_completed",
]
]
completed_trips = df[df["Status"] == "Completed"]
# Convert the Request timestamp column to datetime format
completed_trips["Request timestamp"] = pd.to_datetime(
completed_trips["Request timestamp"]
)
# Create a new column for the date of the request
completed_trips["Request date"] = completed_trips["Request timestamp"].dt.date
# Group the data by driver and date, and count the number of completed trips for each group
completed_trips_by_driver_date = completed_trips.groupby(["Driver id", "Request date"])[
"Request id"
].count()
# Reset the index of the completed_trips_by_driver_date DataFrame
completed_trips_by_driver_date = completed_trips_by_driver_date.reset_index()
# The counts above are already per driver per calendar day, so each row already covers a
# 24-hour window; a rolling window of 24 rows would span 24 days and yield NaN for drivers
# with fewer observed days
completed_trips_by_driver = (
    completed_trips_by_driver_date.groupby("Driver id")["Request id"]
    .rolling(window=1, min_periods=1)
    .sum()
)
# Find the maximum number of completed trips in any 24 hour period for each driver
max_completed_trips_by_driver = completed_trips_by_driver.groupby("Driver id").max()
# Check if the max_completed_trips_by_driver Series is empty
if max_completed_trips_by_driver.empty:
print("There were no completed trips in any 24 hour period for any driver.")
else:
# Find the driver with the highest number of completed trips in any 24 hour period
driver_with_max_completed_trips = max_completed_trips_by_driver.idxmax()
# Print the result
print(
f"The driver with the highest number of completed trips in any 24 hour period is {driver_with_max_completed_trips}, with a maximum of {max_completed_trips_by_driver.max()} completed trips."
)
# Filter the DataFrame to include only completed trips during the 24-hour period
start_time = max_completed_trips_date
end_time = start_time + pd.Timedelta(hours=24)
completed_trips_period = df[
(df["is_completed"] == 1)
& (df["Request timestamp"] >= start_time)
& (df["Request timestamp"] < end_time)
]
# Group the data by driver ID and calculate the sum of completed trips for each driver
completed_trips_by_driver = completed_trips_period.groupby("Driver id")[
"is_completed"
].sum()
# Sort the results in descending order and select the top three drivers
top_drivers = completed_trips_by_driver.sort_values(ascending=False).head(3)
print(
"Top drivers with the highest number of completed trips during the 24-hour period when the highest number of completed trips occurred:"
)
print(top_drivers)
# Calculate the duration of each trip in minutes
df["trip_duration"] = (
df["Drop timestamp"] - df["Request timestamp"]
).dt.total_seconds() / 60
# Add a new column 'is_completed' that indicates whether a trip is completed or not
df["is_completed"] = df["Status"].apply(lambda x: 1 if x == "Trip Completed" else 0)
# Group the data by driver and date, and count the number of completed trips
completed_trips_by_driver_and_date = (
df[df["is_completed"] == 1]
.groupby(["Driver id", pd.Grouper(key="Request timestamp", freq="D")])[
"is_completed"
]
.sum()
)
# Find the highest number of completed trips by any driver in a single calendar day
# (the series is already aggregated per driver per day, so a 24-row rolling window
# would cover 24 days rather than 24 hours)
max_completed_trips_in_24_hours = completed_trips_by_driver_and_date.max()
print(
"The highest number of completed trips within a 24-hour period of a driver is:",
max_completed_trips_in_24_hours,
)
print(completed_trips_by_driver)
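# A hedged alternative sketch (not the author's method): a per-driver sliding 24-hour count
# over the raw completed trips, using a time-based rolling window on the request timestamps.
per_driver_24h = (
    df[df["is_completed"] == 1]
    .set_index("Request timestamp")
    .sort_index()
    .groupby("Driver id")["is_completed"]
    .rolling("24H")
    .sum()
)
print(
    "Sketch - most completed trips by a single driver in any sliding 24-hour window:",
    per_driver_24h.max(),
)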
# # 4.Which hour of the day had the most requests during the two week period?
# Extract the hour from requested timestamp
df["Request hour"] = df["Request timestamp"].dt.hour
df.head()
import matplotlib.pyplot as plt
import seaborn as sns
# Group the data by hour and count the number of requests in each hour
requests_by_hour = df.groupby("Request hour")["Request id"].count()
# Find the hour with the most requests
max_requests_hour = requests_by_hour.idxmax()
print(
"The hour with the most requests during the two week period is:", max_requests_hour
)
# Select the top 3 hours by request frequency
hour_freq_sorted = requests_by_hour.sort_values(ascending=False)
top_3 = hour_freq_sorted.head(3)
# Create a bar plot of the requests by hour
ax = sns.barplot(x=requests_by_hour.index, y=requests_by_hour.values)
ax.set(
xlabel="Hour of the Day",
ylabel="Number of Requests",
title="Requests by Hour of the Day",
)
# Loop through the top 3 frequencies and add a text label to the corresponding bar
# (Series.iteritems() was removed in pandas 2.0; items() is the current API)
for hour, freq in top_3.items():
    plt.text(hour, freq, str(freq), ha="center", va="bottom", color="Green")
plt.show()
# ### Insights
# - The highest peak hours for requests occurred in the evening from 6-8 PM.
# - The hour with the most requests during the two week period was 6 PM, with 510 requests.
# - The second highest peak hour was 8 PM with 492 requests.
# - The third highest peak hour was 7 PM with 473 requests.
# # 5. What percent of all zeros during the two-week period occurred on weekends (Friday at 5 p.m. to Sunday at 3 p.m.)?
df.sample(40)
df.columns
import pandas as pd
# Convert the Request timestamp column to datetime format
df["Request timestamp"] = pd.to_datetime(df["Request timestamp"])
# Create a new column for the day of the week
df["Day of Week"] = df["Request timestamp"].dt.day_name()
# Create a new column for the hour of the day
df["Request hour"] = df["Request timestamp"].dt.hour
# Count the number of zeros during the two week period
# (this dataset may not contain a 'Response time' column, so guard against a KeyError)
if "Response time" in df.columns:
    num_zeros_total = (df["Response time"] == 0).sum()
    # Filter the data to only include weekends (Friday 5pm to Sunday 3pm)
    weekend_mask = (
        ((df["Day of Week"] == "Friday") & (df["Request hour"] >= 17))
        | (df["Day of Week"] == "Saturday")
        | ((df["Day of Week"] == "Sunday") & (df["Request hour"] < 15))
    )
    num_zeros_weekend = ((df["Response time"] == 0) & weekend_mask).sum()
    # Calculate the percent of all zeros that occurred on weekends
    percent_zeros_weekend = (num_zeros_weekend / num_zeros_total) * 100
    print(
        f"{percent_zeros_weekend:.2f}% of all zeros during the two week period occurred on weekends."
    )
else:
    print("Error: 'Response time' column not found in DataFrame.")
import pandas as pd
# Convert the Request timestamp column to datetime format
df["Request timestamp"] = pd.to_datetime(df["Request timestamp"])
# Create a new column for the day of the week
df["Day of Week"] = df["Request timestamp"].dt.day_name()
# Create a new column for the hour of the day
df["Request hour"] = df["Request timestamp"].dt.hour
# Check if the 'Response time' column is present in the DataFrame
if "Response time" in df.columns:
# Count the number of zeros during the two week period
num_zeros_total = (df["Response time"] == 0).sum()
# Filter the data to only include weekends (Friday 5pm to Sunday 3pm)
weekend_mask = (
((df["Day of Week"] == "Friday") & (df["Request hour"] >= 17))
| (df["Day of Week"].isin(["Saturday", "Sunday"]))
| ((df["Day of Week"] == "Sunday") & (df["Request hour"] < 15))
)
num_zeros_weekend = ((df["Response time"] == 0) & weekend_mask).sum()
# Calculate the percent of all zeros that occurred on weekends
percent_zeros_weekend = (num_zeros_weekend / num_zeros_total) * 100
print(
f"{percent_zeros_weekend:.2f}% of all zeros during the two week period occurred on weekends."
)
else:
print("Error: 'Response time' column not found in DataFrame.")
df.columns
# Convert the Request timestamp column to datetime format
df["Request timestamp"] = pd.to_datetime(df["Request timestamp"])
# Create a new column for the day of the week
df["Day of Week"] = df["Request timestamp"].dt.day_name()
# Create a new column for the hour of the day
df["Request hour"] = df["Request timestamp"].dt.hour
# Check if the 'Response time' column is present in the DataFrame
if "Response time" in df.columns:
# Count the number of zeros during the two week period
num_zeros_total = (df["Response time"] == 0).sum()
# Filter the data to only include weekends (Friday 5pm to Sunday 3pm)
weekend_mask = (
((df["Day of Week"] == "Friday") & (df["Request hour"] >= 17))
| (df["Day of Week"].isin(["Saturday", "Sunday"]))
| ((df["Day of Week"] == "Sunday") & (df["Request hour"] < 15))
)
num_zeros_weekend = ((df["Response time"] == 0) & weekend_mask).sum()
# Calculate the percent of all zeros that occurred on weekends
percent_zeros_weekend = (num_zeros_weekend / num_zeros_total) * 100
print(
f"{percent_zeros_weekend:.2f}% of all zeros during the two week period occurred on weekends."
)
else:
print("Error: 'Response time' column not found in DataFrame.")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/248/129248478.ipynb
|
uber-request-data
|
anupammajhi
|
[{"Id": 129248478, "ScriptId": 38419899, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11189402, "CreationDate": "05/12/2023 07:02:33", "VersionNumber": 2.0, "Title": "Uber Data Analysis", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 332.0, "LinesInsertedFromPrevious": 206.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 126.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185126084, "KernelVersionId": 129248478, "SourceDatasetVersionId": 182068}]
|
[{"Id": 182068, "DatasetId": 78953, "DatasourceVersionId": 192927, "CreatorUserId": 1125300, "LicenseName": "Unknown", "CreationDate": "11/17/2018 23:22:01", "VersionNumber": 1.0, "Title": "Uber Request Data.csv", "Slug": "uber-request-data", "Subtitle": "For Uber Supply Demand Gap - EDA", "Description": "### Context\n\nThis dataset is a part of assignment given by IIITB and Upgrad for Data Science Course.\n\n\n### Content\n\nThis data set is a masked data set which is similar to what data analysts at Uber handle.\n\n\n### Acknowledgements\n\nSources are taken from the PGD Data Science course from Upgrad", "VersionNotes": "Initial release", "TotalCompressedBytes": 395061.0, "TotalUncompressedBytes": 395061.0}]
|
[{"Id": 78953, "CreatorUserId": 1125300, "OwnerUserId": 1125300.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 182068.0, "CurrentDatasourceVersionId": 192927.0, "ForumId": 88310, "Type": 2, "CreationDate": "11/17/2018 23:22:01", "LastActivityDate": "11/17/2018", "TotalViews": 36167, "TotalDownloads": 4630, "TotalVotes": 32, "TotalKernels": 15}]
|
[{"Id": 1125300, "UserName": "anupammajhi", "DisplayName": "Anupam Majhi", "RegisterDate": "06/14/2017", "PerformanceTier": 1}]
|
| false | 1 | 4,012 | 0 | 4,091 | 4,012 |
||
129248538
|
<jupyter_start><jupyter_text>Food101
Kaggle dataset identifier: food101
<jupyter_script># # 🤔 **I decided to perform Multiclass Classification on all 101 Classes of Food101 Dataset using CNN. To get the best split(which yields better accuracy and loss) I decided to use StratifiedKFold Cross Validation by applying 10 Folds, But as my model has to train on 101 class viz. 101,000 images, it's taking half an hour to run a single epoch even after enabling GPU!**
# # 🙂 **So, I decided to keep this code as it is, those who have higher GPU computation power on their own device, can run this code and look at the accuracy score and loss metric and can even try improving upon my model**
# # 🤗 **Would be Happy if somebody who try this out(using CNN and not Transfer Learning) can acknowledge me whether if there's any error in my logic or any other!**
# # In my other Notebook, I have performed the same using Transfer Learning
# # ⏬ **Importing all required Libraries**
import os
import random
# %matplotlib inline # for printing the images in line!
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import pandas as pd
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Model
from tensorflow.keras.layers import (
Input,
Dense,
Flatten,
Conv2D,
MaxPool2D,
AveragePooling2D,
Dropout,
)
from tensorflow.keras.callbacks import EarlyStopping
# # 🛠️ **Splitting Images folder**
# ### **To get all the images from each folder of all 101 classes present in the "images" directory, I have applied the logic below: the 1st loop runs 101 times to get all 101 folder names, and the 2nd loop runs 1000 times for each iteration of the 1st loop to get all 1000 images from each class (folder). All the image paths are stored in the "all_images" list and all the labels in the "target_label" list (an equivalent pathlib-based sketch follows the loop below).**
# ### **This is how we can get all 101,000 image paths in a single variable, with their corresponding labels in the target_label list!**
images_folder_path = "/kaggle/input/food101/food-101/images"
images_folder_list = sorted(os.listdir("/kaggle/input/food101/food-101/images"))
all_images = []
target_label = []
for i in range(len(images_folder_list)):
x = (
images_folder_path + "/" + images_folder_list[i]
) # for i = 0, /kaggle/input/food101/food-101/images/apple_pie
y = sorted(os.listdir(x))
for j in range(
len(y)
): # for j = 0, so this loop will run 1000 times for each iteration i
sub_folder_files = (
x + "/" + y[j]
) # for j = 0, /kaggle/input/food101/food-101/images/apple_pie/1005649.jpg
all_images.append(sub_folder_files)
target_label.append(
images_folder_list[i]
) # so we'll get the label of that particular image as well!
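# A hedged aside (equivalent to the nested loops above, not the author's original method):
# the same paths and labels can be collected with pathlib; the `_alt` names are illustrative only.
from pathlib import Path

image_paths_alt = sorted(Path(images_folder_path).glob("*/*.jpg"))
all_images_alt = [str(p) for p in image_paths_alt]
target_label_alt = [p.parent.name for p in image_paths_alt]
print("Sketch - collected", len(all_images_alt), "image paths via pathlib")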
# # 👀 **Now let's look at the lengths of the "all_images" and "target_label" variables**
print(
"Length of all images:",
len(all_images),
"\nLength of all target_label:",
len(target_label),
)
# # 🔃 **Converting text_target_labels to integer form... (for the machine to read, understand and interpret easily)**
le = LabelEncoder()
encoded_target_label = le.fit_transform(target_label)
print(
"Encoded_Target_Label is:",
encoded_target_label,
"\nLength of it is:",
len(encoded_target_label),
)
# ## **The below 2 cells are most important, for logic reference!**
x = images_folder_path + "/" + images_folder_list[0]
len(os.listdir(x)), x, sorted(os.listdir(x))[0]
y = images_folder_path + "/" + images_folder_list[0] + "/" + sorted(os.listdir(x))[0]
y
# #### **Testing the code and, first of all, visualizing a single image...**
x = images_folder_path + "/" + images_folder_list[0]
y = random.sample(os.listdir(x), 1)
# y[0]
img = mpimg.imread(x + "/" + y[0])
plt.imshow(img)
img
with open("/kaggle/input/food101/food-101/meta/labels.txt", "r") as f:
labels_for_visualization = f.read().strip().split("\n")
# # 💁🏼 **Visualizing on each image from every class...**
# let's visualize a random image 1 from each class
def view_random_image_from_each_class(images_folder_path):
"""
Print/Show each image from each class thereby show 101 classes randomly generated each image
"""
plt.figure(figsize=(4, 12), dpi=500)
for i in range(len(images_folder_list)):
ax = plt.subplot(17, 6, i + 1)
# Set up the target class folder
class_folder = images_folder_path + "/" + images_folder_list[i]
# print(class_folder)
# Get the random image path
random_image = random.sample(os.listdir(class_folder), 1)
# Read in the image and plot it using matplotlib
img = mpimg.imread(
class_folder + "/" + random_image[0]
) # this will return image tensor
# random_image[0] --> as to convert the list back into string
# Now let's visualize it using imshow() function
plt.imshow(img)
plt.axis(False)
ax.set_title(labels_for_visualization[i], fontsize=3.5)
ax.text(0, 1.06, f"Shape: {img.shape}", fontsize=3, transform=ax.transAxes)
plt.tight_layout()
view_random_image_from_each_class("/kaggle/input/food101/food-101/images")
# # 🔁 **Converting into DataFrame**
# ### Now let's convert the "all_images" and "encoded_target_label" variables (both lists) into a DataFrame to create a structured dataset, so that later we can use the "flow_from_dataframe" method of the "ImageDataGenerator" class.
df = pd.DataFrame({"filepath": all_images, "label": encoded_target_label}, dtype="str")
df
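# A quick hedged sanity check (assuming the DataFrame built above): each of the 101 classes
# should contribute roughly 1000 rows, for about 101,000 rows in total.
print("Total rows:", len(df))
print(df["label"].value_counts().head())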
# # ✏️ **Create ImageDataGenerator Objects...**
# Augmented Image Generator
train_datagen_augmented = ImageDataGenerator(
rescale=1.0 / 255,
rotation_range=260,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.4,
horizontal_flip=True,
vertical_flip=True,
fill_mode="nearest",
)
# Non-Augmented Image Generator
train_datagen = ImageDataGenerator(rescale=1.0 / 255)
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
# # 📏 **Doing train_test_split**
# ### As I am unable to perform Stratified K-Fold Cross-Validation due to a lack of GPU power and time, a plain train/test split is used here (a stratified variant is sketched just after the split below).
X_train, X_test, y_train, y_test = train_test_split(
df["filepath"], df["label"], test_size=0.1, random_state=42
)
len(X_train), len(X_test), type(X_train), type(y_train)
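# A hedged sketch (not the split actually used above): passing `stratify` keeps the 101-class
# proportions identical in the train and validation parts; the `_s` names are illustrative only.
X_train_s, X_test_s, y_train_s, y_test_s = train_test_split(
    df["filepath"],
    df["label"],
    test_size=0.1,
    random_state=42,
    stratify=df["label"],
)
print("Sketch - stratified split sizes:", len(X_train_s), len(X_test_s))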
# # **Converting again back into DataFrame**
# ### As seen from the output above, the split returned Series objects, so we convert them back into DataFrames
X_train = X_train.to_frame()
X_test = X_test.to_frame()
y_train = y_train.to_frame()
y_test = y_test.to_frame()
X_train.index == y_train.index
X_train
y_train
# # 🔗 **Merge (Input_Train, Input Target) & (Valid_Input, Valid_Target)**
train_df = pd.merge(X_train, y_train, left_index=True, right_index=True)
val_df = pd.merge(X_test, y_test, left_index=True, right_index=True)
train_df
val_df
# # 〰️ **Flowing images from DataFrame**
# Augmented Train Data...
train_data_augmented = train_datagen_augmented.flow_from_dataframe(
dataframe=train_df,
directory="/kaggle/input/food101/food-101/images",
x_col="filepath",
y_col="label",
target_size=(224, 224),
batch_size=32,
class_mode="categorical",
shuffle=True,
)
# Non-Augmented Train Data...
train_data = train_datagen.flow_from_dataframe(
dataframe=train_df,
directory="/kaggle/input/food101/food-101/images",
x_col="filepath",
y_col="label",
target_size=(224, 224),
batch_size=32,
class_mode="categorical",
shuffle=True,
)
# Non-Augmented Test Data...
test_data = test_datagen.flow_from_dataframe(
dataframe=val_df,
directory="/kaggle/input/food101/food-101/images",
x_col="filepath",
y_col="label",
target_size=(224, 224),
batch_size=32,
class_mode="categorical",
shuffle=True,
)
# # 👀 **Visualizing our images and Shapes...**
# For Visualization purpose, Unpacking the tuples and knowing the shape, len etc. of augmented/non_augmented images and labels
augmented_images, augmented_labels = next(train_data_augmented)  # next() is preferred over the deprecated .next()
non_augmented_images, non_augmented_labels = next(train_data)
print(
"Augmented Image shape:",
augmented_images.shape,
"\nNon-Augmented Image shape:",
non_augmented_images.shape,
"\nAugmented Images Labels are:",
augmented_labels,
"\nNon Augmented Images Labels are:",
non_augmented_labels,
)
# Using matplotlib library to visualize our images...
random_number = random.randint(0, 31)  # batch size is 32, so valid indices are 0-31 (randint is inclusive)
print(f"Showing Randomly Generated image number: {random_number}")
# setting up the figure and axes...
fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(4, 2), dpi=170)
axs[0].imshow(
non_augmented_images[random_number]
) # from train_data images, select randomly
axs[0].set_title(f"Original image")
axs[0].axis(True)
axs[1].imshow(augmented_images[random_number])
axs[1].set_title(f"Augmented image")
axs[1].axis(True)
plt.tight_layout()
plt.show()
print("===" * 30)
# # 🏋️♂️ **Creating a CNN Model using Functional API**
# Creating a model now using Functional API rather than Sequential API...
input_layer = Input(shape=(224, 224, 3))
Layer_1 = Conv2D(filters=10, kernel_size=3, activation="relu")(input_layer)
Layer_2 = MaxPool2D(pool_size=2, padding="valid")(Layer_1)
Layer_3 = Conv2D(filters=10, kernel_size=3, activation="relu")(Layer_2)
Layer_4 = AveragePooling2D(pool_size=2, padding="valid")(Layer_3)
Layer_5 = Flatten()(Layer_4)
output_layer = Dense(101, activation="softmax")(Layer_5)
# Defining the model by specifying the input and output layers...
model = Model(inputs=input_layer, outputs=output_layer)
# Compiling the model...
model.compile(
loss="categorical_crossentropy",
    optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),  # 'lr' is deprecated; use 'learning_rate'
metrics=["accuracy"],
)
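# A quick hedged check before training: with only two small conv blocks before Flatten,
# most of the trainable parameters sit in the final 101-way Dense layer.
model.summary()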
# fitting upon the model...
history = model.fit(
train_data_augmented,
epochs=5,
steps_per_epoch=len(train_data_augmented),
validation_data=test_data,
validation_steps=len(test_data),
)
# # 📈 **Checking the Accuracy of our model...**
# Now, let's plot the curve between losses and accuracy and epochs...
accuracy = history.history["accuracy"]
val_accuracy = history.history["val_accuracy"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs = range(len(accuracy))
# using matplotlib library, let's plot the curves now...
fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(5, 2), dpi=150)
axes[0].plot(epochs, accuracy, "r", label="Training Accuracy")
axes[0].plot(epochs, val_accuracy, "b", label="Validation Accuracy")
axes[0].set_title("Training v/s Validation Accuracy")
axes[0].legend()
axes[1].plot(epochs, loss, "r", label="Training Loss")
axes[1].plot(epochs, val_loss, "b", label="Validation Loss")
axes[1].set_title("Training v/s Validation Loss")
axes[1].legend()
plt.show()
# # 🥲 **The entire block below is commented out because, although it works, it takes about 2.5 hours per fold (roughly 30 minutes per epoch on average), so with the 10 folds I specified it would take about 25 hours to run!**
# # Defining the Stratified K Fold Cross Validator
# skf = StratifiedKFold(n_splits=10, shuffle=True)
# fold_no = 1
# for train_index, val_index in skf.split(df['filepath'], df['label']):
# train_df = df.iloc[train_index]
# val_df = df.iloc[val_index]
# # print(type(train_df))
# # print(train_index, val_index)
# # Augmented Train Data...
# train_data_augmented = train_datagen_augmented.flow_from_dataframe(dataframe=train_df,
# directory='/kaggle/input/food101/food-101/images',
# x_col='filepath',
# y_col='label',
# target_size=(224, 224),
# batch_size=32,
# class_mode='categorical',
# shuffle=True)
# # Non-Augmented Train Data...
# train_data = train_datagen.flow_from_dataframe(dataframe=train_df,
# directory='/kaggle/input/food101/food-101/images',
# x_col='filepath',
# y_col='label',
# target_size=(224,224),
# batch_size=32,
# class_mode='categorical',
# shuffle=True)
# # Non-Augmented Test Data...
# test_data = test_datagen.flow_from_dataframe(dataframe=val_df,
# directory='/kaggle/input/food101/food-101/images',
# x_col='filepath',
# y_col='label',
# target_size=(224,224),
# batch_size=32,
# class_mode='categorical',
# shuffle=True)
# # For Visualization purpose, Unpacking the tuples and knowing the shape, len etc. of augmented/non_augmented images and labels
# augmented_images, augmented_labels = train_data_augmented.next()
# non_augmented_images, non_augmented_labels = train_data.next()
# print("Augmented Image shape:", augmented_images.shape,
# "\nNon-Augmented Image shape:", non_augmented_images.shape,
# "\nAugmented Images Labels are:", augmented_labels,
# "\nNon Augmented Images Labels are:", non_augmented_labels)
# # Using matplotlib library to visualize our images...
# random_number = random.randint(0, 32) # because our batch size are 32
# print(f"Showing Randomly Generated image number: {random_number}")
# # setting up the figure and axes...
# fig, axs = plt.subplots(nrows=1, ncols=2, figsize=(4,2), dpi=170)
# axs[0].imshow(non_augmented_images[random_number]) # from train_data images, select randomly
# axs[0].set_title(f"Original image")
# axs[0].axis(True)
# axs[1].imshow(augmented_images[random_number])
# axs[1].set_title(f"Augmented image")
# axs[1].axis(True)
# plt.tight_layout()
# plt.show()
# print("==="*30)
# # Creating a model now using Functional API rather than Sequential API...
# input_layer = Input(shape=(224, 224, 3))
# Layer_1 = Conv2D(filters=10, kernel_size=3, activation='relu')(input_layer)
# Layer_2 = Conv2D(filters=10, kernel_size=3, activation='relu')(Layer_1)
# Layer_3 = MaxPool2D(pool_size=2, padding='valid')(Layer_2)
# Layer_4 = Conv2D(filters=10, kernel_size=3, activation='relu')(Layer_3)
# Layer_5 = Conv2D(filters=10, kernel_size=3, activation='relu')(Layer_4)
# Layer_6 = AveragePooling2D(pool_size=2, padding='valid')(Layer_5)
# Layer_7 = Flatten()(Layer_6)
# output_layer = Dense(101, activation='softmax')(Layer_7)
# # Defining the model by specifying the input and output layers...
# model = Model(inputs=input_layer, outputs=output_layer)
# # Compiling the model...
# model.compile(loss='categorical_crossentropy',
# optimizer=tf.keras.optimizers.Adam(lr=0.001),
# metrics=['accuracy'])
# # fitting upon the model...
# history = model.fit(train_data_augmented,
# epochs=5,
# steps_per_epoch=len(train_data_augmented),
# validation_data=test_data,
# validation_steps=len(test_data))
# # Now, let's plot the curve between losses and accuracy and epochs...
# accuracy = history.history['accuracy']
# val_accuracy = history.history['val_accuracy']
# loss = history.history['loss']
# val_loss = history.history['val_loss']
# epochs = range(len(accuracy))
# # using matplotlib library, let's plot the curves now...
# fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(5,2), dpi=150)
# axes[0].plot(epochs, accuracy, 'r', label='Training Accuracy')
# axes[0].plot(epochs, val_accuracy, 'b', label='Validation Accuracy')
# axes[0].set_title("Training v/s Validation Accuracy")
# axes[0].legend()
# axes[1].plot(epochs, loss, 'r', label='Training Loss')
# axes[1].plot(epochs, val_loss, 'b', label='Validation Loss')
# axes[1].set_title("Training v/s Validation Loss")
# axes[1].legend()
# plt.show()
# fold_no = fold_no + 1
# print("\n===> Train Data Length chosen:", len(train_index),
# ", Validation Data Length chosen:", len(val_index), "\n")
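# A hedged sketch of one way to make the commented-out loop above tractable: run the same
# 10-fold scheme on a small stratified subsample first (100 images per class is an assumed
# illustrative size, not a value from the original notebook).
df_small = (
    df.groupby("label", group_keys=False)
    .apply(lambda g: g.sample(n=100, random_state=42))
    .reset_index(drop=True)
)
skf_small = StratifiedKFold(n_splits=10, shuffle=True, random_state=42)
for fold_no_small, (tr_idx, va_idx) in enumerate(
    skf_small.split(df_small["filepath"], df_small["label"]), start=1
):
    print(f"Sketch fold {fold_no_small}: train={len(tr_idx)}, val={len(va_idx)}")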
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/248/129248538.ipynb
|
food101
|
crybread
|
[{"Id": 129248538, "ScriptId": 38259051, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10685390, "CreationDate": "05/12/2023 07:03:05", "VersionNumber": 1.0, "Title": "CNN Multiclass on FOOD101 (All 101 classes)", "EvaluationDate": "05/12/2023", "IsChange": true, "TotalLines": 399.0, "LinesInsertedFromPrevious": 399.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 5}]
|
[{"Id": 185126182, "KernelVersionId": 129248538, "SourceDatasetVersionId": 4961490}]
|
[{"Id": 4961490, "DatasetId": 2877506, "DatasourceVersionId": 5029907, "CreatorUserId": 11995483, "LicenseName": "Unknown", "CreationDate": "02/07/2023 18:18:20", "VersionNumber": 1.0, "Title": "Food101", "Slug": "food101", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 2877506, "CreatorUserId": 11995483, "OwnerUserId": 11995483.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 4961490.0, "CurrentDatasourceVersionId": 5029907.0, "ForumId": 2913940, "Type": 2, "CreationDate": "02/07/2023 18:18:20", "LastActivityDate": "02/07/2023", "TotalViews": 222, "TotalDownloads": 13, "TotalVotes": 3, "TotalKernels": 1}]
|
[{"Id": 11995483, "UserName": "crybread", "DisplayName": "CryBread", "RegisterDate": "10/18/2022", "PerformanceTier": 0}]
|
| false | 0 | 5,151 | 5 | 5,172 | 5,151 |
||
129702862
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import seaborn as sns
import matplotlib.pyplot as plt
url = "https://raw.githubusercontent.com/logan-lauton/Capsicum-Research/main/data/cap_data_github.csv"
cap_data = pd.read_csv(url)
cap_data
sns.set_style("whitegrid")
plt.figure(figsize=(10, 6))
plt.hist(
cap_data["Satisfaction_growing_plants"],
bins=range(1, 7),
rwidth=0.8,
color="lightgrey",
edgecolor="black",
)
plt.title("Satisfaction of Growers", loc="left", fontsize=14, fontweight="bold")
plt.xlabel("Level of Satisfaction")
plt.ylabel("# of Responses")
plt.xticks(
range(2, 6), ["Dissatisfied", "Neutral", "Satisfied", "Completely Satisfied"]
)
plt.show()
sns.set_style("whitegrid")
plt.figure(figsize=(10, 6))
plt.hist(
cap_data["Familiar_with_capsicum"],
bins=range(2, 6),
rwidth=0.8,
color="lightgrey",
edgecolor="black",
)
plt.title("Capsicum Knowledge of Growers", loc="left", fontsize=14, fontweight="bold")
plt.xlabel("Level of Familiarity")
plt.ylabel("# of Responses")
plt.xticks(range(3, 6), ["Heard of", "Familiar", "Knowledgeable"])
sns.set_style("whitegrid")
plt.figure(figsize=(10, 6))
plt.hist(
cap_data["Save_seeds"],
bins=range(1, 7),
rwidth=0.8,
color="lightgrey",
edgecolor="black",
)
plt.title("Do Growers Save Seeds?", loc="left", fontsize=14, fontweight="bold")
plt.xlabel("Are Seeds Saved?")
plt.ylabel("# of Responses")
plt.xticks(
range(1, 6),
["Completely Disagree", "Disagree", "Neutral", "Agree", "Completely Agree"],
)
plt.show()
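# As a compact cross-check for Likert-style columns, value_counts() gives the same tallies
# without having to pick histogram bins (a small optional sketch, not part of the charts above):
print(cap_data["Save_seeds"].value_counts().sort_index())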
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/702/129702862.ipynb
| null | null |
[{"Id": 129702862, "ScriptId": 38562348, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14335370, "CreationDate": "05/15/2023 21:58:48", "VersionNumber": 1.0, "Title": "notebookbcd8a6445d", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 67.0, "LinesInsertedFromPrevious": 67.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import seaborn as sns
import matplotlib.pyplot as plt
url = "https://raw.githubusercontent.com/logan-lauton/Capsicum-Research/main/data/cap_data_github.csv"
cap_data = pd.read_csv(url)
cap_data
sns.set_style("whitegrid")
plt.figure(figsize=(10, 6))
plt.hist(
cap_data["Satisfaction_growing_plants"],
bins=range(1, 7),
rwidth=0.8,
color="lightgrey",
edgecolor="black",
)
plt.title("Satisfaction of Growers", loc="left", fontsize=14, fontweight="bold")
plt.xlabel("Level of Satisfaction")
plt.ylabel("# of Responses")
plt.xticks(
range(2, 6), ["Dissatisfied", "Neutral", "Satisfied", "Completely Satisfied"]
)
plt.show()
sns.set_style("whitegrid")
plt.figure(figsize=(10, 6))
plt.hist(
cap_data["Familiar_with_capsicum"],
bins=range(2, 6),
rwidth=0.8,
color="lightgrey",
edgecolor="black",
)
plt.title("Capsicum Knowledge of Growers", loc="left", fontsize=14, fontweight="bold")
plt.xlabel("Level of Familiarity")
plt.ylabel("# of Responses")
plt.xticks(range(3, 6), ["Heard of", "Familiar", "Knowledgeable"])
sns.set_style("whitegrid")
plt.figure(figsize=(10, 6))
plt.hist(
cap_data["Save_seeds"],
bins=range(1, 7),
rwidth=0.8,
color="lightgrey",
edgecolor="black",
)
plt.title("Do Growers Save Seeds?", loc="left", fontsize=14, fontweight="bold")
plt.xlabel("Are Seeds Saved?")
plt.ylabel("# of Responses")
plt.xticks(
range(1, 6),
["Completely Disagree", "Disagree", "Neutral", "Agree", "Completely Agree"],
)
plt.show()
| false | 0 | 712 | 0 | 712 | 712 |
||
129702705
|
<jupyter_start><jupyter_text>Iris Flower Dataset
### Context
The Iris flower data set is a multivariate data set introduced by the British statistician and biologist Ronald Fisher in his 1936 paper The use of multiple measurements in taxonomic problems. It is sometimes called Anderson's Iris data set because Edgar Anderson collected the data to quantify the morphologic variation of Iris flowers of three related species. The data set consists of 50 samples from each of three species of Iris (Iris Setosa, Iris virginica, and Iris versicolor). Four features were measured from each sample: the length and the width of the sepals and petals, in centimeters.
This dataset became a typical test case for many statistical classification techniques in machine learning such as support vector machines
### Content
The dataset contains a set of 150 records under 5 attributes - Petal Length, Petal Width, Sepal Length, Sepal width and Class(Species).
Kaggle dataset identifier: iris-flower-dataset
<jupyter_code>import pandas as pd
df = pd.read_csv('iris-flower-dataset/IRIS.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 150 entries, 0 to 149
Data columns (total 5 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 sepal_length 150 non-null float64
1 sepal_width 150 non-null float64
2 petal_length 150 non-null float64
3 petal_width 150 non-null float64
4 species 150 non-null object
dtypes: float64(4), object(1)
memory usage: 6.0+ KB
<jupyter_text>Examples:
{
"sepal_length": 5.1,
"sepal_width": 3.5,
"petal_length": 1.4,
"petal_width": 0.2,
"species": "Iris-setosa"
}
{
"sepal_length": 4.9,
"sepal_width": 3.0,
"petal_length": 1.4,
"petal_width": 0.2,
"species": "Iris-setosa"
}
{
"sepal_length": 4.7,
"sepal_width": 3.2,
"petal_length": 1.3,
"petal_width": 0.2,
"species": "Iris-setosa"
}
{
"sepal_length": 4.6,
"sepal_width": 3.1,
"petal_length": 1.5,
"petal_width": 0.2,
"species": "Iris-setosa"
}
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torch.nn as nn
from torch.utils.data import TensorDataset
import torch
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# following the book ml with scikit and pytorch
iris_data = pd.read_csv("/kaggle/input/iris-flower-dataset/IRIS.csv")
iris_data.columns
X = iris_data[["sepal_length", "sepal_width", "petal_length", "petal_width"]].copy()
y = iris_data["species"].copy()
X.head()
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y = le.fit_transform(y)
y
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
X_train_norm = (X_train - np.mean(X_train)) / np.std(X_train)
X_train_norm = X_train_norm.to_numpy()
X_train_norm = torch.from_numpy(X_train_norm).float()
y_train = torch.from_numpy(y_train)
train_ds = TensorDataset(X_train_norm, y_train)
torch.manual_seed(1)
batch_size = 2
train_dl = DataLoader(train_ds, batch_size, shuffle=True)
class Model(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super().__init__()
self.layer1 = nn.Linear(input_size, hidden_size)
self.layer2 = nn.Linear(hidden_size, output_size)
def forward(self, x):
x = self.layer1(x)
x = nn.Sigmoid()(x)
x = self.layer2(x)
x = nn.Softmax(dim=1)(x)
return x
input_size = X_train_norm.shape[1]
hidden_size = 16
output_size = 3
model = Model(input_size, hidden_size, output_size)
learning_rate = 0.001
loss_fn = nn.CrossEntropyLoss()
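# Note: nn.CrossEntropyLoss applies log-softmax internally, so returning Softmax outputs from
# forward() is redundant (the model still trains, but passing raw logits is the usual practice);
# the explicit Softmax is kept here to stay close to the book's example.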
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
num_epochs = 100
loss_hist = [0] * num_epochs
accuracy_hist = [0] * num_epochs
for epoch in range(num_epochs):
for x_batch, y_batch in train_dl:
pred = model(x_batch)
loss = loss_fn(pred, y_batch)
loss.backward()
optimizer.step()
optimizer.zero_grad()
loss_hist[epoch] += loss.item() * y_batch.size(0)
is_correct = (torch.argmax(pred, dim=1) == y_batch).float()
        accuracy_hist[epoch] += is_correct.sum()  # sum of correct predictions; divided by the dataset size below
loss_hist[epoch] /= len(train_dl.dataset)
accuracy_hist[epoch] /= len(train_dl.dataset)
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(1, 2, 1)
ax.plot(loss_hist, lw=3)
ax.set_title("Training Loss", size=15)
ax.set_xlabel("Epoch", size=15)
ax.tick_params(axis="both", which="major", labelsize=15)
ax = fig.add_subplot(1, 2, 2)
ax.plot(accuracy_hist, lw=3)
ax.set_title("Training accuracy", size=15)
ax.set_xlabel("Epoch", size=15)
ax.tick_params(axis="both", which="major", labelsize=15)
plt.show()
X_test_norm = (X_test - np.mean(X_train)) / np.std(X_train)
X_test_norm = X_test_norm.to_numpy()
X_test_norm = torch.from_numpy(X_test_norm).float()
y_test = torch.from_numpy(y_test)
pred_test = model(X_test_norm)
correct = (torch.argmax(pred_test, dim=1) == y_test).float()
accuracy = correct.mean()
print(f"Test Acc.: {accuracy:.3f}")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/702/129702705.ipynb
|
iris-flower-dataset
|
arshid
|
[{"Id": 129702705, "ScriptId": 38563976, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12364689, "CreationDate": "05/15/2023 21:56:30", "VersionNumber": 2.0, "Title": "pytorch mlp iris flower classification", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 116.0, "LinesInsertedFromPrevious": 2.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 114.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 5}]
|
[{"Id": 186033088, "KernelVersionId": 129702705, "SourceDatasetVersionId": 23404}]
|
[{"Id": 23404, "DatasetId": 17860, "DatasourceVersionId": 23408, "CreatorUserId": 1272228, "LicenseName": "CC0: Public Domain", "CreationDate": "03/22/2018 15:18:06", "VersionNumber": 1.0, "Title": "Iris Flower Dataset", "Slug": "iris-flower-dataset", "Subtitle": "Iris flower data set used for multi-class classification.", "Description": "### Context\n\nThe Iris flower data set is a multivariate data set introduced by the British statistician and biologist Ronald Fisher in his 1936 paper The use of multiple measurements in taxonomic problems. It is sometimes called Anderson's Iris data set because Edgar Anderson collected the data to quantify the morphologic variation of Iris flowers of three related species. The data set consists of 50 samples from each of three species of Iris (Iris Setosa, Iris virginica, and Iris versicolor). Four features were measured from each sample: the length and the width of the sepals and petals, in centimeters.\n\nThis dataset became a typical test case for many statistical classification techniques in machine learning such as support vector machines\n\n\n### Content\n\nThe dataset contains a set of 150 records under 5 attributes - Petal Length, Petal Width, Sepal Length, Sepal width and Class(Species).\n\n\n### Acknowledgements\n\nThis dataset is free and is publicly available at the UCI Machine Learning Repository", "VersionNotes": "Initial release", "TotalCompressedBytes": 4617.0, "TotalUncompressedBytes": 4617.0}]
|
[{"Id": 17860, "CreatorUserId": 1272228, "OwnerUserId": 1272228.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 23404.0, "CurrentDatasourceVersionId": 23408.0, "ForumId": 25592, "Type": 2, "CreationDate": "03/22/2018 15:18:06", "LastActivityDate": "03/22/2018", "TotalViews": 467985, "TotalDownloads": 107813, "TotalVotes": 688, "TotalKernels": 925}]
|
[{"Id": 1272228, "UserName": "arshid", "DisplayName": "MathNerd", "RegisterDate": "09/17/2017", "PerformanceTier": 1}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import torch.nn as nn
from torch.utils.data import TensorDataset
import torch
from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# following the book ml with scikit and pytorch
iris_data = pd.read_csv("/kaggle/input/iris-flower-dataset/IRIS.csv")
iris_data.columns
X = iris_data[["sepal_length", "sepal_width", "petal_length", "petal_width"]].copy()
y = iris_data["species"].copy()
X.head()
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
y = le.fit_transform(y)
y
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=1)
X_train_norm = (X_train - np.mean(X_train)) / np.std(X_train)
X_train_norm = X_train_norm.to_numpy()
X_train_norm = torch.from_numpy(X_train_norm).float()
y_train = torch.from_numpy(y_train)
train_ds = TensorDataset(X_train_norm, y_train)
torch.manual_seed(1)
batch_size = 2
train_dl = DataLoader(train_ds, batch_size, shuffle=True)
class Model(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super().__init__()
self.layer1 = nn.Linear(input_size, hidden_size)
self.layer2 = nn.Linear(hidden_size, output_size)
def forward(self, x):
x = self.layer1(x)
x = nn.Sigmoid()(x)
x = self.layer2(x)
x = nn.Softmax(dim=1)(x)
return x
input_size = X_train_norm.shape[1]
hidden_size = 16
output_size = 3
model = Model(input_size, hidden_size, output_size)
learning_rate = 0.001
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
num_epochs = 100
loss_hist = [0] * num_epochs
accuracy_hist = [0] * num_epochs
for epoch in range(num_epochs):
for x_batch, y_batch in train_dl:
pred = model(x_batch)
loss = loss_fn(pred, y_batch)
loss.backward()
optimizer.step()
optimizer.zero_grad()
loss_hist[epoch] += loss.item() * y_batch.size(0)
is_correct = (torch.argmax(pred, dim=1) == y_batch).float()
        accuracy_hist[epoch] += is_correct.sum()  # sum of correct predictions; divided by the dataset size below
loss_hist[epoch] /= len(train_dl.dataset)
accuracy_hist[epoch] /= len(train_dl.dataset)
fig = plt.figure(figsize=(12, 5))
ax = fig.add_subplot(1, 2, 1)
ax.plot(loss_hist, lw=3)
ax.set_title("Training Loss", size=15)
ax.set_xlabel("Epoch", size=15)
ax.tick_params(axis="both", which="major", labelsize=15)
ax = fig.add_subplot(1, 2, 2)
ax.plot(accuracy_hist, lw=3)
ax.set_title("Training accuracy", size=15)
ax.set_xlabel("Epoch", size=15)
ax.tick_params(axis="both", which="major", labelsize=15)
plt.show()
X_test_norm = (X_test - np.mean(X_train)) / np.std(X_train)
X_test_norm = X_test_norm.to_numpy()
X_test_norm = torch.from_numpy(X_test_norm).float()
y_test = torch.from_numpy(y_test)
pred_test = model(X_test_norm)
correct = (torch.argmax(pred_test, dim=1) == y_test).float()
accuracy = correct.mean()
print(f"Test Acc.: {accuracy:.3f}")
|
[{"iris-flower-dataset/IRIS.csv": {"column_names": "[\"sepal_length\", \"sepal_width\", \"petal_length\", \"petal_width\", \"species\"]", "column_data_types": "{\"sepal_length\": \"float64\", \"sepal_width\": \"float64\", \"petal_length\": \"float64\", \"petal_width\": \"float64\", \"species\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 150 entries, 0 to 149\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 sepal_length 150 non-null float64\n 1 sepal_width 150 non-null float64\n 2 petal_length 150 non-null float64\n 3 petal_width 150 non-null float64\n 4 species 150 non-null object \ndtypes: float64(4), object(1)\nmemory usage: 6.0+ KB\n", "summary": "{\"sepal_length\": {\"count\": 150.0, \"mean\": 5.843333333333334, \"std\": 0.828066127977863, \"min\": 4.3, \"25%\": 5.1, \"50%\": 5.8, \"75%\": 6.4, \"max\": 7.9}, \"sepal_width\": {\"count\": 150.0, \"mean\": 3.0540000000000003, \"std\": 0.4335943113621737, \"min\": 2.0, \"25%\": 2.8, \"50%\": 3.0, \"75%\": 3.3, \"max\": 4.4}, \"petal_length\": {\"count\": 150.0, \"mean\": 3.758666666666666, \"std\": 1.7644204199522626, \"min\": 1.0, \"25%\": 1.6, \"50%\": 4.35, \"75%\": 5.1, \"max\": 6.9}, \"petal_width\": {\"count\": 150.0, \"mean\": 1.1986666666666668, \"std\": 0.7631607417008411, \"min\": 0.1, \"25%\": 0.3, \"50%\": 1.3, \"75%\": 1.8, \"max\": 2.5}}", "examples": "{\"sepal_length\":{\"0\":5.1,\"1\":4.9,\"2\":4.7,\"3\":4.6},\"sepal_width\":{\"0\":3.5,\"1\":3.0,\"2\":3.2,\"3\":3.1},\"petal_length\":{\"0\":1.4,\"1\":1.4,\"2\":1.3,\"3\":1.5},\"petal_width\":{\"0\":0.2,\"1\":0.2,\"2\":0.2,\"3\":0.2},\"species\":{\"0\":\"Iris-setosa\",\"1\":\"Iris-setosa\",\"2\":\"Iris-setosa\",\"3\":\"Iris-setosa\"}}"}}]
| true | 1 |
<start_data_description><data_path>iris-flower-dataset/IRIS.csv:
<column_names>
['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species']
<column_types>
{'sepal_length': 'float64', 'sepal_width': 'float64', 'petal_length': 'float64', 'petal_width': 'float64', 'species': 'object'}
<dataframe_Summary>
{'sepal_length': {'count': 150.0, 'mean': 5.843333333333334, 'std': 0.828066127977863, 'min': 4.3, '25%': 5.1, '50%': 5.8, '75%': 6.4, 'max': 7.9}, 'sepal_width': {'count': 150.0, 'mean': 3.0540000000000003, 'std': 0.4335943113621737, 'min': 2.0, '25%': 2.8, '50%': 3.0, '75%': 3.3, 'max': 4.4}, 'petal_length': {'count': 150.0, 'mean': 3.758666666666666, 'std': 1.7644204199522626, 'min': 1.0, '25%': 1.6, '50%': 4.35, '75%': 5.1, 'max': 6.9}, 'petal_width': {'count': 150.0, 'mean': 1.1986666666666668, 'std': 0.7631607417008411, 'min': 0.1, '25%': 0.3, '50%': 1.3, '75%': 1.8, 'max': 2.5}}
<dataframe_info>
RangeIndex: 150 entries, 0 to 149
Data columns (total 5 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 sepal_length 150 non-null float64
1 sepal_width 150 non-null float64
2 petal_length 150 non-null float64
3 petal_width 150 non-null float64
4 species 150 non-null object
dtypes: float64(4), object(1)
memory usage: 6.0+ KB
<some_examples>
{'sepal_length': {'0': 5.1, '1': 4.9, '2': 4.7, '3': 4.6}, 'sepal_width': {'0': 3.5, '1': 3.0, '2': 3.2, '3': 3.1}, 'petal_length': {'0': 1.4, '1': 1.4, '2': 1.3, '3': 1.5}, 'petal_width': {'0': 0.2, '1': 0.2, '2': 0.2, '3': 0.2}, 'species': {'0': 'Iris-setosa', '1': 'Iris-setosa', '2': 'Iris-setosa', '3': 'Iris-setosa'}}
<end_description>
| 1,248 | 5 | 1,947 | 1,248 |
129702071
|
<jupyter_start><jupyter_text>Real Estate DataSet
Concerns housing values in suburbs of Boston.
5. Number of Instances: 506
6. Number of Attributes: 13 continuous attributes (including "class"
attribute "MEDV"), 1 binary-valued attribute.
7. Attribute Information:
1. CRIM per capita crime rate by town
2. ZN proportion of residential land zoned for lots over
25,000 sq.ft.
3. INDUS proportion of non-retail business acres per town
4. CHAS Charles River dummy variable (= 1 if tract bounds
river; 0 otherwise)
5. NOX nitric oxides concentration (parts per 10 million)
6. RM average number of rooms per dwelling
7. AGE proportion of owner-occupied units built prior to 1940
8. DIS weighted distances to five Boston employment centres
9. RAD index of accessibility to radial highways
10. TAX full-value property-tax rate per $10,000
11. PTRATIO pupil-teacher ratio by town
12. B 1000(Bk - 0.63)^2 where Bk is the proportion of blacks
by town
13. LSTAT % lower status of the population
14. MEDV Median value of owner-occupied homes in $1000's
8. Missing Attribute Values: None.
Kaggle dataset identifier: real-estate-dataset
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # **#Price Predictor**
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
housing = pd.read_csv("/kaggle/input/real-estate-dataset/data.csv")
housing.head()
housing.info()
housing["CHAS"].value_counts()
housing.describe()
# housing.hist(bins = 50, figsize = (20,25))
# # Train-test split
np.random.seed(42)
def split_train_test(data, test_ratio):
shuffled = np.random.permutation(len(data))
test_set_size = int(len(data) * test_ratio)
print(shuffled)
test_indices = shuffled[:test_set_size]
train_indices = shuffled[test_set_size:]
return data.iloc[train_indices], data.iloc[test_indices]
train_set, test_set = split_train_test(housing, 0.2)
## print(f"Rows in train set: {len(train_set)}\nRows in test set: {len(test_set)}\n")
from sklearn.model_selection import train_test_split
train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)
print(f"Rows in train set: {len(train_set)}\nRows in test set: {len(test_set)}\n")
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(housing, housing["CHAS"]):
strat_train_set = housing.loc[train_index]
strat_test_set = housing.loc[test_index]
strat_test_set
strat_test_set["CHAS"].value_counts()
strat_train_set["CHAS"].value_counts()
housing = strat_train_set.copy()
# ## Looking for Correlation
corr_matrix = housing.corr()
corr_matrix["MEDV"].sort_values(ascending=False)
from pandas.plotting import scatter_matrix
attributes = ["MEDV", "RM", "ZN", "LSTAT"]
scatter_matrix(housing[attributes], figsize=(12, 8))
housing.plot(kind="scatter", x="RM", y="MEDV", alpha=0.9)
# # **## Attribute combinations**
housing["TAXRM"] = housing["TAX"] / housing["RM"]
housing.head()
corr_matrix = housing.corr()
corr_matrix["MEDV"].sort_values(ascending=False)
housing.plot(kind="scatter", x="TAXRM", y="MEDV", alpha=0.8)
housing = strat_train_set.drop("MEDV", axis=1)
housing_labels = strat_train_set["MEDV"].copy()
# # Missing attributes
## To take care of missing values, you have three options:
## 1 = Get rid of the missing data points.
## 2 = Get rid of the attribute. (cannot do this if the attribute is highly negatively or positively correlated with our target variable)
## 3 = Set the value to (0, mean or median).
# a = housing.dropna(subset=["RM"]) #option 1
# a.shape
# housing.info()
# housing.drop("RM",axis =1) option 2
# note that RM column wont be there and original data is unchanged
# housing.info()
median = housing["RM"].median() # option 3
median
housing["RM"].fillna(median)
housing.describe() # before we started imputer
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(strategy="median")
imputer.fit(housing)
imputer.statistics_
X = imputer.transform(housing)
housing_tr = pd.DataFrame(X, columns=housing.columns)
housing_tr.describe()
# # Scikit-learn design
# Primarily, three types of objects
# 1. Estimators - An estimator estimates some parameters from a dataset, e.g. SimpleImputer. It has a fit() method, which fits the dataset and computes the internal parameters.
# 2. Transformers - The transform() method takes an input and returns an output based on what was learned during fit(). There is also a convenience method, fit_transform(), which fits and transforms in one step.
# 3. Predictors - A LinearRegression model is an example of a predictor. fit() and predict() are the two common methods, and a score() method evaluates the quality of the predictions.
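# A tiny, self-contained illustration of the three object types described above
# (for exposition only; the imputer and pipeline below are what this notebook actually uses):
from sklearn.linear_model import LinearRegression
demo_imputer = SimpleImputer(strategy="median")  # estimator: fit() learns the column medians
demo_X = demo_imputer.fit_transform(housing)  # transformer: fit_transform() imputes the data
demo_model = LinearRegression().fit(demo_X, housing_labels)  # predictor: fit(), then predict()/score()
print("Demo R^2 on the training data:", demo_model.score(demo_X, housing_labels))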
# # Feature Scaling
# Primarily, Two types of feature scaling methods
# 1. Min-max scaling (Normalization)
# (value-min)/(max-min)
# Sklearn provides a class called MinMaxScaler for this
# 2. Standardization
# (value-mean)/std
# Sklearn provides a class called StandardScaler for this
#
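# A small sketch contrasting the two scaling methods named above (illustrative values only):
from sklearn.preprocessing import MinMaxScaler, StandardScaler
demo_vals = np.array([[1.0], [2.0], [3.0], [10.0]])
print("Min-max scaled:", MinMaxScaler().fit_transform(demo_vals).ravel())
print("Standardized:  ", StandardScaler().fit_transform(demo_vals).ravel())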
# # Creating a Pipeline
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
my_pipeline = Pipeline(
[
("imputer", SimpleImputer(strategy="median")),
("std_scaler", StandardScaler()),
]
)
housing_num_tr = my_pipeline.fit_transform(housing)
housing_num_tr.shape
# # Selecting desired model
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
# model = LinearRegression()
# model = DecisionTreeRegressor()
model = RandomForestRegressor()
model.fit(housing_num_tr, housing_labels)
some_data = housing.iloc[:5]
some_labels = housing_labels.iloc[:5]
prepared_data = my_pipeline.transform(some_data)
model.predict(prepared_data)
list(some_labels)
# # **Evaluating the model**
from sklearn.metrics import mean_squared_error
housing_predictions = model.predict(housing_num_tr)
lin_mse = mean_squared_error(housing_labels, housing_predictions)
lin_rmse = np.sqrt(lin_mse)
lin_mse
# # **Cross Validation**
from sklearn.model_selection import cross_val_score
scores = cross_val_score(
model, housing_num_tr, housing_labels, scoring="neg_mean_squared_error", cv=10
)
rmse_scores = np.sqrt(-scores)
rmse_scores
def print_scores(scores):
print("Scores", scores)
print("Mean:", scores.mean())
print("Standard deviation:", scores.std())
print_scores(rmse_scores)
# Decisiontree
# Scores [4.59320814 3.94068831 4.73456414 5.2260604 4.75674104 2.80261202
# 6.94054893 4.0612986 3.61379302 3.52593959]
# Mean: 4.419545420361814
# Standard deviation: 1.0802272160682334
# Scores [5.66658666 4.44508419 5.81280069 4.83395793 4.79407641 3.78038514
# 8.94771005 4.60112855 3.85185723 5.19342927]
# Mean: 5.19270161243899
# Standard deviation: 1.4035016858670373
# Scores [3.0156727 3.95652734 4.78965837 4.08222363 3.00375459 2.30497579
# 6.9512471 2.89076137 3.27099817 3.59033198]
# Mean: 3.7856151036807972
# Standard deviation: 1.250865925782546
# # **Saving the model**
from joblib import dump, load
dump(model, "Boston.joblib")
# # **Testing the model**
X_test = strat_test_set.drop("MEDV", axis=1)
Y_test = strat_test_set["MEDV"].copy()
X_test_prepared = my_pipeline.transform(X_test)
final_predictions = model.predict(X_test_prepared)
final_mse = mean_squared_error(Y_test, final_predictions)
final_rmse = np.sqrt(final_mse)
# print(final_predictions,list(Y_test))
final_rmse
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/702/129702071.ipynb
|
real-estate-dataset
|
arslanali4343
|
[{"Id": 129702071, "ScriptId": 37460635, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11015153, "CreationDate": "05/15/2023 21:46:40", "VersionNumber": 12.0, "Title": "notebookc678d02d50", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 245.0, "LinesInsertedFromPrevious": 86.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 159.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
|
[{"Id": 186032306, "KernelVersionId": 129702071, "SourceDatasetVersionId": 1523358}]
|
[{"Id": 1523358, "DatasetId": 898072, "DatasourceVersionId": 1557741, "CreatorUserId": 4795937, "LicenseName": "Other (specified in description)", "CreationDate": "09/28/2020 21:25:33", "VersionNumber": 1.0, "Title": "Real Estate DataSet", "Slug": "real-estate-dataset", "Subtitle": "Dragon Real Estate - Price Predictor", "Description": "Concerns housing values in suburbs of Boston.\n\n5. Number of Instances: 506\n\n6. Number of Attributes: 13 continuous attributes (including \"class\"\n attribute \"MEDV\"), 1 binary-valued attribute.\n\n7. Attribute Information:\n\n 1. CRIM per capita crime rate by town\n 2. ZN proportion of residential land zoned for lots over \n 25,000 sq.ft.\n 3. INDUS proportion of non-retail business acres per town\n 4. CHAS Charles River dummy variable (= 1 if tract bounds \n river; 0 otherwise)\n 5. NOX nitric oxides concentration (parts per 10 million)\n 6. RM average number of rooms per dwelling\n 7. AGE proportion of owner-occupied units built prior to 1940\n 8. DIS weighted distances to five Boston employment centres\n 9. RAD index of accessibility to radial highways\n 10. TAX full-value property-tax rate per $10,000\n 11. PTRATIO pupil-teacher ratio by town\n 12. B 1000(Bk - 0.63)^2 where Bk is the proportion of blacks \n by town\n 13. LSTAT % lower status of the population\n 14. MEDV Median value of owner-occupied homes in $1000's\n\n8. Missing Attribute Values: None.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 898072, "CreatorUserId": 4795937, "OwnerUserId": 4795937.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 1523358.0, "CurrentDatasourceVersionId": 1557741.0, "ForumId": 913698, "Type": 2, "CreationDate": "09/28/2020 21:25:33", "LastActivityDate": "09/28/2020", "TotalViews": 187006, "TotalDownloads": 20786, "TotalVotes": 360, "TotalKernels": 41}]
|
[{"Id": 4795937, "UserName": "arslanali4343", "DisplayName": "Arslan Ali", "RegisterDate": "04/02/2020", "PerformanceTier": 3}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # **#Price Predictor**
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sb
housing = pd.read_csv("/kaggle/input/real-estate-dataset/data.csv")
housing.head()
housing.info()
housing["CHAS"].value_counts()
housing.describe()
# housing.hist(bins = 50, figsize = (20,25))
# # Train-test split
np.random.seed(42)
def split_train_test(data, test_ratio):
shuffled = np.random.permutation(len(data))
test_set_size = int(len(data) * test_ratio)
print(shuffled)
test_indices = shuffled[:test_set_size]
train_indices = shuffled[test_set_size:]
return data.iloc[train_indices], data.iloc[test_indices]
train_set, test_set = split_train_test(housing, 0.2)
## print(f"Rows in train set: {len(train_set)}\nRows in test set: {len(test_set)}\n")
from sklearn.model_selection import train_test_split
train_set, test_set = train_test_split(housing, test_size=0.2, random_state=42)
print(f"Rows in train set: {len(train_set)}\nRows in test set: {len(test_set)}\n")
from sklearn.model_selection import StratifiedShuffleSplit
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(housing, housing["CHAS"]):
strat_train_set = housing.loc[train_index]
strat_test_set = housing.loc[test_index]
strat_test_set
strat_test_set["CHAS"].value_counts()
strat_train_set["CHAS"].value_counts()
housing = strat_train_set.copy()
# ## Looking for Correlation
corr_matrix = housing.corr()
corr_matrix["MEDV"].sort_values(ascending=False)
from pandas.plotting import scatter_matrix
attributes = ["MEDV", "RM", "ZN", "LSTAT"]
scatter_matrix(housing[attributes], figsize=(12, 8))
housing.plot(kind="scatter", x="RM", y="MEDV", alpha=0.9)
# # **## Attribute combinations**
housing["TAXRM"] = housing["TAX"] / housing["RM"]
housing.head()
corr_matrix = housing.corr()
corr_matrix["MEDV"].sort_values(ascending=False)
housing.plot(kind="scatter", x="TAXRM", y="MEDV", alpha=0.8)
housing = strat_train_set.drop("MEDV", axis=1)
housing_labels = strat_train_set["MEDV"].copy()
# # Missing attributes
## To take care of missing values, you have three options:
## 1 = Get rid of the missing data points.
## 2 = Get rid of the attribute. (cannot do this if the attribute is highly negatively or positively correlated with our target variable)
## 3 = Set the value to (0, mean or median).
# a = housing.dropna(subset=["RM"]) #option 1
# a.shape
# housing.info()
# housing.drop("RM",axis =1) option 2
# note that RM column wont be there and original data is unchanged
# housing.info()
median = housing["RM"].median() # option 3
median
housing["RM"].fillna(median)
housing.describe() # before we started imputer
from sklearn.impute import SimpleImputer
imputer = SimpleImputer(strategy="median")
imputer.fit(housing)
imputer.statistics_
X = imputer.transform(housing)
housing_tr = pd.DataFrame(X, columns=housing.columns)
housing_tr.describe()
# # Scikit-learn design
# Primarily, three types of objects
# 1. Estimators - An estimator estimates some parameters from a dataset, e.g. SimpleImputer. It has a fit() method, which fits the dataset and computes the internal parameters.
# 2. Transformers - The transform() method takes an input and returns an output based on what was learned during fit(). There is also a convenience method, fit_transform(), which fits and transforms in one step.
# 3. Predictors - A LinearRegression model is an example of a predictor. fit() and predict() are the two common methods, and a score() method evaluates the quality of the predictions.
# # Feature Scaling
# Primarily, Two types of feature scaling methods
# 1. Min-max scaling (Normalization)
# (value-min)/(max-min)
# Sklearn provides a class called MinMaxScaler for this
# 2. Standardization
# (value-mean)/std
# Sklearn provides a class called StandardScaler for this
#
# # Creating a Pipeline
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
my_pipeline = Pipeline(
[
("imputer", SimpleImputer(strategy="median")),
("std_scaler", StandardScaler()),
]
)
housing_num_tr = my_pipeline.fit_transform(housing)
housing_num_tr.shape
# # Selecting desired model
from sklearn.linear_model import LinearRegression
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor
# model = LinearRegression()
# model = DecisionTreeRegressor()
model = RandomForestRegressor()
model.fit(housing_num_tr, housing_labels)
some_data = housing.iloc[:5]
some_labels = housing_labels.iloc[:5]
prepared_data = my_pipeline.transform(some_data)
model.predict(prepared_data)
list(some_labels)
# # **Evaluating the model**
from sklearn.metrics import mean_squared_error
housing_predictions = model.predict(housing_num_tr)
lin_mse = mean_squared_error(housing_labels, housing_predictions)
lin_rmse = np.sqrt(lin_mse)
lin_mse
# # **Cross Validation**
from sklearn.model_selection import cross_val_score
scores = cross_val_score(
model, housing_num_tr, housing_labels, scoring="neg_mean_squared_error", cv=10
)
rmse_scores = np.sqrt(-scores)
rmse_scores
def print_scores(scores):
print("Scores", scores)
print("Mean:", scores.mean())
print("Standard deviation:", scores.std())
print_scores(rmse_scores)
# Decisiontree
# Scores [4.59320814 3.94068831 4.73456414 5.2260604 4.75674104 2.80261202
# 6.94054893 4.0612986 3.61379302 3.52593959]
# Mean: 4.419545420361814
# Standard deviation: 1.0802272160682334
# Scores [5.66658666 4.44508419 5.81280069 4.83395793 4.79407641 3.78038514
# 8.94771005 4.60112855 3.85185723 5.19342927]
# Mean: 5.19270161243899
# Standard deviation: 1.4035016858670373
# Scores [3.0156727 3.95652734 4.78965837 4.08222363 3.00375459 2.30497579
# 6.9512471 2.89076137 3.27099817 3.59033198]
# Mean: 3.7856151036807972
# Standard deviation: 1.250865925782546
# # **Saving the model**
from joblib import dump, load
dump(model, "Boston.joblib")
# # **Testing the model**
X_test = strat_test_set.drop("MEDV", axis=1)
Y_test = strat_test_set["MEDV"].copy()
X_test_prepared = my_pipeline.transform(X_test)
final_predictions = model.predict(X_test_prepared)
final_mse = mean_squared_error(Y_test, final_predictions)
final_rmse = np.sqrt(final_mse)
# print(final_predictions,list(Y_test))
final_rmse
| false | 1 | 2,446 | 1 | 2,811 | 2,446 |
||
129696126
|
import pandas as pd
import warnings
warnings.filterwarnings(action="ignore")
train = pd.read_csv(
"../input/birdclef-2022/train_metadata.csv",
)
train.head()
# Code adapted from https://www.kaggle.com/shahules/bird-watch-complete-eda-fe
# Make sure to check out the entire notebook.
import plotly.graph_objects as go
# Unique eBird codes
species = train["primary_label"].value_counts()
# Make bar chart
fig = go.Figure(
data=[go.Bar(y=species.values, x=species.index)],
layout=go.Layout(margin=go.layout.Margin(l=0, r=0, b=10, t=50)),
)
# Show chart
fig.update_layout(title="Number of training samples per species")
fig.show()
import os
from types import SimpleNamespace
import numpy as np
cfg = SimpleNamespace()
cfg.data_dir = "../input/birdclef-2022/"
cfg.train_data_folder = cfg.data_dir + "train_audio/"
cfg.val_data_folder = cfg.data_dir + "train_audio/"
import numpy as np
import pandas as pd
from scipy.io import wavfile
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
from tensorflow.keras.utils import to_categorical
# Load and preprocess the dataset
csv_path = "../input/birdclef-2022/train_metadata.csv"
# Read CSV file
data_df = pd.read_csv(csv_path)
# Extract audio paths and labels
audio_paths = data_df["filename"].tolist()
labels = data_df["common_name"].tolist()
X = []
y = []
# Iterate over audio paths (filenames in the metadata are relative to the train_audio folder)
for audio_path, label in zip(audio_paths, labels):
    # NOTE: the BirdCLEF 2022 clips are .ogg files, which scipy.io.wavfile cannot decode;
    # in practice a decoder such as librosa or soundfile would be needed here.
    sample_rate, audio = wavfile.read(os.path.join(cfg.train_data_folder, audio_path))
    # Preprocessing steps (e.g., pad, normalize, spectrogram conversion)
    # ...
    # Append preprocessed audio and label to X and y
    # (preprocessed_audio must be produced by the preprocessing elided above)
    X.append(preprocessed_audio)
    y.append(label)
X = np.array(X)
y = np.array(y)
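# The preprocessing above is only sketched ("# ..."); one possible implementation is shown
# below under the assumption that librosa is available and that fixed-length mel-spectrograms
# are the desired model input. This helper is illustrative, not part of the original notebook.
import librosa
def to_mel_spectrogram(path, sr=32000, n_mels=128, duration=5):
    audio, sr = librosa.load(path, sr=sr, duration=duration)  # librosa also decodes .ogg
    audio = np.pad(audio, (0, max(0, sr * duration - len(audio))))  # pad short clips
    mel = librosa.feature.melspectrogram(y=audio, sr=sr, n_mels=n_mels)
    return librosa.power_to_db(mel, ref=np.max)[..., np.newaxis]  # add a channel axis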
# Split the dataset into train and test sets
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# Normalize the input features
X_train = X_train / np.max(X_train)
X_test = X_test / np.max(X_test)
# Convert labels to categorical (one-hot) format; to_categorical expects integer class
# indices, so the common-name strings are first mapped to integers
from sklearn.preprocessing import LabelEncoder

label_encoder = LabelEncoder().fit(labels)
num_classes = len(label_encoder.classes_)
y_train = to_categorical(label_encoder.transform(y_train), num_classes)
y_test = to_categorical(label_encoder.transform(y_test), num_classes)
# Define the CNN architecture
model = keras.Sequential(
[
Conv2D(
32, kernel_size=(3, 3), activation="relu", input_shape=(X_train.shape[1:])
),
MaxPooling2D(pool_size=(2, 2)),
Conv2D(64, kernel_size=(3, 3), activation="relu"),
MaxPooling2D(pool_size=(2, 2)),
Flatten(),
Dense(128, activation="relu"),
Dense(num_classes, activation="softmax"),
]
)
# Compile the model
model.compile(
loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adam(),
metrics=["accuracy"],
)
# Train the model
model.fit(X_train, y_train, batch_size=32, epochs=10, validation_data=(X_test, y_test))
# Evaluate the model on test data
test_loss, test_accuracy = model.evaluate(X_test, y_test)
print("Test Loss:", test_loss)
print("Test Accuracy:", test_accuracy)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/696/129696126.ipynb
| null | null |
[{"Id": 129696126, "ScriptId": 38567306, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7041326, "CreationDate": "05/15/2023 20:26:23", "VersionNumber": 1.0, "Title": "mini-project", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 102.0, "LinesInsertedFromPrevious": 102.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import pandas as pd
import warnings
warnings.filterwarnings(action="ignore")
train = pd.read_csv(
"../input/birdclef-2022/train_metadata.csv",
)
train.head()
# Code adapted from https://www.kaggle.com/shahules/bird-watch-complete-eda-fe
# Make sure to check out the entire notebook.
import plotly.graph_objects as go
# Unique eBird codes
species = train["primary_label"].value_counts()
# Make bar chart
fig = go.Figure(
data=[go.Bar(y=species.values, x=species.index)],
layout=go.Layout(margin=go.layout.Margin(l=0, r=0, b=10, t=50)),
)
# Show chart
fig.update_layout(title="Number of training samples per species")
fig.show()
import os
from types import SimpleNamespace
import numpy as np
cfg = SimpleNamespace()
cfg.data_dir = "../input/birdclef-2022/"
cfg.train_data_folder = cfg.data_dir + "train_audio/"
cfg.val_data_folder = cfg.data_dir + "train_audio/"
import numpy as np
import pandas as pd
from scipy.io import wavfile
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense
from tensorflow.keras.utils import to_categorical
# Load and preprocess the dataset
csv_path = "../input/birdclef-2022/train_metadata.csv"
# Read CSV file
data_df = pd.read_csv(csv_path)
# Extract audio paths and labels
audio_paths = data_df["filename"].tolist()
labels = data_df["common_name"].tolist()
X = []
y = []
# Iterate over audio paths (filenames in the metadata are relative to the train_audio folder)
for audio_path, label in zip(audio_paths, labels):
    # NOTE: the BirdCLEF 2022 clips are .ogg files, which scipy.io.wavfile cannot decode;
    # in practice a decoder such as librosa or soundfile would be needed here.
    sample_rate, audio = wavfile.read(os.path.join(cfg.train_data_folder, audio_path))
    # Preprocessing steps (e.g., pad, normalize, spectrogram conversion)
    # ...
    # Append preprocessed audio and label to X and y
    # (preprocessed_audio must be produced by the preprocessing elided above)
    X.append(preprocessed_audio)
    y.append(label)
X = np.array(X)
y = np.array(y)
# Split the dataset into train and test sets
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
# Normalize the input features
X_train = X_train / np.max(X_train)
X_test = X_test / np.max(X_test)
# Convert labels to categorical (one-hot) format; to_categorical expects integer class
# indices, so the common-name strings are first mapped to integers
from sklearn.preprocessing import LabelEncoder

label_encoder = LabelEncoder().fit(labels)
num_classes = len(label_encoder.classes_)
y_train = to_categorical(label_encoder.transform(y_train), num_classes)
y_test = to_categorical(label_encoder.transform(y_test), num_classes)
# Define the CNN architecture
model = keras.Sequential(
[
Conv2D(
32, kernel_size=(3, 3), activation="relu", input_shape=(X_train.shape[1:])
),
MaxPooling2D(pool_size=(2, 2)),
Conv2D(64, kernel_size=(3, 3), activation="relu"),
MaxPooling2D(pool_size=(2, 2)),
Flatten(),
Dense(128, activation="relu"),
Dense(num_classes, activation="softmax"),
]
)
# Compile the model
model.compile(
loss=keras.losses.categorical_crossentropy,
optimizer=keras.optimizers.Adam(),
metrics=["accuracy"],
)
# Train the model
model.fit(X_train, y_train, batch_size=32, epochs=10, validation_data=(X_test, y_test))
# Evaluate the model on test data
test_loss, test_accuracy = model.evaluate(X_test, y_test)
print("Test Loss:", test_loss)
print("Test Accuracy:", test_accuracy)
| false | 0 | 971 | 0 | 971 | 971 |
||
129696146
|
# # **Comparison of Importance Metrics for Feature Selection**
# I like to return to the Titanic competition to test new concepts I'm learning, so this time I wanted to take a look at different feature importance metrics and what they tell us about this dataset, to inform feature selection and engineering.
# In particular, I will compare:
# 1. Mutual information scores
# 2. Permutation importance
# 3. Partial dependence plots
# 4. SHAP values
# Since some of these metrics operate on a fitted model, I will compare **7 baseline models** to illustrate differences and similarities.
# Let's dive in! 🤿
import pandas as pd
import numpy as np
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# Models
from sklearn.ensemble import (
    RandomForestClassifier,
    GradientBoostingClassifier,
    HistGradientBoostingClassifier,
)
from sklearn.linear_model import LogisticRegression, RidgeClassifier
from xgboost import XGBClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.svm import SVC
from matplotlib import pyplot as plt
plt.rcParams["figure.figsize"] = (4, 4)
seed = 17
import warnings
warnings.filterwarnings("ignore")
# # **Preprocessing**
# Here we load the data and check for missing values. Subsequently, we address those missing values by imputation, or by dropping the feature if it is not of interest. In this case I drop *"Name", "Ticket", "Cabin", "PassengerId"* right away.
training_set = pd.read_csv("/kaggle/input/titanic/train.csv")
submission_set = pd.read_csv("/kaggle/input/titanic/test.csv")
training_set.head()
# Check which columns have missing values
print("Missing values in training set : ")
print(training_set.isna().sum())
print("\nMissing values in submission set : ")
print(submission_set.isna().sum())
X = training_set.copy()
# We apply all transformations both to the training set and the submission set
X_subm = submission_set.copy()
# We remove the target column from the training data
y = X.pop("Survived")
# We choose which features to ignore from starters and drop them
columns_to_drop = ["Name", "Ticket", "Cabin", "PassengerId"]
X = X.drop(columns=columns_to_drop)
X_subm = X_subm.drop(columns=columns_to_drop)
# We fill the missing values in the training and submission sets
X["Age"].fillna(X["Age"].median(), inplace=True)
X["Embarked"].fillna("UNK", inplace=True)
X_subm["Age"].fillna(X["Age"].median(), inplace=True)
X_subm["Fare"].fillna(X_subm["Fare"].median(), inplace=True)
# We encode categorical variables to a numerical representation
enc_sex = LabelEncoder()
X["Sex"] = enc_sex.fit_transform(X["Sex"])
X_subm["Sex"] = enc_sex.transform(X_subm["Sex"])
enc_emb = LabelEncoder()
X["Embarked"] = enc_sex.fit_transform(X["Embarked"])
X_subm["Embarked"] = enc_sex.transform(X_subm["Embarked"])
# And we split it in train and test sets.
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, shuffle=True, random_state=seed
)
# # **Training the models**
# Here we train a group of baseline models using all the features and compute the accuracy on the test set.
# Dictionary of models to test
models = {
"RandomForest": RandomForestClassifier(random_state=seed),
"LogisticRegression": LogisticRegression(solver="liblinear", random_state=seed),
"XGB": XGBClassifier(random_state=seed),
"GaussianProcess": GaussianProcessClassifier(random_state=seed),
"HistGradientBoosting": HistGradientBoostingClassifier(random_state=seed),
"MLP": MLPClassifier(
hidden_layer_sizes=(100, 100, 100),
max_iter=1000,
activation="logistic",
random_state=seed,
),
"SVC": SVC(random_state=seed),
}
for n, (model_name, model) in enumerate(models.items()):
model.fit(X_train, y_train)
preds = model.predict(X_test)
score = accuracy_score(y_test, preds)
print("Accuracy of", model_name, ":", np.round(score, 4))
# # **Mutual information**
# The ***mutual information score*** is a non-parametric measure that quantifies the dependence between two variables, regardless of their distribution. The mutual information score ranges from 0 (indicating no relationship between the variables) to a maximum value (indicating a perfect relationship). It can be used in feature selection, clustering, and classification tasks, where the aim is to identify the most informative variables for a given problem. Mutual information scores are calculated on the data, rather than on the specific fitted model.
# Documentation for **[mutual_info_regression](https://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.mutual_info_regression.html)** from the scikit-learn library.
# **Observation**: the score tells us that only ***Sex***, ***Age***, ***Pclass*** and ***Fare*** have some level of impact on the survival predictions, in decreasing order of importance. This suggests that excluding Embarked, Parch and SibSp may be possible without a significant loss in the predictive ability of the models.
from sklearn.feature_selection import mutual_info_regression
X_mi = X_train.copy()
for colname in X_mi.columns:
X_mi[colname], _ = X_mi[colname].factorize()
def make_mi_scores(X, y):
mi_scores = mutual_info_regression(X, y, random_state=seed)
mi_scores = pd.Series(mi_scores, name="MI Scores", index=X.columns)
mi_scores = mi_scores.sort_values(ascending=False)
return mi_scores
mi_scores = make_mi_scores(X_mi, y_train)
def plot_mi_scores(scores):
    scores = scores.sort_values(ascending=True)
    width = np.arange(len(scores))
    ticks = list(scores.index)
    # create the figure before drawing, so the bars, grid and layout all land on the same figure
    plt.figure(figsize=(3, 3), dpi=80)
    plt.barh(width, scores, height=0.6, color="springgreen")
    plt.yticks(width, ticks)
    # plt.xticks([0.01*i for i in range(20)])
    plt.title("Mutual Information Scores", weight="bold")
    plt.grid(axis="x", linestyle="--", color="0.8")
    plt.tight_layout()
plot_mi_scores(mi_scores)
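# Since the target here is binary, mutual_info_classif is the natural classification analogue
# of mutual_info_regression; a minimal sketch for comparison (not part of the original analysis):
from sklearn.feature_selection import mutual_info_classif
mi_clf = mutual_info_classif(X_mi, y_train, random_state=seed)
print(pd.Series(mi_clf, index=X_mi.columns).sort_values(ascending=False))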
# # **Permutation importance**
# ***Permutation importance*** is calculated by taking a fitted model's predictions on the test set and randomly permuting the values of a given feature. If the predictions are insensitive to these permutations, that feature's permutation importance score will be low, which means the model does not rely on it to predict the target and the variable can be ignored.
# Documentation for **[PermutationImportance](https://eli5.readthedocs.io/en/latest/blackbox/permutation_importance.html)**
# **Observation**: each model considers a different subset of features important. Most models concur in deriving the most information from *"Sex"*, but for some it is *"Fare"* that has the highest score. Overall *"Embarked"*, *"SibSp"* and *"Parch"* score low for all models, in accordance with the Mutual Information results, except for LogisticRegression and GaussianProcess, which give *"SibSp"* a score of 0.02.
import eli5
from eli5.sklearn import PermutationImportance
from IPython.display import display
for n, (model_name, model) in enumerate(models.items()):
perm = PermutationImportance(model, random_state=seed).fit(X_test, y_test)
print("\n", model_name)
display(eli5.show_weights(perm, feature_names=list(X_test.columns)))
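# scikit-learn also ships a permutation_importance function; a minimal sketch of the same idea
# for one of the fitted models (an alternative to eli5, not used in the analysis below):
from sklearn.inspection import permutation_importance
perm_sk = permutation_importance(
    models["RandomForest"], X_test, y_test, n_repeats=10, random_state=seed
)
print(pd.Series(perm_sk.importances_mean, index=X_test.columns).sort_values(ascending=False))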
# Now let's retrain the models using only the subset of features with a weight greater than 0.01 and recalculate the accuracy. In 6 out of 7 cases **excluding the uninformative features increased the accuracy of the predictions**!
to_keep = {
"RandomForest": ["Sex", "Pclass", "Age", "Fare", "SibSp"],
"LogisticRegression": ["Sex", "Pclass", "SibSp"],
"XGB": ["Sex", "Pclass", "Age", "Fare"],
"GaussianProcess": ["Sex", "Pclass", "Age", "Fare", "SibSp"],
"HistGradientBoosting": ["Sex", "Pclass", "Age", "Fare", "SibSp"],
"MLP": ["Sex", "Age", "SibSp"],
"SVC": ["Fare", "Age"],
}
for n, (model_name, model) in enumerate(models.items()):
model.fit(X_train[to_keep[model_name]], y_train)
preds = model.predict(X_test[to_keep[model_name]])
score = accuracy_score(y_test, preds)
print("Accuracy of", model_name, ":", np.round(score, 4))
# ### **Comparison of accuracy with all features or only with relevant features for each model**
# |Model| Accuracy with all features | With reduced features set|
# |----|-----|----|
# |RandomForest|0.7989 | 0.8045|
# | LogisticRegression|0.7765 | 0.7877|
# | XGB|0.8045 | 0.8268|
# | GaussianProcess|0.6816 | 0.6927|
# | HistGradientBoosting|0.8212 | 0.8045|
# | MLP|0.7877 | 0.7989|
# | SVC|0.6760 | 0.6816|
# # **Partial Dependence Plots**
# Partial dependence plots can help us visualize the relationship between the target variable (Survival) and a specific feature *while keeping other variables constant*. They provide insights into how each feature impacts the target, revealing trends, interactions, and non-linearities.
# For example, we can see that, for all models that used Sex as a feature, 0 (Female) has a higher correlation with Survival than 1 (Male). Concerning features with continuous values, such as Age, for almost all models, the highest correlation with survival is found in the case of infants.
# Another interesting example is the SibSp plot for the GaussianProcess model. This trend tells us that passengers travelling without siblings or a spouse have the lowest correlation with survival, but that beyond 1 sibling or spouse there is no added benefit, in terms of survival chances, to belonging to a larger family.
# Documentation for **[PartialDependenceDisplay](https://scikit-learn.org/stable/modules/generated/sklearn.inspection.PartialDependenceDisplay.html)**
from sklearn.inspection import PartialDependenceDisplay
colors = plt.get_cmap("Accent")(np.linspace(0.0, 1.0, 7))
for n, (model_name, model) in enumerate(models.items()):
figsize = (6, 3) if len(to_keep[model_name]) > 3 else (6, 1.5)
fig, ax = plt.subplots(figsize=figsize, dpi=80)
ax.set_title(model_name, weight="bold")
display = PartialDependenceDisplay.from_estimator(
model,
X_test[to_keep[model_name]],
to_keep[model_name],
ax=ax,
line_kw={"color": colors[n]},
)
plt.subplots_adjust(top=1.6)
plt.show()
# Interestingly, we can also explore 2D partial dependence plot, to uncover interaction between the features. Below are two examples.
# ### **Age & Fare in the SVC model**
fig, ax = plt.subplots()
fnames = [("Age", "Fare")]
disp = PartialDependenceDisplay.from_estimator(
models["SVC"], X_test[to_keep["SVC"]], fnames, ax=ax
)
plt.show()
# ### **Age & Sex in the MLP model**
fig, ax = plt.subplots()
fnames = [("Age", "Sex")]
disp = PartialDependenceDisplay.from_estimator(
models["MLP"], X_test[to_keep["MLP"]], fnames, ax=ax
)
plt.show()
# # **SHAP**
# SHAP (Shapley Additive Explanations) assigns importance values to each feature in a prediction model, quantifying its contribution towards the prediction outcome. In this way, it can be clarified what led to a given prediction, providing a comprehensive understanding of feature impact and facilitating model explanation and evaluation.
# In the following, as an example, I compare the predictions of all models for the first entry in the test set
import shap
for n, (model_name, model) in enumerate(models.items()):
# Fits the explainer
explainer = shap.Explainer(model.predict, X_test[to_keep[model_name]])
# Calculates the SHAP values - It takes some time
shap_values = explainer(X_test[to_keep[model_name]])
# shap.plots.bar(shap_values[0])
print(model_name)
shap.initjs()
shap.plots.force(shap_values[0], matplotlib=True, show=True)
# In addition to single predictions, we can also look at summary plots for all the entries in the dataset. Below I plotted the results for the XGB model. It can be seen, for example, that high values of "Sex" (red = male) reduce the value of the prediction, which translates to a lower survival chance. If we look at Age, on the other hand, we see a strong effect on the prediction for the extreme values (very high age lowers the prediction, very low age increases it) and a more modest effect for the intermediate values (purple), which tend to cluster around zero.
explainer = shap.TreeExplainer(models["XGB"])
shap_values = explainer.shap_values(X_test[to_keep["XGB"]])
shap.summary_plot(shap_values, X_test[to_keep["XGB"]])
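# A single-feature dependence plot can complement the summary view; a small illustrative
# addition reusing the TreeExplainer values computed above (not part of the original analysis):
shap.dependence_plot("Age", shap_values, X_test[to_keep["XGB"]])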
# # **Submission**
# After we have completed all the preliminary analysis and chosen the best modelling approach, we can retrain the best model on the whole training data to further increase its accuracy (at this final stage, the test set is effectively the one used to score the submission) and submit our predictions.
models["XGB"].fit(X[to_keep["XGB"]], y)
results = models["XGB"].predict(X_subm[to_keep["XGB"]])
submission_df = pd.DataFrame(
{"PassengerId": submission_set["PassengerId"], "Survived": results}
)
submission_df.to_csv("submission.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/696/129696146.ipynb
| null | null |
[{"Id": 129696146, "ScriptId": 38449382, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13287139, "CreationDate": "05/15/2023 20:26:36", "VersionNumber": 1.0, "Title": "Comparison of Feature Importance Metrics \ud83d\udcca", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 273.0, "LinesInsertedFromPrevious": 273.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 5}]
| null | null | null | null |
# # **Comparison of Importance Metrics for Feature Selection**
# I like to return to the Titanic competition to test new concepts I'm learning, so this time I wanted to take a look at different feature importance metrics and what they tell us about this dataset, to inform feature selection and engineering.
# In particular, I will compare:
# 1. Mutual information scores
# 2. Permutation importance
# 3. Partial dependence plots
# 4. SHAP values
# Since some of these metrics operate on a fitted model, I will compare **7 baselines models** to illustrate differences and similarities.
# Let's dive in! 🤿
import pandas as pd
import numpy as np
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
# Models
from sklearn.ensemble._forest import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from xgboost import XGBClassifier
from sklearn.gaussian_process._gpc import GaussianProcessClassifier
from sklearn.ensemble._gb import GradientBoostingClassifier
from sklearn.ensemble._hist_gradient_boosting.gradient_boosting import (
HistGradientBoostingClassifier,
)
from sklearn.neural_network._multilayer_perceptron import MLPClassifier
from sklearn.svm._classes import SVC
from sklearn.linear_model._ridge import RidgeClassifier
from matplotlib import pyplot as plt
plt.rcParams["figure.figsize"] = (4, 4)
seed = 17
import warnings
warnings.filterwarnings("ignore")
# # **Preprocessing**
# Here we load the data and perform a check on missing values. Subsequently, we address those missing values by imputation or by dropping the feature if it not of interest. In this case I drop right away *"Name", "Ticket", "Cabin", "PassengerId"*.
training_set = pd.read_csv("/kaggle/input/titanic/train.csv")
submission_set = pd.read_csv("/kaggle/input/titanic/test.csv")
training_set.head()
# Check which columns have missing values
print("Missing values in training set : ")
print(training_set.isna().sum())
print("\nMissing values in submission set : ")
print(submission_set.isna().sum())
X = training_set.copy()
# We apply all transformations both to the trainig set and the submission set
X_subm = submission_set.copy()
# We remove the target column from the training data
y = X.pop("Survived")
# We choose which features to ignore from starters and drop them
columns_to_drop = ["Name", "Ticket", "Cabin", "PassengerId"]
X = X.drop(columns=columns_to_drop)
X_subm = X_subm.drop(columns=columns_to_drop)
# We fill the missing values in the training and submission sets
X["Age"].fillna(X["Age"].median(), inplace=True)
X["Embarked"].fillna("UNK", inplace=True)
X_subm["Age"].fillna(X["Age"].median(), inplace=True)
X_subm["Fare"].fillna(X_subm["Fare"].median(), inplace=True)
# We encode categorical variables to a numerical representation
enc_sex = LabelEncoder()
X["Sex"] = enc_sex.fit_transform(X["Sex"])
X_subm["Sex"] = enc_sex.transform(X_subm["Sex"])
enc_emb = LabelEncoder()
X["Embarked"] = enc_sex.fit_transform(X["Embarked"])
X_subm["Embarked"] = enc_sex.transform(X_subm["Embarked"])
# And we split it in train and test sets.
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, shuffle=True, random_state=seed
)
# # **Training the models**
# Here we train a group of baseline models using all the features and compute the accuracy on the test set.
# Dictionary of models to test
models = {
"RandomForest": RandomForestClassifier(random_state=seed),
"LogisticRegression": LogisticRegression(solver="liblinear", random_state=seed),
"XGB": XGBClassifier(random_state=seed),
"GaussianProcess": GaussianProcessClassifier(random_state=seed),
"HistGradientBoosting": HistGradientBoostingClassifier(random_state=seed),
"MLP": MLPClassifier(
hidden_layer_sizes=(100, 100, 100),
max_iter=1000,
activation="logistic",
random_state=seed,
),
"SVC": SVC(random_state=seed),
}
for n, (model_name, model) in enumerate(models.items()):
model.fit(X_train, y_train)
preds = model.predict(X_test)
score = accuracy_score(y_test, preds)
print("Accuracy of", model_name, ":", np.round(score, 4))
# # **Mutual information**
# The ***mutual information score*** is a non-parametric measure that quantifies the dependence between two variables, regardless of their distribution. The mutual information score ranges from 0 (indicating no relationship between the variables) to a maximum value (indicating a perfect relationship). It can be used in feature selection, clustering, and classification tasks, where the aim is to identify the most informative variables for a given problem. Mutual information scores are calculated on the data, rather than on the specific fitted model.
# Documentation for **[mutual_info_regression](https://scikit-learn.org/stable/modules/generated/sklearn.feature_selection.mutual_info_regression.html)** from the scikit-learn library.
# **Observation**: the score tells us that only ***Sex***, ***Age***, ***Pclass*** and ***Fare*** have some level of impact on the survival predictions, in decreasing order of importance. This suggests that excluding Embarked, Parch and SibSp may be possible without a significan loss in predictive ability by the models.
from sklearn.feature_selection import mutual_info_regression
X_mi = X_train.copy()
for colname in X_mi.columns:
X_mi[colname], _ = X_mi[colname].factorize()
def make_mi_scores(X, y):
mi_scores = mutual_info_regression(X, y, random_state=seed)
mi_scores = pd.Series(mi_scores, name="MI Scores", index=X.columns)
mi_scores = mi_scores.sort_values(ascending=False)
return mi_scores
mi_scores = make_mi_scores(X_mi, y_train)
def plot_mi_scores(scores):
scores = scores.sort_values(ascending=True)
width = np.arange(len(scores))
ticks = list(scores.index)
plt.barh(width, scores, height=0.6, color="springgreen")
plt.yticks(width, ticks)
# plt.xticks([0.01*i for i in range(20)])
plt.title("Mutual Information Scores", weight="bold")
plt.figure(figsize=(3, 3), dpi=80)
fig = plt.gcf()
fig.tight_layout()
plt.grid(axis="x", linestyle="--", color="0.8")
plot_mi_scores(mi_scores)
# # **Permutation importance**
# ***Permutation importance*** is calculated considering a model predictions for the test set and performing random permutations of the values of a given feature. If the prediction is insensitive to these permutation for a given feature, its permuatation importance score will be low, and this means that the model does not rely on this feature to predict the target, therefore this variable can be ignored.
# Documentation for **[PermutationImportance](https://eli5.readthedocs.io/en/latest/blackbox/permutation_importance.html)**
# **Observation**: each model considers a different subset of features important. The majority model concur in deriving most information out of *"Sex"*, but for some it is *"Fare"* that has the highest score. Overall *"Embarked"*, *"SibSp"* and *"Parch"* score low for all models, in accordance with the Mutual Information results, except LogisticRegression and GaussianProcess that have a 0.02 score for *"SibSp"*
import eli5
from eli5.sklearn import PermutationImportance
from IPython.display import display
for n, (model_name, model) in enumerate(models.items()):
perm = PermutationImportance(model, random_state=seed).fit(X_test, y_test)
print("\n", model_name)
display(eli5.show_weights(perm, feature_names=list(X_test.columns)))
# Now let's retrain the models using only the subset of features with a weight greater than 0.01 and recalculate the accuracy. In 6 out of 7 cases **excluding the uninformative features increased the accuracy of the predictions**!
to_keep = {
"RandomForest": ["Sex", "Pclass", "Age", "Fare", "SibSp"],
"LogisticRegression": ["Sex", "Pclass", "SibSp"],
"XGB": ["Sex", "Pclass", "Age", "Fare"],
"GaussianProcess": ["Sex", "Pclass", "Age", "Fare", "SibSp"],
"HistGradientBoosting": ["Sex", "Pclass", "Age", "Fare", "SibSp"],
"MLP": ["Sex", "Age", "SibSp"],
"SVC": ["Fare", "Age"],
}
for n, (model_name, model) in enumerate(models.items()):
model.fit(X_train[to_keep[model_name]], y_train)
preds = model.predict(X_test[to_keep[model_name]])
score = accuracy_score(y_test, preds)
print("Accuracy of", model_name, ":", np.round(score, 4))
# ### **Comparison of accuracy with all features or only with relevant features for each model**
# |Model| Accuracy with all features | With reduced features set|
# |----|-----|----|
# |RandomForest|0.7989 | 0.8045|
# | LogisticRegression|0.7765 | 0.7877|
# | XGB|0.8045 | 0.8268|
# | GaussianProcess|0.6816 | 0.6927|
# | HistGradientBoosting|0.8212 | 0.8045|
# | MLP|0.7877 | 0.7989|
# | SVC|0.6760 | 0.6816|
# # **Partial Dependence Plots**
# Partial dependence plots can help us visualize the relationship between the target variable (Survival) and a specific feature *while keeping other variables constant*. They provide insights into how each feature impacts the target, revealing trends, interactions, and non-linearities.
# For example, we can see that, for all models that used Sex as a feature, 0 (Female) has a higher correlation with Survival than 1 (Male). Concerning features with continuous values, such as Age, for almost all models, the highest correlation with survival is found in the case of infants.
# Another interesting example is looking at the SibSp plot for the GasussianProcess model. What this trend is telling us is that single people have the lowest correlation with survival, but that beyond 1 sibling or spouse there isn't added benefit in belonging to larger families in terms of survival chances.
# Documentation for **[PartialDependenceDisplay](https://scikit-learn.org/stable/modules/generated/sklearn.inspection.PartialDependenceDisplay.html)**
from sklearn.inspection import PartialDependenceDisplay
colors = plt.get_cmap("Accent")(np.linspace(0.0, 1.0, 7))
for n, (model_name, model) in enumerate(models.items()):
figsize = (6, 3) if len(to_keep[model_name]) > 3 else (6, 1.5)
fig, ax = plt.subplots(figsize=figsize, dpi=80)
ax.set_title(model_name, weight="bold")
display = PartialDependenceDisplay.from_estimator(
model,
X_test[to_keep[model_name]],
to_keep[model_name],
ax=ax,
line_kw={"color": colors[n]},
)
plt.subplots_adjust(top=1.6)
plt.show()
# Interestingly, we can also explore 2D partial dependence plot, to uncover interaction between the features. Below are two examples.
# ### **Age & Fare in the SVC model**
fig, ax = plt.subplots()
fnames = [("Age", "Fare")]
disp = PartialDependenceDisplay.from_estimator(
models["SVC"], X_test[to_keep["SVC"]], fnames, ax=ax
)
plt.show()
# ### **Age & Sex in the MLP model**
fig, ax = plt.subplots()
fnames = [("Age", "Sex")]
disp = PartialDependenceDisplay.from_estimator(
models["MLP"], X_test[to_keep["MLP"]], fnames, ax=ax
)
plt.show()
# # **SHAP**
# SHAP (Shapley Additive Explanations) assigns importance values to each feature in a prediction model, quantifying its contribution towards the prediction outcome. In this way, it can be clarified what led to a given prediction, providing a comprehensive understanding of feature impact and facilitating model explanation and evaluation.
# In the following I compared for example the predictions of all models for the first entry in the test set
import shap
for n, (model_name, model) in enumerate(models.items()):
# Fits the explainer
explainer = shap.Explainer(model.predict, X_test[to_keep[model_name]])
# Calculates the SHAP values - It takes some time
shap_values = explainer(X_test[to_keep[model_name]])
# shap.plots.bar(shap_values[0])
print(model_name)
shap.initjs()
shap.plots.force(shap_values[0], matplotlib=True, show=True)
# In addition to single predictions, we can get also look at summary plots for all the entries in the dataset. Below I plotted the results for the XGB model. It can be seen, for example, that high values of "Sex", (red = male), reduces the value of the prediction, which tranelates to a lower survival chance. If we look at Age, on the other hand, we can see a strong effect on the prediction for the extreme values (very high age lowers the predicions, very low age increases it) and a more modest effect for the intermediate values (purple), that tend to cluster around zero.
explainer = shap.TreeExplainer(models["XGB"])
shap_values = explainer.shap_values(X_test[to_keep["XGB"]])
shap.summary_plot(shap_values, X_test[to_keep["XGB"]])
# # **Submission**
# After we have completed all preliminary analysis and chosen the best modelling approach, we can retrain the best model using the whole training data to further increase its accuracy (in this final stage the test set is actually the one used to score the submission) and submit our predicitions.
models["XGB"].fit(X[to_keep["XGB"]], y)
results = models["XGB"].predict(X_subm[to_keep["XGB"]])
submission_df = pd.DataFrame(
{"PassengerId": submission_set["PassengerId"], "Survived": results}
)
submission_df.to_csv("submission.csv", index=False)
| false | 0 | 3,830 | 5 | 3,830 | 3,830 |
129696086
<jupyter_start><jupyter_text>Credit Card Fraud Detection
Context
---------
It is important that credit card companies are able to recognize fraudulent credit card transactions so that customers are not charged for items that they did not purchase.
Content
---------
The dataset contains transactions made by credit cards in September 2013 by European cardholders.
This dataset presents transactions that occurred over two days, with 492 frauds out of 284,807 transactions. The dataset is highly unbalanced: the positive class (frauds) accounts for 0.172% of all transactions.
It contains only numerical input variables, which are the result of a PCA transformation. Unfortunately, due to confidentiality issues, we cannot provide the original features and more background information about the data. Features V1, V2, ... V28 are the principal components obtained with PCA; the only features which have not been transformed with PCA are 'Time' and 'Amount'. Feature 'Time' contains the seconds elapsed between each transaction and the first transaction in the dataset. The feature 'Amount' is the transaction amount; this feature can be used for example-dependent cost-sensitive learning. Feature 'Class' is the response variable and it takes value 1 in case of fraud and 0 otherwise.
Given the class imbalance ratio, we recommend measuring the accuracy using the Area Under the Precision-Recall Curve (AUPRC). Confusion matrix accuracy is not meaningful for unbalanced classification.
Update (03/05/2021)
---------
A simulator for transaction data has been released as part of the practical handbook on Machine Learning for Credit Card Fraud Detection - https://fraud-detection-handbook.github.io/fraud-detection-handbook/Chapter_3_GettingStarted/SimulatedDataset.html. We invite all practitioners interested in fraud detection datasets to also check out this data simulator, and the methodologies for credit card fraud detection presented in the book.
Acknowledgements
---------
The dataset has been collected and analysed during a research collaboration of Worldline and the Machine Learning Group (http://mlg.ulb.ac.be) of ULB (Université Libre de Bruxelles) on big data mining and fraud detection.
More details on current and past projects on related topics are available on [https://www.researchgate.net/project/Fraud-detection-5][1] and the page of the [DefeatFraud][2] project
Please cite the following works:
Andrea Dal Pozzolo, Olivier Caelen, Reid A. Johnson and Gianluca Bontempi. [Calibrating Probability with Undersampling for Unbalanced Classification.][3] In Symposium on Computational Intelligence and Data Mining (CIDM), IEEE, 2015
Dal Pozzolo, Andrea; Caelen, Olivier; Le Borgne, Yann-Ael; Waterschoot, Serge; Bontempi, Gianluca. [Learned lessons in credit card fraud detection from a practitioner perspective][4], Expert systems with applications,41,10,4915-4928,2014, Pergamon
Dal Pozzolo, Andrea; Boracchi, Giacomo; Caelen, Olivier; Alippi, Cesare; Bontempi, Gianluca. [Credit card fraud detection: a realistic modeling and a novel learning strategy,][5] IEEE transactions on neural networks and learning systems,29,8,3784-3797,2018,IEEE
Dal Pozzolo, Andrea [Adaptive Machine learning for credit card fraud detection][6] ULB MLG PhD thesis (supervised by G. Bontempi)
Carcillo, Fabrizio; Dal Pozzolo, Andrea; Le Borgne, Yann-Aël; Caelen, Olivier; Mazzer, Yannis; Bontempi, Gianluca. [Scarff: a scalable framework for streaming credit card fraud detection with Spark][7], Information fusion,41, 182-194,2018,Elsevier
Carcillo, Fabrizio; Le Borgne, Yann-Aël; Caelen, Olivier; Bontempi, Gianluca. [Streaming active learning strategies for real-life credit card fraud detection: assessment and visualization,][8] International Journal of Data Science and Analytics, 5,4,285-300,2018,Springer International Publishing
Bertrand Lebichot, Yann-Aël Le Borgne, Liyun He, Frederic Oblé, Gianluca Bontempi [Deep-Learning Domain Adaptation Techniques for Credit Cards Fraud Detection](https://www.researchgate.net/publication/332180999_Deep-Learning_Domain_Adaptation_Techniques_for_Credit_Cards_Fraud_Detection), INNSBDDL 2019: Recent Advances in Big Data and Deep Learning, pp 78-88, 2019
Fabrizio Carcillo, Yann-Aël Le Borgne, Olivier Caelen, Frederic Oblé, Gianluca Bontempi [Combining Unsupervised and Supervised Learning in Credit Card Fraud Detection ](https://www.researchgate.net/publication/333143698_Combining_Unsupervised_and_Supervised_Learning_in_Credit_Card_Fraud_Detection) Information Sciences, 2019
Yann-Aël Le Borgne, Gianluca Bontempi [Reproducible machine Learning for Credit Card Fraud Detection - Practical Handbook ](https://www.researchgate.net/publication/351283764_Machine_Learning_for_Credit_Card_Fraud_Detection_-_Practical_Handbook)
Bertrand Lebichot, Gianmarco Paldino, Wissam Siblini, Liyun He, Frederic Oblé, Gianluca Bontempi [Incremental learning strategies for credit cards fraud detection](https://www.researchgate.net/publication/352275169_Incremental_learning_strategies_for_credit_cards_fraud_detection), IInternational Journal of Data Science and Analytics
[1]: https://www.researchgate.net/project/Fraud-detection-5
[2]: https://mlg.ulb.ac.be/wordpress/portfolio_page/defeatfraud-assessment-and-validation-of-deep-feature-engineering-and-learning-solutions-for-fraud-detection/
[3]: https://www.researchgate.net/publication/283349138_Calibrating_Probability_with_Undersampling_for_Unbalanced_Classification
[4]: https://www.researchgate.net/publication/260837261_Learned_lessons_in_credit_card_fraud_detection_from_a_practitioner_perspective
[5]: https://www.researchgate.net/publication/319867396_Credit_Card_Fraud_Detection_A_Realistic_Modeling_and_a_Novel_Learning_Strategy
[6]: http://di.ulb.ac.be/map/adalpozz/pdf/Dalpozzolo2015PhD.pdf
[7]: https://www.researchgate.net/publication/319616537_SCARFF_a_Scalable_Framework_for_Streaming_Credit_Card_Fraud_Detection_with_Spark
[8]: https://www.researchgate.net/publication/332180999_Deep-Learning_Domain_Adaptation_Techniques_for_Credit_Cards_Fraud_Detection
Kaggle dataset identifier: creditcardfraud
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
import seaborn as sns
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
import warnings
warnings.filterwarnings("ignore")
# ## Reading Data
path = "/kaggle/input/creditcardfraud/creditcard.csv"
data = pd.read_csv(path)
# ## Dataset Information
usage = data.memory_usage(index=False)
TO_MB = (1024) ** 2
print(
    f"Every column has the same size of {round(usage[0]/TO_MB)} MB; total memory usage is {round(sum(usage)/TO_MB)} MB"
)
data.head()
data.info()
data[["Time", "Amount", "Class"]].describe().T.style.background_gradient(
subset="mean", axis=0, cmap="bwr", vmin=0.5, vmax=1, low=0.4, high=0.8
).background_gradient(subset=["std", "50%"], axis=0, cmap="bwr").background_gradient(
subset="max", axis=0, cmap="Blues_r", low=0.2, high=0.9
)
fraud = len(data[data["Class"] == 1])
no_fraud = len(data[data["Class"] == 0])
print(f"{fraud} Of Frauds")
print(f"{no_fraud} No Frauds")
sns.countplot(data=data, x="Class")
plt.title("No Fraud Vs Fraud")
plt.show()
# ## EDA
fig, axs = plt.subplots(1, 3, figsize=(19, 4))
sns.histplot(data["Time"], ax=axs[0], kde=True)
axs[0].set_title("Time Distribution")
sns.histplot(data["Amount"], ax=axs[1], kde=True)
axs[1].set_title("Amount Distribution")
sns.histplot(data["Class"], ax=axs[2], kde=True)
axs[2].set_title("Class Distribution")
plt.tight_layout(pad=0.5)
plt.show()
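# According to the dataset card, Time and Amount are the only columns not produced by
# the PCA transform, and the plots above show they sit on very different scales.
# A minimal sketch of putting them on a comparable scale with RobustScaler before any
# modelling (the scaler choice here is just one option, not a prescribed step):
from sklearn.preprocessing import RobustScaler

scaled = data.copy()
scaled[["Time", "Amount"]] = RobustScaler().fit_transform(scaled[["Time", "Amount"]])
scaled[["Time", "Amount"]].describe().T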
# The dataset is highly unbalanced, so take 492 rows from the no-fraud class
# to match the 492 fraud cases and build a balanced subset.
no_fraud_df = data.loc[data["Class"] == 0][:492]
fraud_df = data.loc[data["Class"] == 1]
new_norm_dist_df = pd.concat([no_fraud_df, fraud_df])
sns.countplot(data=new_norm_dist_df, x="Class")
plt.title("No Fraud VS Fraud Balanced")
plt.show()
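# The dataset card recommends judging models on this data with the area under the
# precision-recall curve rather than accuracy. A rough sketch of what that looks like
# with a simple baseline on the original, unbalanced data (logistic regression is
# purely illustrative, and convergence warnings are likely without scaling):
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import average_precision_score
from sklearn.model_selection import train_test_split

features = data.drop(columns="Class")
target = data["Class"]
f_train, f_test, t_train, t_test = train_test_split(
    features, target, test_size=0.3, stratify=target, random_state=42
)
baseline = LogisticRegression(max_iter=1000)
baseline.fit(f_train, t_train)
fraud_scores = baseline.predict_proba(f_test)[:, 1]
print("AUPRC (average precision):", average_precision_score(t_test, fraud_scores))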
sns.color_palette(palette="husl", n_colors=10)
plt.figure(figsize=(25, 8))
corr_data = new_norm_dist_df.corr()
sns.heatmap(data=corr_data)
plt.title("Corrolated Values")
plt.show()
# From the heatmap:
# V3, V5, V17, V18 are negatively correlated with the Class label
# V2, V4, V11, V19 are positively correlated with the Class label
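# A quick way to confirm those hand-picked lists is to rank every feature by its
# correlation with the Class label on the balanced subset (a short sketch using the
# corr_data frame computed above):
class_corr = corr_data["Class"].drop("Class").sort_values()
print("Most negatively correlated:\n", class_corr.head(5))
print("Most positively correlated:\n", class_corr.tail(5))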
# Detecting Outliers
negative_corr_cols = ["V3", "V5", "V17", "V18"]
positive_corr_cols = ["V2", "V4", "V11", "V19"]
fig, axs = plt.subplots(nrows=2, ncols=4, figsize=(18, 7))
for i, col in enumerate(negative_corr_cols):
    sns.boxplot(ax=axs[0, i], data=new_norm_dist_df, x="Class", y=col)
for i, col in enumerate(positive_corr_cols):
    sns.boxplot(ax=axs[1, i], data=new_norm_dist_df, x="Class", y=col)
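# The boxplots above show heavy outliers in the most correlated features. A common
# follow-up is IQR-based trimming; the sketch below uses the usual 1.5 * IQR rule of
# thumb on the balanced subset (whether and how aggressively to trim is a modelling
# decision, not something this EDA prescribes):
trimmed = new_norm_dist_df.copy()
for col in negative_corr_cols + positive_corr_cols:
    q1, q3 = trimmed[col].quantile([0.25, 0.75])
    iqr = q3 - q1
    trimmed = trimmed[trimmed[col].between(q1 - 1.5 * iqr, q3 + 1.5 * iqr)]
print(len(new_norm_dist_df), "rows before trimming,", len(trimmed), "rows after")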
129696579
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import os, json, math, librosa
import IPython.display as ipd
import librosa.display
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import Sequential
from tensorflow.keras.layers import Conv2D
import sklearn.model_selection as sk
from sklearn.model_selection import train_test_split
# Get the genre label from the folder name.
MUSIC = "/kaggle/input/deneme/Veri/Tür"
music_dataset = []  # file path of every wav file
genre_target = []
for root, dirs, files in os.walk(MUSIC):
for name in files:
filename = os.path.join(root, name)
if filename != "/Veri/Tür/Arabesk/Arabesk1.wav":
music_dataset.append(filename)
genre_target.append(filename.split("/")[6])
print(set(genre_target))
# Listening to a sample audio file
audio_path = music_dataset[150]
x, sr = librosa.load(audio_path)
librosa.load(audio_path, sr=None)
ipd.Audio(audio_path)
# Visualize the audio file as a waveform
plt.figure(figsize=(16, 5))
librosa.display.waveshow(x, sr=sr)
# Visualize the audio file as a spectrogram
X = librosa.stft(x)
Xdb = librosa.amplitude_to_db(abs(X))
plt.figure(figsize=(14, 5))
librosa.display.specshow(Xdb, sr=sr, x_axis="time", y_axis="hz")
plt.title("Spectogram")
plt.colorbar()
# Visualize the audio as a mel-spectrogram
file_location = audio_path
y, sr = librosa.load(file_location)
melSpec = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=128)
melSpec_dB = librosa.power_to_db(melSpec, ref=np.max)
plt.figure(figsize=(10, 5))
librosa.display.specshow(melSpec_dB, x_axis="time", y_axis="mel", sr=sr, fmax=8000)
plt.colorbar(format="%+1.0f dB")
plt.title("MelSpectrogram")
plt.tight_layout()
plt.show()
DATASET_PATH = "/kaggle/input/deneme/Veri/Tür"
JSON_PATH = "data_10.json"
SAMPLE_RATE = 22050
TRACK_DURATION = 30  # measured in seconds
SAMPLES_PER_TRACK = SAMPLE_RATE * TRACK_DURATION
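# A quick sanity check of the segment arithmetic used by save_mfcc below, assuming the
# num_segments=6 and hop_length=512 passed later: each 30-second track is split into
# equal segments, and each segment yields ceil(samples / hop_length) MFCC frames.
samples_per_segment = SAMPLES_PER_TRACK // 6
print("Samples per segment:", samples_per_segment)  # 110250
print("MFCC frames per segment:", math.ceil(samples_per_segment / 512))  # 216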
def save_mfcc(
dataset_path, json_path, num_mfcc=13, n_fft=2048, hop_length=512, num_segments=5
):
"""Extracts MFCCs from music dataset and saves them into a json file along witgh genre labels.
:param dataset_path (str): Path to dataset
:param json_path (str): Path to json file used to save MFCCs
:param num_mfcc (int): Number of coefficients to extract
:param n_fft (int): Interval we consider to apply FFT. Measured in # of samples
:param hop_length (int): Sliding window for FFT. Measured in # of samples
:param: num_segments (int): Number of segments we want to divide sample tracks into
:return:
"""
# Build a dictionary to store the genre mapping, labels and MFCCs.
data = {"mapping": [], "labels": [], "mfcc": []}
samples_per_segment = int(SAMPLES_PER_TRACK / num_segments)
num_mfcc_vectors_per_segment = math.ceil(samples_per_segment / hop_length)
for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dataset_path)):
if dirpath is not dataset_path:
semantic_label = dirpath.split("/")[-1]
data["mapping"].append(semantic_label)
print("\nProcessing: {}".format(semantic_label))
for f in filenames:
# Load the audio file.
file_path = os.path.join(dirpath, f)
if file_path != "/kaggle/input/deneme/Veri/Tür/Arabesk/Arabesk1.wav":
signal, sample_rate = librosa.load(file_path, sr=SAMPLE_RATE)
for d in range(num_segments):
start = samples_per_segment * d
finish = start + samples_per_segment
mfcc = librosa.feature.mfcc(
y=signal[start:finish],
sr=sample_rate,
n_mfcc=num_mfcc,
n_fft=n_fft,
hop_length=hop_length,
)
mfcc = mfcc.T
if len(mfcc) == num_mfcc_vectors_per_segment:
data["mfcc"].append(mfcc.tolist())
data["labels"].append(i - 1)
print("{}, segment:{}".format(file_path, d + 1))
with open(json_path, "w") as fp:
json.dump(data, fp, indent=4)
# Data preprocessing: extract MFCCs and write them to JSON
save_mfcc(DATASET_PATH, JSON_PATH, num_segments=6)
DATA_PATH = "./data_10.json"
def load_data(data_path):
"""Loads training dataset from json file.
:param data_path (str): Path to json file containing data
:return X (ndarray): Inputs
:return y (ndarray): Targets
"""
with open(data_path, "r") as fp:
data = json.load(fp)
X = np.array(data["mfcc"])
y = np.array(data["labels"])
z = np.array(data["mapping"])
return X, y, z
def plot_history(history):
"""Plots accuracy/loss for training/validation set as a function of the epochs
:param history: Training history of model
:return:
"""
fig, axs = plt.subplots(2)
# Accuracy subplot
axs[0].plot(history.history["accuracy"], label="train accuracy")
axs[0].plot(history.history["val_accuracy"], label="test accuracy")
axs[0].set_ylabel("Accuracy")
axs[0].legend(loc="lower right")
axs[0].set_title("Accuracy eval")
# Error (loss) subplot
axs[1].plot(history.history["loss"], label="train error")
axs[1].plot(history.history["val_loss"], label="test error")
axs[1].set_ylabel("Error")
axs[1].set_xlabel("Epoch")
axs[1].legend(loc="upper right")
axs[1].set_title("Error eval")
plt.show()
def prepare_datasets(test_size, validation_size):
"""Loads data and splits it into train, validation and test sets.
:param test_size (float): Value in [0, 1] indicating percentage of data set to allocate to test split
:param validation_size (float): Value in [0, 1] indicating percentage of train set to allocate to validation split
:return X_train (ndarray): Input training set
:return X_validation (ndarray): Input validation set
:return X_test (ndarray): Input test set
:return y_train (ndarray): Target training set
:return y_validation (ndarray): Target validation set
:return y_test (ndarray): Target test set
:return z : Mappings for data
"""
X, y, z = load_data(DATA_PATH)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size)
X_train, X_validation, y_train, y_validation = train_test_split(
X_train, y_train, test_size=validation_size
)
X_train = X_train[..., np.newaxis]
X_validation = X_validation[..., np.newaxis]
X_test = X_test[..., np.newaxis]
return X_train, X_validation, X_test, y_train, y_validation, y_test, z
def build_model(input_shape):
"""Generates CNN model
:param input_shape (tuple): Shape of input set
:return model: CNN model
"""
model = keras.Sequential()
# 1st convolutional block
model.add(
keras.layers.Conv2D(32, (3, 3), activation="relu", input_shape=input_shape)
)
model.add(keras.layers.MaxPooling2D((3, 3), strides=(2, 2), padding="same"))
model.add(keras.layers.BatchNormalization())
# 2nd convolutional block
model.add(keras.layers.Conv2D(32, (3, 3), activation="relu"))
model.add(keras.layers.MaxPooling2D((3, 3), strides=(2, 2), padding="same"))
model.add(keras.layers.BatchNormalization())
# 3rd convolutional block
model.add(keras.layers.Conv2D(32, (2, 2), activation="relu"))
model.add(keras.layers.MaxPooling2D((2, 2), strides=(2, 2), padding="same"))
model.add(keras.layers.BatchNormalization())
# Flatten layer that collapses the feature maps into a single dimension
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(64, activation="relu"))
model.add(keras.layers.Dropout(0.3))
# Output layer
model.add(keras.layers.Dense(10, activation="softmax"))
return model
def predict(model, X, y):
"""Predict a single sample using the trained model
:param model: Trained classifier
:param X: Input data
:param y (int): Target
"""
X = X[np.newaxis, ...] # array shape (1, 130, 13, 1)
prediction = model.predict(X)
predicted_index = np.argmax(prediction, axis=1)
target = z[y]
predicted = z[predicted_index]
print("Target: {}, Predicted label: {}".format(target, predicted))
X_train, X_validation, X_test, y_train, y_validation, y_test, z = prepare_datasets(
0.25, 0.2
)
input_shape = (X_train.shape[1], X_train.shape[2], 1)
model = build_model(input_shape)
optimiser = keras.optimizers.Adam(learning_rate=0.0001)
model.compile(
optimizer=optimiser, loss="sparse_categorical_crossentropy", metrics=["accuracy"]
)
model.summary()
history = model.fit(
X_train,
y_train,
validation_data=(X_validation, y_validation),
batch_size=32,
epochs=300,
)
plot_history(history)
test_loss, test_acc = model.evaluate(X_test, y_test, verbose=2)
print("\nTest accuracy:", test_acc)
# Testing the trained model
# Pick one sample from the test set to predict.
X_to_predict = X_test[1]
y_to_predict = y_test[1]
predict(model, X_to_predict, y_to_predict)
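# A broader check than a single prediction: evaluate the whole test split and map the
# integer labels back to genre names through z. This is a sketch that assumes the
# model, X_test, y_test and z prepared above are still in memory.
from sklearn.metrics import classification_report, confusion_matrix

y_pred = np.argmax(model.predict(X_test), axis=1)
print(confusion_matrix(y_test, y_pred))
print(classification_report(y_test, y_pred, labels=np.arange(len(z)), target_names=list(z), zero_division=0))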
129696958
# Import libraries and modules.
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import statsmodels.api as sm
from statsmodels.formula.api import ols
# Load the data.
data = pd.read_csv("/kaggle/input/marketing-sales-data/marketing_sales_data.csv")
# Display the first five rows.
data.head()
# Create a pairplot of the data.
sns.pairplot(data)
# Calculate the mean sales for each TV category.
print(data.groupby("TV")["Sales"].mean())
print("")
# Calculate the mean sales for each Influencer category .
print(data.groupby("Influencer")["Sales"].mean())
# Drop rows that contain missing data and update the DataFrame.
data = data.dropna(axis=0)
# Rename all columns in data that contain a space.
data = data.rename(columns={"Social Media": "Social_Media"})
# Define the OLS formula.
ols_formula = "Sales ~ C(TV) + Radio"
# Create an OLS model.
OLS = ols(formula=ols_formula, data=data)
# Fit the model.
model = OLS.fit()
# Save the results summary.
model_results = model.summary()
# Display the model results.
model_results
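# A note on C(TV): the formula API hands the formula to patsy, which treatment-codes
# the TV categories into dummy columns (one level becomes the reference captured by
# the intercept). A small sketch to inspect that design matrix, purely for illustration:
import patsy

design = patsy.dmatrix("C(TV) + Radio", data, return_type="dataframe")
print(design.columns.tolist())
design.head()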
# Create a scatterplot for each independent variable and the dependent variable.
# Create a 1x2 plot figure.
fig, axes = plt.subplots(1, 2, figsize=(8, 4))
# Create a scatterplot between Radio and Sales.
sns.scatterplot(x=data["Radio"], y=data["Sales"], ax=axes[0])
# Set the title of the first plot.
axes[0].set_title("Radio and Sales")
# Create a scatterplot between Social Media and Sales.
sns.scatterplot(x=data["Social_Media"], y=data["Sales"], ax=axes[1])
# Set the title of the second plot.
axes[1].set_title("Social Media and Sales")
# Set the xlabel of the second plot.
axes[1].set_xlabel("Social Media")
# Use matplotlib's tight_layout() function to add space between plots for a cleaner appearance.
plt.tight_layout()
# Calculate the residuals.
residuals = model.resid
# Create a 1x2 plot figure.
fig, axes = plt.subplots(1, 2, figsize=(8, 4))
# Create a histogram with the residuals.
sns.histplot(residuals, ax=axes[0])
# Set the x label of the residual plot.
axes[0].set_xlabel("Residual Value")
# Set the title of the residual plot.
axes[0].set_title("Histogram of Residuals")
# Create a Q-Q plot of the residuals.
sm.qqplot(residuals, line="s", ax=axes[1])
# Set the title of the Q-Q plot.
axes[1].set_title("Normal QQ Plot")
# Use matplotlib's tight_layout() function to add space between plots for a cleaner appearance.
plt.tight_layout()
# Show the plot.
plt.show()
# Create a scatterplot with the fitted values from the model and the residuals.
fig = sns.scatterplot(x=model.fittedvalues, y=model.resid)
# Set the x axis label.
fig.set_xlabel("Fitted Values")
# Set the y axis label.
fig.set_ylabel("Residuals")
# Set the title.
fig.set_title("Fitted Values v. Residuals")
# Add a line at y = 0 to visualize the variance of residuals above and below 0.
fig.axhline(0)
# Show the plot.
plt.show()
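# The plot above is a visual check of the constant-variance assumption; a formal
# counterpart is the Breusch-Pagan test. A short sketch is below; how to act on the
# p-value is left to the analyst:
from statsmodels.stats.diagnostic import het_breuschpagan

lm_stat, lm_pvalue, f_stat, f_pvalue = het_breuschpagan(model.resid, model.model.exog)
print(f"Breusch-Pagan LM p-value: {lm_pvalue:.4f}")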
# Create a pairplot of the data.
sns.pairplot(data)
# Calculate the variance inflation factor (optional).
# Import variance_inflation_factor from statsmodels.
from statsmodels.stats.outliers_influence import variance_inflation_factor
# Create a subset of the data with the continous independent variables.
X = data[["Radio", "Social_Media"]]
# Calculate the variance inflation factor for each variable.
vif = [variance_inflation_factor(X.values, i) for i in range(X.shape[1])]
# Create a DataFrame with the VIF results for the column names in X.
df_vif = pd.DataFrame(vif, index=X.columns, columns=["VIF"])
# Display the VIF results.
df_vif
# Display the model results summary.
model_results
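# Because the model was fit through the formula API, predict() can take a DataFrame
# with the original TV and Radio columns and build the design matrix itself. A short
# example that simply reuses the first few observations as stand-ins for new campaigns:
new_campaigns = data[["TV", "Radio"]].head(3)
print(model.predict(new_campaigns))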
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/696/129696958.ipynb
| null | null |
[{"Id": 129696958, "ScriptId": 38569257, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9485427, "CreationDate": "05/15/2023 20:36:13", "VersionNumber": 1.0, "Title": "Multiple Linear Regression in Python", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 136.0, "LinesInsertedFromPrevious": 136.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
<jupyter_start><jupyter_text>Huggingface BERT Variants
This Dataset contains various variants of BERT from huggingface (Updated Monthly with the latest version from huggingface)
List of Included Datasets:
* **`bert-base-cased`**
* **`bert-base-uncased`**
* **`bert-large-cased`**
* **`bert-large-uncased`**
* **`distilbert-base-cased`**
* **`distilbert-base-uncased`**
* **`distilbert-base-multilingual-cased`**
* **`distilbert-base-cased-distilled-squad`**
* **`distilbert-base-uncased-distilled-squad`**
Kaggle dataset identifier: huggingface-bert-variants
<jupyter_script>import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
import warnings
warnings.simplefilter("ignore")
# general purpose packages
import numpy as np
import pandas as pd
import tensorflow as tf
import matplotlib.pyplot as plt
import seaborn as sns
# data processing
import re, string
from wordcloud import WordCloud
from collections import Counter
import emoji
import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from sklearn import preprocessing
from sklearn.model_selection import train_test_split
# keras
import tensorflow as tf
from tensorflow import keras
# metrics
from sklearn.metrics import accuracy_score, f1_score
from sklearn.metrics import classification_report, confusion_matrix
# set seed for reproducibility
seed = 42
# # FAKE NEWS CLASSIFICATION
# # **Loading Data**
df_real = pd.read_csv("/kaggle/input/fake-news-football/real.csv")
df_fake = pd.read_csv("/kaggle/input/fake-news-football/fake.csv")
df_real["label"] = 0
df_fake["label"] = 1
df = pd.concat([df_real, df_fake], ignore_index=True)
df["label"].value_counts()
# # **Tweets Deep Cleaning**
##CUSTOM DEFINED FUNCTIONS TO CLEAN THE TWEETS
# Clean emojis from text
def strip_emoji(text):
return emoji.demojize(text, language="en") # remove emoji
# Remove punctuations, links, mentions and \r\n new line characters
def strip_all_entities(text):
    text = (
        text.replace("\r", "").replace("\n", " ").lower()
    )  # remove \n and \r and lowercase
text = re.sub(r"(@|https?\:\/\/)\S+", "", text) # remove links and mentions
text = re.sub(
r"[^\x00-\x7f]", r"", text
) # remove non utf8/ascii characters such as '\x9a\x91\x97\x9a\x97'
banned_list = string.punctuation + "Ã" + "±" + "ã" + "¼" + "â" + "»" + "§"
table = str.maketrans("", "", banned_list)
text = text.translate(table)
return text
# clean hashtags at the end of the sentence, and keep those in the middle of the sentence by removing just the # symbol
def clean_hashtags(tweet):
new_tweet = " ".join(
word.strip()
for word in re.split("#(?!(?:hashtag)\b)[\w-]+(?=(?:\s+#[\w-]+)*\s*$)", tweet)
) # remove last hashtags
new_tweet2 = " ".join(
word.strip() for word in re.split("#|_", new_tweet)
) # remove hashtags symbol from words in the middle of the sentence
return new_tweet2
# Filter special characters such as & and $ present in some words
def filter_chars(a):
sent = []
for word in a.split(" "):
if ("$" in word) | ("&" in word):
sent.append("")
else:
sent.append(word)
return " ".join(sent)
def remove_mult_spaces(text): # remove multiple spaces
    return re.sub(r"\s\s+", " ", text)
df["tweet"] = df["tweet"].astype("str")
df["tweet"] = df["tweet"].apply(
lambda t: remove_mult_spaces(
filter_chars(clean_hashtags(strip_all_entities(strip_emoji(t))))
)
)
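# To make the effect of the chained cleaning functions concrete, here is a small
# sanity check on a single made-up tweet (the example string below is purely
# illustrative and not taken from the dataset).
sample_tweet = "BREAKING: Messi to join #Arsenal?? 😱 https://t.co/xyz @FabrizioRomano #transfer_news"
cleaned_example = remove_mult_spaces(
    filter_chars(clean_hashtags(strip_all_entities(strip_emoji(sample_tweet))))
)
print(cleaned_example)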
# # EDA & Visualization
# # **Kdeplot by Total Words**
plt.figure(figsize=(6, 4))
sns.kdeplot(
x=df["tweet"].apply(lambda x: len(x.split())),
hue=df["label"],
palette="winter",
fill=True,
)
plt.xlabel("Word Count")
plt.show()
# ### WordCloud
def wordcloud_text(text):
plt.figure(figsize=(10, 8))
wordcloud = WordCloud(
max_words=500,
height=800,
width=1500,
background_color="black",
colormap="viridis",
).generate(text)
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.show()
real_text = " ".join(df[df["label"] == 0]["tweet"])
fake_text = " ".join(df[df["label"] == 0]["tweet"])
wordcloud_text(real_text)
wordcloud_text(fake_text)
df["tweet_len"] = df["tweet"].apply(lambda x: len(x.split()))
plt.figure(figsize=(7, 5))
ax = sns.countplot(x="tweet_len", data=df[df["tweet_len"] < 10], palette="mako")
plt.title("Tweets with less than 10 words")
plt.yticks([])
ax.bar_label(ax.containers[0])
plt.ylabel("count")
plt.xlabel("")
plt.show()
# **We will drop these empty tweets and also those with less than 5 words.**
df = df[df["tweet_len"] > 4]
df[df["tweet_len"] > 100]
df_texts, df_labels = df["tweet"].to_list(), df["label"].to_list()
# # **Splitting train dataset into training and validation dataset**
from sklearn.model_selection import train_test_split
train_texts, val_texts, train_labels, val_labels = train_test_split(
df_texts, df_labels, test_size=0.2
)
# # **Using the DistilBert tokenizer**
from transformers import (
BertTokenizer,
TFBertModel,
BertForSequenceClassification,
DistilBertTokenizerFast,
)
# model_name = '/kaggle/input/huggingface-bert-variants/bert-base-cased/bert-base-cased'
model_name = "/kaggle/input/huggingface-bert-variants/distilbert-base-uncased/distilbert-base-uncased"
tokenizer = DistilBertTokenizerFast.from_pretrained(model_name)
train_encodings = tokenizer(train_texts, truncation=True, padding=True)
val_encodings = tokenizer(val_texts, truncation=True, padding=True)
# # **Dataset object function**
# **Defining a PyTorch Dataset is a crucial step in preparing your data for training a model. It organizes your data in a format that can be consumed directly by PyTorch's DataLoader and the Hugging Face Trainer, which handle batching, shuffling, and parallel data loading during training (a small batching sanity check follows the dataset construction below).**
import torch
class SentimentDataset(torch.utils.data.Dataset):
def __init__(self, encodings, labels):
self.encodings = encodings
self.labels = labels
def __getitem__(self, idx):
item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}
item["labels"] = torch.tensor(self.labels[idx])
return item
def __len__(self):
return len(self.labels)
train_dataset = SentimentDataset(train_encodings, train_labels)
val_dataset = SentimentDataset(val_encodings, val_labels)
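# Optional sanity check (a minimal sketch, not required for training): wrap the
# Dataset in a DataLoader to confirm that batching works as expected; the Trainer
# used below handles batching and shuffling internally.
from torch.utils.data import DataLoader

sanity_loader = DataLoader(train_dataset, batch_size=4, shuffle=True)
sanity_batch = next(iter(sanity_loader))
print({k: v.shape for k, v in sanity_batch.items()})  # input_ids, attention_mask, labels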
# # **Defining the Metrics for evaluation**
from sklearn.metrics import accuracy_score, precision_recall_fscore_support
def compute_metrics(pred):
labels = pred.label_ids
preds = pred.predictions.argmax(-1)
precision, recall, f1, _ = precision_recall_fscore_support(
labels, preds, average="binary"
)
acc = accuracy_score(labels, preds)
return {"accuracy": acc, "f1": f1, "precision": precision, "recall": recall}
# # **Fine-tuning with Huggingface Trainer**
from transformers import DistilBertForSequenceClassification, Trainer, TrainingArguments
import os
os.environ["WANDB_DISABLED"] = "true"
training_args = TrainingArguments(
output_dir="./results", # output directory
num_train_epochs=3, # total number of training epochs
per_device_train_batch_size=16, # batch size per device during training
per_device_eval_batch_size=64, # batch size for evaluation
warmup_steps=500, # number of warmup steps for learning rate scheduler
weight_decay=0.01, # strength of weight decay
logging_dir="./logs", # directory for storing logs
logging_steps=10,
)
model = DistilBertForSequenceClassification.from_pretrained(model_name)
trainer = Trainer(
model=model, # the instantiated 🤗 Transformers model to be trained
args=training_args, # training arguments, defined above
compute_metrics=compute_metrics,
train_dataset=train_dataset, # training dataset
eval_dataset=val_dataset, # evaluation dataset
)
trainer.train()
trainer.evaluate()
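# A minimal inference sketch with the fine-tuned model on a made-up tweet
# (label 0 = real, 1 = fake, following the encoding used above). This is only an
# illustrative example, not part of the original training pipeline.
model.eval()
new_tweet = "official club statement confirms the signing of a new striker"
inputs = tokenizer(new_tweet, truncation=True, padding=True, return_tensors="pt")
inputs = {k: v.to(model.device) for k, v in inputs.items()}
with torch.no_grad():
    logits = model(**inputs).logits
print("fake" if logits.argmax(-1).item() == 1 else "real")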
<jupyter_start><jupyter_text>Classification in asteroseismology
# **Classification in asteroseismology**
## **RGB-HeB classification**
### **Columns description:**
- Pop: [0/1] Population (1)
Population as follows:
0 = RGB
1 = HeB
RGB (Red Giant Branch)
HeB (Helium Burning)
- Dnu F8.5 (uHz) Mean large frequency separation of modes with the same degree and consecutive order, {DELTA}nu
- numax F9.5 (uHz) Frequency of maximum oscillation power
- epsilon F7.3 Location of the l=0 mode (2)
### **Acknowledgements:**
Dataset adapted from vizieR "test.dat".
VizieR: https://cdsarc.cds.unistra.fr/viz-bin/cat/J/MNRAS/469/4578#/browse
Kaggle dataset identifier: classification-in-asteroseismology
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
df = pd.read_csv(
"/kaggle/input/classification-in-asteroseismology/classification_in_asteroseismology.csv"
)
df
# # Analyze the dataset
top_10_records = df.nlargest(10, "Dnu")
top_10_records
# # Verbalize your insights in Markdown cells
import matplotlib.pyplot as plt
import seaborn as sns
# Overview of the dataset
dataset_overview = """
## Verbalizing Insights
### Dataset Overview
The "Classification in Asteroseismology" dataset contains information about asteroseismic classifications, including attributes such as POP, Dnu, numax, and epsilon. The dataset comprises {} rows and {} columns.
- The columns in the dataset include 'POP', 'Dnu', 'numax', and 'epsilon'.
- There are {} missing values in certain columns that need to be addressed before further analysis.
""".format(
df.shape[0], df.shape[1], df.isnull().sum().sum()
)
# Verbalize your insights
insights = """
## Insights
- Insight 1: [Describe an insight or finding]
- Insight 2: [Describe an insight or finding]
- Insight 3: [Describe an insight or finding]
"""
# Outputting the Markdown cells
print(dataset_overview)
print(insights)
# # Visualize your insights in several ways
plt.figure(figsize=(10, 6))
sns.scatterplot(data=df, x="Dnu", y="numax")
plt.title("Scatter plot of Dnu vs. numax")
plt.xlabel("Dnu")
plt.ylabel("numax")
plt.show()
plt.figure(figsize=(8, 6))
sns.histplot(data=df, x="epsilon", kde=True)
plt.title("Distribution of epsilon")
plt.xlabel("epsilon")
plt.show()
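# Optional extra view (a sketch that assumes the population column is named "POP",
# as referenced in the overview text above; the VizieR description calls it "Pop"):
# coloring the Dnu vs. numax scatter by population shows how well the RGB (0) and
# HeB (1) classes separate.
plt.figure(figsize=(10, 6))
sns.scatterplot(data=df, x="Dnu", y="numax", hue="POP", palette="coolwarm", alpha=0.7)
plt.title("Dnu vs. numax colored by population (0 = RGB, 1 = HeB)")
plt.xlabel("Dnu")
plt.ylabel("numax")
plt.show()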
<jupyter_start><jupyter_script>
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.svm import SVR
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from sklearn.ensemble import AdaBoostRegressor, GradientBoostingRegressor
import seaborn as sns
import matplotlib.pyplot as plt
from pandas.plotting import scatter_matrix
def get_data(path="/kaggle/input/playground-series-s3e14/train.csv"):
df = pd.read_csv(path)
X = df.drop(["id", "yield"], axis=1)
y = df[["yield"]]
return (X, y)
def plot_correlation(X):
sns.heatmap(X.corr().abs())
plt.show()
def evaluate(model, X, y, metric="mae"):
metric_func_map = {"mae": mean_absolute_error}
metric_ln_map = {"mae": "Mean Absolute Error"}
metric_ln = metric_ln_map[metric]
metric_func = metric_func_map[metric]
# make predictions on test set
y_pred = model.predict(X)
# calculate evaluation metrics
metric_num = metric_func(y, y_pred)
print(f"{metric_ln}: {metric_num}")
return metric_num
def preprocess(X):
new_x = X.drop(
[
"MaxOfUpperTRange",
"MinOfUpperTRange",
"MaxOfLowerTRange",
"MinOfLowerTRange",
"AverageOfLowerTRange",
"RainingDays",
],
axis=1,
)
new_x["AverageOfUpperTRange"] -= 69
new_x["AverageOfUpperTRange"] /= 7.641
# Some crappy manual normalization
new_x["AverageRainingDays"] -= 0.32
new_x["AverageRainingDays"] /= 0.16
new_x["fruitset"] -= 0.506
new_x["fruitset"] /= 0.074
new_x["fruitmass"] -= 0.446
new_x["fruitmass"] /= 0.037035
new_x["seeds"] -= 36
new_x["seeds"] /= 4.03
new_x["osmia"] -= 0.592
new_x["osmia"] /= 0.1394
#
new_x["andrena"] -= 0.492
new_x["andrena"] /= 0.148115
new_x["honeybee"] -= 0.389314
new_x["honeybee"] /= 0.3616
new_x["bumbles"] -= 0.286768
new_x["bumbles"] /= 0.059917
new_x["clonesize"] -= 19.7
new_x["clonesize"] /= 6.59
return new_x
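# Alternative sketch (not what this notebook uses): instead of hard-coding the
# means and standard deviations above, a StandardScaler could estimate them from
# the training data. The helper name `preprocess_with_scaler` is hypothetical.
from sklearn.preprocessing import StandardScaler


def preprocess_with_scaler(X, scaler=None):
    cols_to_drop = [
        "MaxOfUpperTRange",
        "MinOfUpperTRange",
        "MaxOfLowerTRange",
        "MinOfLowerTRange",
        "AverageOfLowerTRange",
        "RainingDays",
    ]
    new_x = X.drop(cols_to_drop, axis=1)
    if scaler is None:
        scaler = StandardScaler().fit(new_x)  # fit on training data only
    scaled = pd.DataFrame(
        scaler.transform(new_x), columns=new_x.columns, index=new_x.index
    )
    return scaled, scaler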
X, y = get_data()
X_p = preprocess(X)
X_p.describe()
X_p.head()
plot_correlation(X_p)
X_train, X_test, y_train, y_test = train_test_split(
X_p, y, test_size=0.15, random_state=101
)
model = GradientBoostingRegressor(
random_state=49,
n_estimators=550,
loss="absolute_error",
min_samples_leaf=8,
max_depth=5,
)
model.fit(X_train, y_train)
evaluate(model, X_test, y_test)
# ### Do some iteration and go back to prev step if needed
param_grid = {"n_estimators": [550], "max_depth": [1, 3, 5, 7]}
grid = GridSearchCV(
GradientBoostingRegressor(random_state=49, loss="absolute_error"),
param_grid,
refit=True,
verbose=3,
)
# fitting the model for grid search
grid.fit(X_train, y_train)
# TODO: Screw around a bit more and add more params to the grid
# Print the best params based on GridSearch
print(f"Best params: {grid.best_params_}")
# ### Train the final model with ALL data
model.fit(X_p, y)
# ### Run the Final model on our test data
infer_path = "/kaggle/input/playground-series-s3e14/test.csv"
def generate_submission(model, test_data_path, output_path="my_submission.csv"):
infer_data = pd.read_csv(test_data_path)
infer_data = preprocess(infer_data)
# Grab the IDs
infer_ids = infer_data["id"]
infer_x = infer_data.drop(["id"], axis=1)
predictions = model.predict(infer_x).reshape(-1)
infer_y = pd.DataFrame({"id": infer_ids, "yield": predictions})
# Save our predictions
infer_y.to_csv(output_path, index=False)
return infer_y
inf_y = generate_submission(model, infer_path, output_path="my_sub_550_new.csv")
inf_y.head()
<jupyter_start><jupyter_script>
# # Import Libraries
import numpy as np
import pandas as pd
import seaborn as sns
sns.set(style="darkgrid")
import matplotlib.pyplot as plt
from matplotlib.pyplot import figure
from scipy import stats
from statistics import mean
import scipy.stats as st
import statsmodels.api as sm
import matplotlib.mlab as mlab
import csv
with open("heart_disease.csv", "r") as csvfile:
reader = csv.reader(csvfile)
# Use next() to skip header row if present
headers = next(reader, None)
# Create a dictionary to store unique values for each column
unique_values = {header: set() for header in headers}
# Iterate through each row and add each value to the set for its respective column
for row in reader:
for i, value in enumerate(row):
unique_values[headers[i]].add(value)
# Count the number of unique values for each column and print the results
for header in headers:
num_unique_values = len(unique_values[header])
print(
f"Column '{header}' has {num_unique_values} unique values: {unique_values[header]}"
)
# # Read the Datasheet
df = pd.read_csv("heart_disease.csv")
df
df.rename(columns={"male": "gender"}, inplace=True)
# # Counting number of Row and Column
num_rows = df.shape[0]
num_cols = df.shape[1]
print(f"The data sheet has {num_rows} rows and {num_cols} columns.")
# ## The classification goal is to predict whether a patient has a 10-year risk of future coronary heart disease (CHD). The dataset provides the patients' information: 4,238 records with 16 attributes.
# # Variables:
# Each attribute is a potential risk factor. There are demographic, behavioural and medical risk factors.
# Demographic:
# 1. gender (sex): male or female (Nominal)
# 2. age: age of the patient (Continuous - although the recorded ages are whole numbers, age itself is continuous)
# Behavioural:
# 3. currentSmoker: whether or not the patient is a current smoker (Nominal)
# 4. cigsPerDay: the average number of cigarettes the person smoked per day (can be considered continuous, as one can smoke any number of cigarettes, even half a cigarette)
# 5. education: education level (Nominal)
# Medical (history):
# 6. BPMeds: whether or not the patient was on blood pressure medication (Nominal)
# 7. prevalentStroke: whether or not the patient previously had a stroke (Nominal)
# 8. prevalentHyp: whether or not the patient was hypertensive (Nominal)
# 9. diabetes: whether or not the patient had diabetes (Nominal)
# Medical (current):
# 10. totChol: total cholesterol level (Continuous)
# 11. sysBP: systolic blood pressure (Continuous)
# 12. diaBP: diastolic blood pressure (Continuous)
# 13. BMI: Body Mass Index (Continuous)
# 14. heartRate: heart rate (Continuous - although heart rate is in fact discrete, it is treated as continuous because of the large number of possible values)
# 15. glucose: glucose level (Continuous)
# Target variable:
# 16. TenYearCHD: ten-year risk of coronary heart disease **(binary: "1" means "Yes", "0" means "No")**
# Categorical: gender, education, currentSmoker, BPMeds, prevalentStroke, prevalentHyp, diabetes
# Continuous: age, cigsPerDay, totChol, sysBP, diaBP, BMI, heartRate, glucose
# (These two groups are encoded as column lists in the sketch below.)
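# Encoding the grouping above as explicit column lists for later use
# (column names follow the DataFrame after renaming "male" to "gender").
categorical_cols = [
    "gender",
    "education",
    "currentSmoker",
    "BPMeds",
    "prevalentStroke",
    "prevalentHyp",
    "diabetes",
]
continuous_cols = [
    "age",
    "cigsPerDay",
    "totChol",
    "sysBP",
    "diaBP",
    "BMI",
    "heartRate",
    "glucose",
]
target_col = "TenYearCHD"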
# ## Show information
column_names = df.columns.tolist()
# Print the column names
for column in column_names:
print(column)
df.info()
df.dtypes
# # Visualization Types of Data
df.dtypes.value_counts().plot.pie(explode=[0.1, 0.1], autopct="%1.1f%%", shadow=True)
plt.title("Type of our data")
# # Ten-year risk of coronary heart disease (TenYearCHD)
# This is our target variable: we predict the ten-year CHD risk from the other (predictor) variables.
# (binary: "1" means "Yes", "0" means "No")
#
column_name = ["TenYearCHD"]
zero_counts = (df == 0).sum()
one_counts = (df == 1).sum()
# Print the zero and one counts for each column
for column_names in column_name:
zero_count = zero_counts[column_names]
one_count = one_counts[column_names]
print(f"Column '{column_names}' has {zero_count} zeros and {one_count} ones.")
value_counts = pd.value_counts(df["TenYearCHD"])
value_counts.values  # convert to a numpy array, because otherwise we can't plot the pie chart
label = ["No", "Yes"]
colors = ["green", "skyblue"]
fig1, axarr = plt.subplots()
plt.pie(
value_counts.values,
autopct="%0.01f",
explode=[0.1, 0.1],
shadow=True,
labels=label,
colors=colors,
)
axarr.set_title("Ten year Death %")
plt.show()
# # Checking Duplicate row in our Data sheet
duplicated_rows = df[df.duplicated()]
print(duplicated_rows)
# # Find out missing value
missing_values_count = df.isnull().sum()
print(missing_values_count)
total_missing = missing_values_count.sum()
print(f"The data sheet has a total of {total_missing} missing values.")
# # This Column have null values
# 1. Education
# 2. cigsperday
# 3. BPMeds
# 4. BMI
# 5. heartrate
# 6. glucose
# 7. totchol
count = 0
for i in df.isnull().sum(axis=1):
if i > 0:
count = count + 1
print("Total number of rows with missing values is ", count)
print(
    "Rows with missing values make up only about",
    round((count / len(df.index)) * 100),
    "percent of the entire dataset.",
)
# # Visualization Of Missing Value
plt.figure(figsize=(10, 6))
sns.displot(
data=df.isna().melt(value_name="missing"),
y="variable",
hue="missing",
multiple="fill",
aspect=1.50,
)
plt.savefig("visualizing.png", dpi=100)
# # Replace 'NA' values with NaN
df.replace("NA", np.nan, inplace=True)
df
# # Checking Null Value with True and False
#
df.isnull()
# # Replace Null values with Mean
from statistics import mean
# # Replace Null Value with mean for continuous value
avg_totChol = df["totChol"].mean()
df["totChol"].replace(np.nan, avg_totChol, inplace=True)
avg_BMI = df["BMI"].mean()
df["BMI"].replace(np.nan, avg_BMI, inplace=True)
avg_heartRate = df["heartRate"].mean()
df["heartRate"].replace(np.nan, avg_heartRate, inplace=True)
avg_glucose = df["glucose"].mean()
df["glucose"].replace(np.nan, avg_glucose, inplace=True)
# # Replace null Value with mode for Categorical values
#
mode_value = df["education"].mode().values[0]
df["education"].fillna(mode_value, inplace=True)
mode_value = df["BPMeds"].mode().values[0]
df["BPMeds"].fillna(mode_value, inplace=True)
mode_value = df["cigsPerDay"].mode().values[0]
df["cigsPerDay"].fillna(mode_value, inplace=True)
# # Count Missing Value after filling Null value
missing_values_count = df.isnull().sum()
print(missing_values_count)
total_missing = missing_values_count.sum()
print(f"The data sheet has a total of {total_missing} missing values.")
# # Find Outliers
def detect_outliers(df):
outliers = {}
for column in df.columns:
q1 = df[column].quantile(0.25)
q3 = df[column].quantile(0.75)
iqr = q3 - q1
lower_bound = q1 - (1.5 * iqr)
upper_bound = q3 + (1.5 * iqr)
outlier_indices = df[
(df[column] < lower_bound) | (df[column] > upper_bound)
].index
num_outliers = len(outlier_indices)
if num_outliers > 0:
outliers[column] = num_outliers
return outliers
# data = pd.read_csv('heart_disease.csv')
outliers = detect_outliers(df)
if outliers:
print("Columns with outliers:")
for column, count in outliers.items():
print(f"{column}: {count} outliers")
else:
print("No outliers found.")
# # BpMeds, PrevalentStroke, Diabetes are categorical value and have two unique value 0 and 1.So we do not want to remove its outlier that show.
# # Visualize Outlier with Boxplot
fig, ax = plt.subplots()
df.boxplot(ax=ax)
ax.set_xlabel("Columns")
ax.set_ylabel("Values")
ax.set_title("Box Plot for Each Column")
plt.xticks(rotation="vertical")
plt.show()
def draw_boxplot(df, column, figsize=(8, 4)):
    plt.figure(figsize=figsize)
    plt.boxplot(df[column], vert=False, notch=True, patch_artist=True)
    plt.title(f"Boxplot of {column}")
    plt.xlabel("Values")
    plt.ylabel(column)
    plt.show()


draw_boxplot(df, "glucose", figsize=(10, 4))
draw_boxplot(df, "sysBP")
draw_boxplot(df, "heartRate")
draw_boxplot(df, "totChol")
# # Remove the outlier
column_names = [
"cigsPerDay",
"totChol",
"sysBP",
"diaBP",
"BMI",
"heartRate",
"glucose",
]
for column_name in column_names:
Q1 = df[column_name].quantile(0.25)
Q3 = df[column_name].quantile(0.75)
IQR = Q3 - Q1
lower_bound = Q1 - 1.5 * IQR
upper_bound = Q3 + 1.5 * IQR
# df= df[(df[column_name] >= lower_bound) & (df[column_name] <= upper_bound)]
df = df[df[column_name].between(lower_bound, upper_bound)]
# # Statistical Report
df.iloc[:, :-1].describe().T.sort_values(
by="std", ascending=False
).style.background_gradient(cmap="Greens").bar(subset=["max"], color="#F8766D").bar(
subset=["mean"], color="#00BFC4"
)
# # Exploratory Data Analysis
# # Histogrm
def draw_histograms(dataframe, features, rows, cols):
fig = plt.figure(figsize=(20, 20))
for i, feature in enumerate(features):
ax = fig.add_subplot(rows, cols, i + 1)
dataframe[feature].hist(bins=20, ax=ax, facecolor="midnightblue")
ax.set_title(feature + " Distribution", color="DarkRed")
fig.tight_layout()
plt.show()
draw_histograms(df, df.columns, 6, 3)
data = np.concatenate(df.values)
# Plot histogram
plt.hist(data, bins=10)
plt.title("Histogram of All heart Disease")
plt.xlabel("Value")
plt.ylabel("Frequency")
plt.show()
# # We see that older patients have a higher risk of coronary heart disease
sns.pointplot(y="age", x="TenYearCHD", data=df)
# # Patients who smoke more cigarettes per day have a higher risk of coronary heart disease
sns.pointplot(y="cigsPerDay", x="TenYearCHD", data=df)
# # A high cholesterol level is associated with a higher risk of coronary heart disease
sns.pointplot(y="totChol", x="TenYearCHD", data=df)
# # Higher systolic blood pressure means a higher risk of coronary heart disease
sns.pointplot(y="sysBP", x="TenYearCHD", data=df)
# # Count Ten Year Coronary heart Disease Value
df.TenYearCHD.value_counts()
sns.countplot(x="TenYearCHD", data=df)
sns.pairplot(data=df)
# # Hypothesis Testing
# # P-value test
# # One Sample P-Value Test
# Extract the 'age' column as the population (use df directly so the pandas alias `pd` is not shadowed)
population = df["age"]
# Specify the sample size
sample_size = 100
# Randomly select the sample from the population
sample = population.sample(n=sample_size, random_state=42)
# Specify the null hypothesis mean
null_mean = 50
# Perform the one-sample t-test
test_statistic, p_value = stats.ttest_1samp(sample, null_mean)
# Set the significance level
alpha = 0.05
# Plotting
plt.hist(sample, bins=10, edgecolor="black", alpha=0.75)
plt.axvline(x=null_mean, color="red", linestyle="--", label="Null Hypothesis Mean")
plt.xlabel("Age")
plt.ylabel("Frequency")
plt.title("Distribution of Age in Sample")
plt.legend()
# Print the result
if p_value < alpha:
plt.text(
0.5,
0.5,
"Reject the null hypothesis.\nThere is a significant difference between the sample mean and the null hypothesis mean.",
ha="center",
va="center",
transform=plt.gca().transAxes,
)
else:
plt.text(
0.5,
0.5,
"Fail to reject the null hypothesis.\nThere is no significant difference between the sample mean and the null hypothesis mean.",
ha="center",
va="center",
transform=plt.gca().transAxes,
)
# Display the plot
plt.show()
# # Two sample P-value Test
pd1 = df
# Split the data into two groups based on the target variable
group1 = pd1[pd1["TenYearCHD"] == 0]["age"]
group2 = pd1[pd1["TenYearCHD"] == 1]["age"]
# Perform the two-sample t-test
test_statistic, p_value = stats.ttest_ind(group1, group2)
# Set the significance level
alpha = 0.05
# Plotting
plt.boxplot([group1, group2], labels=["No Heart Disease", "Heart Disease"])
plt.ylabel("Age")
plt.title("Comparison of Age between Groups")
# Print the result
if p_value < alpha:
plt.text(
0.5,
0.5,
"Reject the null hypothesis.\nThere is a significant difference between the two groups.",
ha="center",
va="center",
transform=plt.gca().transAxes,
)
else:
plt.text(
0.5,
0.5,
"Fail to reject the null hypothesis.\nThere is no significant difference between the two groups.",
ha="center",
va="center",
transform=plt.gca().transAxes,
)
# Display the plot
plt.show()
# ## One Sample T- Test Hypothesis testing
# Define the null and alternative hypotheses:
# Null hypothesis (H0): There is no significant difference between the sample mean and the population mean.
# Alternative hypothesis (HA): There is a significant difference between the sample mean and the population mean.
data = df
# Extract the 'age' column as the population
population = data["age"]
# Specify the sample size
sample_size = 100
# Randomly select the sample from the population
sample = population.sample(n=sample_size, random_state=42)
# Specify the null hypothesis mean
null_mean = 50
# Perform the one-sample t-test
test_statistic, p_value = stats.ttest_1samp(sample, null_mean)
# Set the significance level
alpha = 0.05
# Print the result
if p_value < alpha:
print(
"Reject the null hypothesis. There is a significant difference between the sample mean and the null hypothesis mean."
)
else:
print(
"Fail to reject the null hypothesis. There is no significant difference between the sample mean and the null hypothesis mean."
)
# Plot the sample distribution
plt.hist(sample, bins=10, edgecolor="black")
plt.axvline(x=sample.mean(), color="red", linestyle="--", label="Sample Mean")
plt.axvline(x=null_mean, color="green", linestyle="--", label="Null Hypothesis Mean")
plt.xlabel("age")
plt.ylabel("Frequency")
plt.title("Distribution of Sample age")
plt.legend()
plt.show()
data1 = df
# ## Two-Sample T-Test Hypothesis testing
# Define the null and alternative hypotheses:
# Null hypothesis (H0): There is no significant difference between the mean age of current smokers and non-smokers.
# Alternative hypothesis (HA): There is a significant difference between the mean age of current smokers and non-smokers.
#
# Extract the 'age' column for each population
population_smoker = data1[data1["currentSmoker"] == 1]["age"]
population_non_smoker = data1[data1["currentSmoker"] == 0]["age"]
# Specify the sample size you want to use for each group (e.g., 100)
sample_size = 100
# Randomly select the samples from each population
sample_smoker = population_smoker.sample(n=sample_size, random_state=42)
sample_non_smoker = population_non_smoker.sample(n=sample_size, random_state=42)
# Set the significance level (alpha)
alpha = 0.05
# Perform the two-sample t-test
t_statistic, p_value = stats.ttest_ind(sample_smoker, sample_non_smoker)
# Plot the box plot
plt.boxplot([sample_smoker, sample_non_smoker], labels=["Smoker", "Non-Smoker"])
plt.xlabel("Group")
plt.ylabel("Age")
plt.title("Comparison of Age between Smokers and Non-Smokers")
# Print the results
if p_value < alpha:
plt.text(0.05, 0.9, "Reject H0\n(p < alpha)", transform=plt.gca().transAxes)
else:
plt.text(
0.05, 0.9, "Fail to reject H0\n(p >= alpha)", transform=plt.gca().transAxes
)
plt.show()
# # Association Test
# ## Chi-Square test
# Tests the significance of association between two categorical variables
dfc = df
import pandas as pd
import scipy.stats as stats
import matplotlib.pyplot as plt
# Create the contingency table
contingency_table = pd.crosstab(dfc["currentSmoker"], dfc["prevalentStroke"])
# Perform the chi-square test
chi2_statistic, p_value, dof, expected = stats.chi2_contingency(contingency_table)
# Print the results
print("Chi-square statistic:", chi2_statistic)
print("p-value:", p_value)
print("Degrees of freedom:", dof)
print("Expected frequencies:", expected)
# Plot the contingency table and expected frequencies
fig, ax = plt.subplots(figsize=(8, 6))
contingency_table.plot(kind="bar", stacked=True, ax=ax)
ax.set_ylabel("Frequency")
ax.set_xlabel("Current Smoker")
ax.set_title("Contingency Table")
ax.legend(["0=No Stroke", "1=Stroke"])
plt.xticks(rotation=0)
# Add expected frequencies as text annotations
for i in range(contingency_table.shape[0]):
for j in range(contingency_table.shape[1]):
observed = contingency_table.iloc[i, j]
expected_freq = expected[i, j]
ax.text(i, observed, f"{expected_freq:.1f}", ha="center", va="bottom")
# Interpret the results
alpha = 0.05
if p_value < alpha:
plt.annotate(
"p < 0.05",
xy=(0.5, 0.9),
xycoords="axes fraction",
ha="center",
fontsize=12,
color="red",
)
plt.annotate(
"Significant Association",
xy=(0.5, 0.8),
xycoords="axes fraction",
ha="center",
fontsize=12,
color="red",
)
else:
plt.annotate(
"p >= 0.05",
xy=(0.5, 0.9),
xycoords="axes fraction",
ha="center",
fontsize=12,
color="black",
)
plt.annotate(
"No Significant Association",
xy=(0.5, 0.8),
xycoords="axes fraction",
ha="center",
fontsize=12,
color="black",
)
plt.tight_layout()
plt.show()
contingency_table = pd.crosstab(dfc["currentSmoker"], dfc["BPMeds"])
# Perform the chi-square test
chi2_statistic, p_value, dof, expected = stats.chi2_contingency(contingency_table)
# Print the results
print("Chi-square statistic:", chi2_statistic)
print("p-value:", p_value)
print("Degrees of freedom:", dof)
print("Expected frequencies:", expected)
# Plot the contingency table and expected frequencies
fig, ax = plt.subplots(figsize=(8, 6))
contingency_table.plot(kind="bar", stacked=True, ax=ax)
ax.set_ylabel("Frequency")
ax.set_xlabel("Current Smoker")
ax.set_title("Contingency Table")
ax.legend(["0=Non Smoker", "1=Smoker"])
plt.xticks(rotation=0)
# Add expected frequencies as text annotations
for i in range(contingency_table.shape[0]):
for j in range(contingency_table.shape[1]):
observed = contingency_table.iloc[i, j]
expected_freq = expected[i, j]
ax.text(i, observed, f"{expected_freq:.1f}", ha="center", va="bottom")
# Interpret the results
alpha = 0.05
if p_value < alpha:
plt.annotate(
"p < 0.05",
xy=(0.5, 0.9),
xycoords="axes fraction",
ha="center",
fontsize=12,
color="red",
)
plt.annotate(
"Significant Association",
xy=(0.5, 0.8),
xycoords="axes fraction",
ha="center",
fontsize=12,
color="red",
)
else:
plt.annotate(
"p >= 0.05",
xy=(0.5, 0.9),
xycoords="axes fraction",
ha="center",
fontsize=12,
color="black",
)
plt.annotate(
"No Significant Association",
xy=(0.5, 0.8),
xycoords="axes fraction",
ha="center",
fontsize=12,
color="black",
)
plt.tight_layout()
plt.show()
# # Using ExtraClassifier
plt.rcParams["figure.figsize"] = 20, 8
sns.set_style("darkgrid")
x = df.iloc[:, :-1]
y = df.iloc[:, -1]
from sklearn.ensemble import ExtraTreesClassifier
model = ExtraTreesClassifier()
model.fit(x, y)
print(model.feature_importances_)
feat_importances = pd.Series(model.feature_importances_, index=x.columns)
feat_importances.nlargest(12).plot(kind="barh")
plt.show()
# # features importance using Chi Score
from sklearn.feature_selection import chi2
a = df.drop(columns=["TenYearCHD"], axis=1)
b = df["TenYearCHD"]
chi_scores = chi2(a, b)
chi_scores
# higher the chi value, higher the importance
chi_values = pd.Series(chi_scores[0], index=a.columns)
chi_values.sort_values(ascending=False, inplace=True)
chi_values.plot.bar()
# # P Value Importance features
# if p-value > 0.05, lower the importance
p_values = pd.Series(chi_scores[1], index=a.columns)
p_values.sort_values(ascending=False, inplace=True)
p_values.plot.bar()
# # Correlation among the variable
corr = df.corr()
corr
# # Correlation Heatmap Visualization
print("\n")
plt.figure(figsize=(20, 10))
sns.heatmap(df.corr(), annot=True, cmap=plt.cm.PuBu)
plt.show()
print("\n")
# # Logistic Regresssion
# ## Logistic regression is a type of regression analysis used to predict the outcome of a categorical dependent variable from a set of predictor (independent) variables. In logistic regression the dependent variable is always binary. It is mainly used for prediction and for calculating the probability of success.
from statsmodels.tools import add_constant as add_constant
heart_df_constant = add_constant(df)
heart_df_constant.head()
st.chisqprob = lambda chisq, df: st.chi2.sf(chisq, df)
cols = heart_df_constant.columns[:-1]
model = sm.Logit(heart_df_constant.TenYearCHD, heart_df_constant[cols])
result = model.fit()
result.summary()
# The results above show that some attributes have a P-value higher than the preferred alpha (5%), indicating a weak statistically significant relationship with the probability of heart disease. A backward elimination approach is used here: the attribute with the highest P-value is removed one at a time, and the regression is rerun repeatedly until all remaining attributes have P-values below 0.05.
# # Feature Selection: Backward elimination (P-value approach)
def back_feature_elem(data_frame, dep_var, col_list):
    """Takes in the dataframe, the dependent variable and a list of column names; runs the regression
    repeatedly, eliminating the feature with the highest P-value above alpha one at a time, and returns
    the regression result once all remaining p-values are below alpha.
    """
    while len(col_list) > 0:
        model = sm.Logit(dep_var, data_frame[col_list])
        result = model.fit(disp=0)
        largest_pvalue = round(result.pvalues, 3).nlargest(1)
        if largest_pvalue.iloc[0] < 0.05:
            return result
        col_list = col_list.drop(largest_pvalue.index)
result = back_feature_elem(heart_df_constant, df.TenYearCHD, cols)
result.summary()
# Logistic regression equation:
# $P = \dfrac{e^{\beta_0 + \beta_1 X_1}}{1 + e^{\beta_0 + \beta_1 X_1}}$
#
# When all selected features are plugged in:
# $\operatorname{logit}(p) = \log\!\left(\frac{p}{1-p}\right) = \beta_0 + \beta_1 \cdot \text{Sexmale} + \beta_2 \cdot \text{age} + \beta_3 \cdot \text{cigsPerDay} + \beta_4 \cdot \text{totChol} + \beta_5 \cdot \text{sysBP} + \beta_6 \cdot \text{glucose}$
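# As a sketch, the fitted reduced model can be used to turn the equation above into
# predicted 10-year CHD probabilities; only the columns retained by the backward
# elimination (result.params.index) are passed to predict().
selected_cols = result.params.index
predicted_prob = result.predict(heart_df_constant[selected_cols])
print(predicted_prob[:5])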
# # Interpreting the results: Odds Ratios, Confidence Intervals and P-values
params = np.exp(result.params)
conf = np.exp(result.conf_int())
conf["OR"] = params
pvalue = round(result.pvalues, 3)
conf["pvalue"] = pvalue
conf.columns = ["CI 95%(2.5%)", "CI 95%(97.5%)", "Odds Ratio", "pvalue"]
print((conf))
# # Divide into predictor and target variables
#
X = df.iloc[:, :-1]
y = df.iloc[:, -1]
X
y
# # Training and Testing Data sheet
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=40
)
X_train
X_test
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_train_scaled
X_test_scaled = scaler.transform(X_test)
X_test_scaled
# # Logistic Regression
# # Model Accuracy
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
from sklearn.preprocessing import StandardScaler
logreg = LogisticRegression(C=9)
logreg.fit(X_train_scaled, y_train)
y_predict1 = logreg.predict(X_test_scaled)
# # Confusion Matrix
logreg_cm = confusion_matrix(y_test, y_predict1)
f, ax = plt.subplots(figsize=(5, 5))
sns.heatmap(
logreg_cm,
annot=True,
linewidth=0.7,
linecolor="cyan",
fmt="g",
ax=ax,
cmap="YlGnBu",
)
plt.title("Logistic Regression Classification Confusion Matrix\n")
plt.xlabel("y predict")
plt.ylabel("y test")
print("\n")
plt.show()
print("\n")
# The confusion matrix shows 627+3 = 630 correct predictions and 95+1 = 96 incorrect ones (a quick metric cross-check follows the list below).
# * True Positives: 3
# * True Negatives: 627
# * False Positives: 1 (Type I error)
# * False Negatives: 95 ( Type II error)
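# Quick cross-check of the interpretation above, deriving accuracy, sensitivity and
# specificity directly from the counts listed in the bullets (TP, TN, FP, FN):
TP, TN, FP, FN = 3, 627, 1, 95
print("accuracy   :", round((TP + TN) / (TP + TN + FP + FN), 3))  # ~0.868
print("sensitivity:", round(TP / (TP + FN), 3))  # recall for the CHD=1 class, ~0.031
print("specificity:", round(TN / (TN + FP), 3))  # ~0.998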
print("\n")
score_logreg = logreg.score(X_test_scaled, y_test)
print("Logistic Regression Score = ", score_logreg * 100, "%")
print("\n")
# # Classification Report
#
print("\nClassification Report for Logistic Regression\n")
print(classification_report(y_test, y_predict1))
print("\n")
# # Gaussian Naive Bayes
nbcla = GaussianNB()
nbcla.fit(X_train_scaled, y_train)
y_predict2 = nbcla.predict(X_test_scaled)
# # Confusion Matrix
nbcla_cm = confusion_matrix(y_test, y_predict2)
f, ax = plt.subplots(figsize=(5, 5))
sns.heatmap(
nbcla_cm, annot=True, linewidth=0.7, linecolor="cyan", fmt="g", ax=ax, cmap="YlGnBu"
)
plt.title("Gaussian Naive Bayes Classification Confusion Matrix\n")
plt.xlabel("y predict")
plt.ylabel("y test")
print("\n")
plt.show()
print("\n")
# The confusion matrix shows 585+13 = 598 correct predictions and 85+43= 128 incorrect ones.
# * True Positives: 13
# * True Negatives: 585
# * False Positives: 43 (Type I error)
# * False Negatives: 85 ( Type II error)
# # Test Score
print("\n")
score_nbcla = nbcla.score(X_test_scaled, y_test)
print("Gaussian Naive Bayes Score = ", score_nbcla * 100, "%")
print("\n")
# # Classification Report
print("\nClassification Report for Gaussian Naive Bayes\n")
print(classification_report(y_test, y_predict2))
print("\n")
# # Decision Tree
dtcla = DecisionTreeClassifier(random_state=9)
dtcla.fit(X_train_scaled, y_train)
y_predict3 = dtcla.predict(X_test_scaled)
# # Confusion Matrix
dtcla_cm = confusion_matrix(y_test, y_predict3)
f, ax = plt.subplots(figsize=(5, 5))
sns.heatmap(
dtcla_cm, annot=True, linewidth=0.7, linecolor="cyan", fmt="g", ax=ax, cmap="YlGnBu"
)
plt.title("Decision Tree Classification Confusion Matrix\n")
plt.xlabel("y predict")
plt.ylabel("y test")
print("\n")
plt.show()
print("\n")
# The confusion matrix shows 559+18 = 577 correct predictions and 80+69 = 149 incorrect ones.
# * True Positives: 18
# * True Negatives: 559
# * False Positives: 69 (Type I error)
# * False Negatives: 80 ( Type II error)
# # Test Score
print("\n")
score_dtcla = dtcla.score(X_test_scaled, y_test)
print("Decision Tree Score = ", score_dtcla * 100, "%")
print("\n")
# # Classification Report
print("\n\t\t\tClassification Tree for Decision Tree\n")
print(classification_report(y_test, y_predict3))
print("\n")
# # All Test Score
print("\n")
Testscores = pd.Series(
[score_logreg * 100, score_nbcla * 100, score_dtcla * 100],
index=[
"Logistic Regression Score = ",
"Naive Bayes Score = ",
"Decision Tree Score = ",
],
)
print(Testscores)
print("\n")
# # All Confusion Matrix
print("\n")
fig = plt.figure(figsize=(20, 15))
ax1 = fig.add_subplot(3, 3, 1)
ax1.set_title("Logistic Regression Classification\n")
ax2 = fig.add_subplot(3, 3, 2)
ax2.set_title("Naive Bayes Classification\n")
ax3 = fig.add_subplot(3, 3, 3)
ax3.set_title("Decision Tree Classification\n")
sns.heatmap(
data=logreg_cm,
annot=True,
linewidth=0.7,
linecolor="cyan",
cmap="YlGnBu",
fmt="g",
ax=ax1,
)
sns.heatmap(
data=nbcla_cm,
annot=True,
linewidth=0.7,
linecolor="cyan",
cmap="YlGnBu",
fmt="g",
ax=ax2,
)
sns.heatmap(
data=dtcla_cm,
annot=True,
linewidth=0.7,
linecolor="cyan",
cmap="YlGnBu",
fmt="g",
ax=ax3,
)
plt.show()
print("\n")
# # Comparison
x = ["Logistic Regression", "G. Naive Bayes", "Decision Tree"]
y = [score_logreg, score_nbcla, score_dtcla]
print("\n")
plt.bar(x, y)
plt.xlabel("\nClassification Algorithms")
plt.ylabel("Scores\n")
plt.title("Classification Algorithms Score Comparison Bar Plot\n")
plt.show()
print("\n")
# # Scatter Plot
print("\n")
colors = np.random.rand(3)
plt.xlabel("\nClassification Algorithms")
plt.ylabel("Scores\n")
plt.title("Classification Algorithms Score Comparison Scatter Plot\n")
plt.scatter(x, y, s=200, c=colors)
plt.show()
print("\n")
# # Compare Scores and Find Out The Best Algorithm
#
al = False
ln = False
ld = False
nd = False
if (
score_logreg == score_nbcla
and score_logreg == score_dtcla
and score_nbcla == score_dtcla
):
al = True
if score_logreg == score_nbcla:
ln = True
if score_logreg == score_dtcla:
ld = True
if score_nbcla == score_dtcla:
nd = True
if al:
print("\nAll Models Perform The Same\n")
elif ln:
    print("\nLogistic Regression and Gaussian Naive Bayes Perform Equally Well\n")
elif ld:
    print("\nLogistic Regression and Decision Tree Perform Equally Well\n")
elif nd:
    print("\nGaussian Naive Bayes and Decision Tree Perform Equally Well\n")
else:
if score_logreg > score_nbcla and score_logreg > score_dtcla:
print("\nLogistic Regression Performs Better\n")
if score_nbcla > score_logreg and score_nbcla > score_dtcla:
print("\nGaussian Naive Bayes Performs Better\n")
if score_dtcla > score_logreg and score_dtcla > score_nbcla:
print("\nDecision Tree Performs Better\n")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/678/129678685.ipynb
| null | null |
[{"Id": 129678685, "ScriptId": 38563315, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15084504, "CreationDate": "05/15/2023 17:19:16", "VersionNumber": 1.0, "Title": "Heart Disease Prediction", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 1029.0, "LinesInsertedFromPrevious": 1029.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
| null | null | null | null |
| false | 0 | 9,570 | 1 | 9,570 | 9,570 |
||
129678769
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# importing data
x_train = pd.read_csv("/kaggle/input/playground-series-s3e14/train.csv")
x_test = pd.read_csv("/kaggle/input/playground-series-s3e14/test.csv")
sample = pd.read_csv("/kaggle/input/playground-series-s3e14/sample_submission.csv")
# # **TRAINING DATA**
# separate the target variable from the training features
y = x_train["yield"]
x_train.drop(["yield"], axis=1, inplace=True)
# feature engineering
x_train["pollinators"] = (
x_train["honeybee"] + x_train["bumbles"] + x_train["andrena"] + x_train["osmia"]
)
x_test["pollinators"] = (
x_test["honeybee"] + x_test["bumbles"] + x_test["andrena"] + x_test["osmia"]
)
x_train.drop(["honeybee"], axis=1, inplace=True)
x_train.drop(["RainingDays"], axis=1, inplace=True)
x_train.drop(["MaxOfUpperTRange"], axis=1, inplace=True)
x_train.drop(["MinOfUpperTRange"], axis=1, inplace=True)
x_train.drop(["MaxOfLowerTRange"], axis=1, inplace=True)
x_train.drop(["MinOfLowerTRange"], axis=1, inplace=True)
x_train.drop(["bumbles"], axis=1, inplace=True)
x_train.drop(["andrena"], axis=1, inplace=True)
x_train.drop(["osmia"], axis=1, inplace=True)
x_test.drop(["honeybee"], axis=1, inplace=True)
x_test.drop(["RainingDays"], axis=1, inplace=True)
x_test.drop(["MaxOfUpperTRange"], axis=1, inplace=True)
x_test.drop(["MinOfUpperTRange"], axis=1, inplace=True)
x_test.drop(["MaxOfLowerTRange"], axis=1, inplace=True)
x_test.drop(["MinOfLowerTRange"], axis=1, inplace=True)
x_test.drop(["bumbles"], axis=1, inplace=True)
x_test.drop(["andrena"], axis=1, inplace=True)
x_test.drop(["osmia"], axis=1, inplace=True)
# no. of data
m = len(x_test)
# # **KNN**
from sklearn.neighbors import KNeighborsRegressor
knn_model = KNeighborsRegressor(n_neighbors=5).fit(x_train, y)
# Making Predictions
y_test_knn = knn_model.predict(x_test)
result_knn = x_test[["id"]].copy()
result_knn["yield"] = y_test_knn.tolist()
result_knn
error_knn = pd.DataFrame()
error_knn["diff"] = abs(y_test_knn - sample["yield"])
# mean absolute deviation from the sample-submission values (a baseline sanity check only; the test set has no true labels)
s_knn = error_knn["diff"].sum()
mae_knn = s_knn / m
mae_knn
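# Equivalent computation with scikit-learn's built-in metric (same caveat as above):
from sklearn.metrics import mean_absolute_error

mean_absolute_error(sample["yield"], y_test_knn)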
result_knn.to_csv("submission_knn.csv", index=False, header=True)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/678/129678769.ipynb
| null | null |
[{"Id": 129678769, "ScriptId": 38563186, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15100167, "CreationDate": "05/15/2023 17:20:06", "VersionNumber": 1.0, "Title": "notebook6028b1b8d1", "EvaluationDate": "05/15/2023", "IsChange": true, "TotalLines": 81.0, "LinesInsertedFromPrevious": 81.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 967 | 0 | 967 | 967 |
||
129433282
|
# # Domino tiles in a set
for left in range(7):
for right in range(left, 7):
print(
"[" + str(left) + " | " + str(right) + "]", end=" "
) # end parameter creates a new line if left blank with a space.
print()
# # Is the number a power of a given base? [Recursion]
# Note: base is assumed to be a positive number. Tip: for functions that return a boolean value, you can return the result of a comparison.
def is_power_of(number, base):
# Base case: when number is smaller than base.
if number < base:
# If number is equal to 1, it's a power (base**0).
return number == 1
# Recursive case: keep dividing number by base.
return is_power_of(number / base, base)
print(is_power_of(8, 2)) # Should be True
print(is_power_of(64, 4)) # Should be True
print(is_power_of(70, 10)) # Should be False
# # Sum of all positive numbers [Recursion]
def sum_positive_numbers(n):
if n <= 1:
return n
return n + sum_positive_numbers(n - 1)
print(sum_positive_numbers(3)) # Should be 6
print(sum_positive_numbers(5)) # Should be 15
# # Count the digits of a number
def digits(n):
count = 0
if n == 0:
count += 1
while n != 0:
n = n // 10
count += 1
return count
print(digits(25)) # Should print 2
print(digits(144)) # Should print 3
print(digits(1000)) # Should print 4
print(digits(0)) # Should print 1
# # Sequence of numbers in descending order
def sequence(low, high):
for x in range(high, low, -1):
        # count down from high to low (inclusive); the original range(high, 0, -1) only worked when low was 1
        for y in range(high, low - 1, -1):
            if y == low:
                print(str(y))
            else:
                print(str(y), end=", ")
sequence(1, 3)
# Should print the sequence 3, 2, 1 two times, as shown above.
# # Palindrome
def is_palindrome(input_string):
new_string = ""
reverse_string = ""
for letter in input_string.lower():
if letter != " ":
new_string = new_string + letter
reverse_string = letter + reverse_string
if reverse_string == new_string:
return True
return False
print(is_palindrome("Never Odd or Even")) # Should be True
print(is_palindrome("abc")) # Should be False
print(is_palindrome("kayak")) # Should be True
# # Get a word from a sentence
def get_word(sentence, n):
if n > 0:
words = sentence.split()
if n <= len(words):
return words[n - 1]
return ""
print(get_word("This is a lesson about lists", 4)) # Should print: lesson
print(get_word("This is a lesson about lists", -4)) # Nothing
print(get_word("Now we are cooking!", 1)) # Should print: Now
print(get_word("Now we are cooking!", 5)) # Nothing
# # Enumerate in Python
#
winner = ["Fabi", "Nasif", "Rafa"]
for index, name in enumerate(winner):
print("{} - {}".format(index + 1, name))
# # Printing names & emails of people
def memberDetails(people):
placeHolder = []
for name, email in people:
placeHolder.append("{} <{}>".format(name, email))
return placeHolder
# Calling the function
print(
memberDetails([("Fabi", "[email protected]"), ("John Doe", "[email protected]")])
)
# # Skip elements in a list
#
def skip_elements(elements):
result = []
for index, element in enumerate(elements):
if index % 2 == 0:
result.append(element)
return result
print(
skip_elements(["a", "b", "c", "d", "e", "f", "g"])
) # Should be ['a', 'c', 'e', 'g']
print(
skip_elements(["Orange", "Pineapple", "Strawberry", "Kiwi", "Peach"])
) # Should be ['Orange', 'Strawberry', 'Peach']
# # Multiplication Table
multipleTable = [5 * x for x in range(1, 11)]
print(multipleTable)
# # Lengths of the elements of a list
languages = ["python", "java", "c++", "ruby", "swift"]
lengths = [len(element) for element in languages]
print(lengths)
# # Odd numbers within a range
#
def odd_numbers(n):
return [x for x in range(1, n + 1) if x % 2 != 0]
print(odd_numbers(11)) # Should print [1, 3, 5, 7, 9, 11]
# # Replacing file extensions
filenames = ["program.c", "stdio.hpp", "sample.hpp", "a.out", "math.hpp", "hpp.out"]
newfilenames = []
for name in filenames:
if name.endswith(".hpp"):
newfilenames.append(name.replace(".hpp", ".h"))
else:
newfilenames.append(name)
print(newfilenames)
# Should be ["program.c", "stdio.h", "sample.h", "a.out", "math.h", "hpp.out"]
# # Pig Latin
def pig_latin(text):
    # Separate the text into words
    words = text.split()
    pig_words = []
    for word in words:
        # Move the first letter to the end of the word and add "ay"
        pig_words.append(word[1:] + word[0] + "ay")
    # Turn the list back into a phrase (no trailing space)
    return " ".join(pig_words)
print(pig_latin("hello how are you")) # Should be "ellohay owhay reaay ouyay"
print(
pig_latin("programming in python is fun")
) # Should be "rogrammingpay niay ythonpay siay unfay"
# # Count letters in words/sentences
def countLetters(text):
dictionary = {} # creating an empty dictionary
for letter in text:
if letter not in dictionary:
dictionary[letter] = 0
dictionary[letter] += 1
return dictionary
countLetters("the quick brown fox jumped over the lazy dog")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/433/129433282.ipynb
| null | null |
[{"Id": 129433282, "ScriptId": 38439788, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10398728, "CreationDate": "05/13/2023 18:55:24", "VersionNumber": 4.0, "Title": "Python Mini Exercises [Level: Beginner]", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 224.0, "LinesInsertedFromPrevious": 155.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 69.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 1,674 | 0 | 1,674 | 1,674 |
||
129869672
|
<jupyter_start><jupyter_text>Iris Flower Dataset
### Context
The Iris flower data set is a multivariate data set introduced by the British statistician and biologist Ronald Fisher in his 1936 paper The use of multiple measurements in taxonomic problems. It is sometimes called Anderson's Iris data set because Edgar Anderson collected the data to quantify the morphologic variation of Iris flowers of three related species. The data set consists of 50 samples from each of three species of Iris (Iris Setosa, Iris virginica, and Iris versicolor). Four features were measured from each sample: the length and the width of the sepals and petals, in centimeters.
This dataset became a typical test case for many statistical classification techniques in machine learning such as support vector machines
### Content
The dataset contains a set of 150 records under 5 attributes - Petal Length, Petal Width, Sepal Length, Sepal width and Class(Species).
Kaggle dataset identifier: iris-flower-dataset
<jupyter_code>import pandas as pd
df = pd.read_csv('iris-flower-dataset/IRIS.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 150 entries, 0 to 149
Data columns (total 5 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 sepal_length 150 non-null float64
1 sepal_width 150 non-null float64
2 petal_length 150 non-null float64
3 petal_width 150 non-null float64
4 species 150 non-null object
dtypes: float64(4), object(1)
memory usage: 6.0+ KB
<jupyter_text>Examples:
{
"sepal_length": 5.1,
"sepal_width": 3.5,
"petal_length": 1.4,
"petal_width": 0.2,
"species": "Iris-setosa"
}
{
"sepal_length": 4.9,
"sepal_width": 3.0,
"petal_length": 1.4,
"petal_width": 0.2,
"species": "Iris-setosa"
}
{
"sepal_length": 4.7,
"sepal_width": 3.2,
"petal_length": 1.3,
"petal_width": 0.2,
"species": "Iris-setosa"
}
{
"sepal_length": 4.6,
"sepal_width": 3.1,
"petal_length": 1.5,
"petal_width": 0.2,
"species": "Iris-setosa"
}
<jupyter_script>import pandas as pd
import numpy as np
# Groupby operation is used to manipulate data in groups/category and to perform various operations.
df = pd.read_csv("/kaggle/input/iris-flower-dataset/IRIS.csv")
df
df.info()
x = df.groupby("species")
x
type(x)
dir(x)
x.groups
# converting it into a list
xl = list(x)
xl
xl[0]
xl[0][1]
# **Manual substitute for the groupby object**
df.species.unique()  # so there can be 3 groups in the iris dataframe
# .loc is a substitute for groupby, but it is impractical for large datasets with many groups.
df.loc[df.species == "Iris-setosa"]
df.loc[df.species == "Iris-versicolor"]
df.loc[df.species == "Iris-virginica"]
# **How a groupby object can be used with multiple keys.**
df = pd.read_csv("/kaggle/input/data-science-job-salaries/ds_salaries.csv")
df
df.job_title.unique()
df.job_title.nunique() # 50 unique jobs.
# splitting the data into groups by job title. This will create 50 groups.
split1 = df.groupby("job_title")
len(split1)
# creating another group via salary
df.salary.nunique()
split2 = df.groupby(["job_title", "salary"])
len(split2)
split2 = list(split2)
split2[3][:]
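# In practice, multi-key (or single-key) groups are usually aggregated rather than listed.
# A small illustration using only columns already referenced above:
df.groupby("job_title")["salary"].agg(["mean", "count"]).sort_values(
    "count", ascending=False
).head()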
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/869/129869672.ipynb
|
iris-flower-dataset
|
arshid
|
[{"Id": 129869672, "ScriptId": 38542833, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11303094, "CreationDate": "05/17/2023 04:56:38", "VersionNumber": 1.0, "Title": "Groupby Operations", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 69.0, "LinesInsertedFromPrevious": 69.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 186269661, "KernelVersionId": 129869672, "SourceDatasetVersionId": 23404}, {"Id": 186269662, "KernelVersionId": 129869672, "SourceDatasetVersionId": 3806098}]
|
[{"Id": 23404, "DatasetId": 17860, "DatasourceVersionId": 23408, "CreatorUserId": 1272228, "LicenseName": "CC0: Public Domain", "CreationDate": "03/22/2018 15:18:06", "VersionNumber": 1.0, "Title": "Iris Flower Dataset", "Slug": "iris-flower-dataset", "Subtitle": "Iris flower data set used for multi-class classification.", "Description": "### Context\n\nThe Iris flower data set is a multivariate data set introduced by the British statistician and biologist Ronald Fisher in his 1936 paper The use of multiple measurements in taxonomic problems. It is sometimes called Anderson's Iris data set because Edgar Anderson collected the data to quantify the morphologic variation of Iris flowers of three related species. The data set consists of 50 samples from each of three species of Iris (Iris Setosa, Iris virginica, and Iris versicolor). Four features were measured from each sample: the length and the width of the sepals and petals, in centimeters.\n\nThis dataset became a typical test case for many statistical classification techniques in machine learning such as support vector machines\n\n\n### Content\n\nThe dataset contains a set of 150 records under 5 attributes - Petal Length, Petal Width, Sepal Length, Sepal width and Class(Species).\n\n\n### Acknowledgements\n\nThis dataset is free and is publicly available at the UCI Machine Learning Repository", "VersionNotes": "Initial release", "TotalCompressedBytes": 4617.0, "TotalUncompressedBytes": 4617.0}]
|
[{"Id": 17860, "CreatorUserId": 1272228, "OwnerUserId": 1272228.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 23404.0, "CurrentDatasourceVersionId": 23408.0, "ForumId": 25592, "Type": 2, "CreationDate": "03/22/2018 15:18:06", "LastActivityDate": "03/22/2018", "TotalViews": 467985, "TotalDownloads": 107813, "TotalVotes": 688, "TotalKernels": 925}]
|
[{"Id": 1272228, "UserName": "arshid", "DisplayName": "MathNerd", "RegisterDate": "09/17/2017", "PerformanceTier": 1}]
|
import pandas as pd
import numpy as np
# Groupby operation is used to manipulate data in groups/category and to perform various operations.
df = pd.read_csv("/kaggle/input/iris-flower-dataset/IRIS.csv")
df
df.info()
x = df.groupby("species")
x
type(x)
dir(x)
x.groups
# converting it into a list
xl = list(x)
xl
xl[0]
xl[0][1]
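# A hedged aside (not in the original notebook): a single group's rows can be retrieved
# directly from the GroupBy object with get_group, without converting it to a list first.
x.get_group("Iris-setosa")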
# **Manual substitute for the groupby object**
df.species.unique()  # so there can be 3 groups in the iris dataframe
# .loc is a substitute for groupby, but it does not scale well to large datasets.
df.loc[df.species == "Iris-setosa"]
df.loc[df.species == "Iris-versicolor"]
df.loc[df.species == "Iris-virginica"]
# **How a groupby object can be used with multiple keys.**
df = pd.read_csv("/kaggle/input/data-science-job-salaries/ds_salaries.csv")
df
df.job_title.unique()
df.job_title.nunique() # 50 unique jobs.
# splitting the data into groups by job title. This will create 50 groups.
split1 = df.groupby("job_title")
len(split1)
# creating another group via salary
df.salary.nunique()
split2 = df.groupby(["job_title", "salary"])
len(split2)
split2 = list(split2)
split2[3][:]
|
[{"iris-flower-dataset/IRIS.csv": {"column_names": "[\"sepal_length\", \"sepal_width\", \"petal_length\", \"petal_width\", \"species\"]", "column_data_types": "{\"sepal_length\": \"float64\", \"sepal_width\": \"float64\", \"petal_length\": \"float64\", \"petal_width\": \"float64\", \"species\": \"object\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 150 entries, 0 to 149\nData columns (total 5 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 sepal_length 150 non-null float64\n 1 sepal_width 150 non-null float64\n 2 petal_length 150 non-null float64\n 3 petal_width 150 non-null float64\n 4 species 150 non-null object \ndtypes: float64(4), object(1)\nmemory usage: 6.0+ KB\n", "summary": "{\"sepal_length\": {\"count\": 150.0, \"mean\": 5.843333333333334, \"std\": 0.828066127977863, \"min\": 4.3, \"25%\": 5.1, \"50%\": 5.8, \"75%\": 6.4, \"max\": 7.9}, \"sepal_width\": {\"count\": 150.0, \"mean\": 3.0540000000000003, \"std\": 0.4335943113621737, \"min\": 2.0, \"25%\": 2.8, \"50%\": 3.0, \"75%\": 3.3, \"max\": 4.4}, \"petal_length\": {\"count\": 150.0, \"mean\": 3.758666666666666, \"std\": 1.7644204199522626, \"min\": 1.0, \"25%\": 1.6, \"50%\": 4.35, \"75%\": 5.1, \"max\": 6.9}, \"petal_width\": {\"count\": 150.0, \"mean\": 1.1986666666666668, \"std\": 0.7631607417008411, \"min\": 0.1, \"25%\": 0.3, \"50%\": 1.3, \"75%\": 1.8, \"max\": 2.5}}", "examples": "{\"sepal_length\":{\"0\":5.1,\"1\":4.9,\"2\":4.7,\"3\":4.6},\"sepal_width\":{\"0\":3.5,\"1\":3.0,\"2\":3.2,\"3\":3.1},\"petal_length\":{\"0\":1.4,\"1\":1.4,\"2\":1.3,\"3\":1.5},\"petal_width\":{\"0\":0.2,\"1\":0.2,\"2\":0.2,\"3\":0.2},\"species\":{\"0\":\"Iris-setosa\",\"1\":\"Iris-setosa\",\"2\":\"Iris-setosa\",\"3\":\"Iris-setosa\"}}"}}]
| true | 2 |
<start_data_description><data_path>iris-flower-dataset/IRIS.csv:
<column_names>
['sepal_length', 'sepal_width', 'petal_length', 'petal_width', 'species']
<column_types>
{'sepal_length': 'float64', 'sepal_width': 'float64', 'petal_length': 'float64', 'petal_width': 'float64', 'species': 'object'}
<dataframe_Summary>
{'sepal_length': {'count': 150.0, 'mean': 5.843333333333334, 'std': 0.828066127977863, 'min': 4.3, '25%': 5.1, '50%': 5.8, '75%': 6.4, 'max': 7.9}, 'sepal_width': {'count': 150.0, 'mean': 3.0540000000000003, 'std': 0.4335943113621737, 'min': 2.0, '25%': 2.8, '50%': 3.0, '75%': 3.3, 'max': 4.4}, 'petal_length': {'count': 150.0, 'mean': 3.758666666666666, 'std': 1.7644204199522626, 'min': 1.0, '25%': 1.6, '50%': 4.35, '75%': 5.1, 'max': 6.9}, 'petal_width': {'count': 150.0, 'mean': 1.1986666666666668, 'std': 0.7631607417008411, 'min': 0.1, '25%': 0.3, '50%': 1.3, '75%': 1.8, 'max': 2.5}}
<dataframe_info>
RangeIndex: 150 entries, 0 to 149
Data columns (total 5 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 sepal_length 150 non-null float64
1 sepal_width 150 non-null float64
2 petal_length 150 non-null float64
3 petal_width 150 non-null float64
4 species 150 non-null object
dtypes: float64(4), object(1)
memory usage: 6.0+ KB
<some_examples>
{'sepal_length': {'0': 5.1, '1': 4.9, '2': 4.7, '3': 4.6}, 'sepal_width': {'0': 3.5, '1': 3.0, '2': 3.2, '3': 3.1}, 'petal_length': {'0': 1.4, '1': 1.4, '2': 1.3, '3': 1.5}, 'petal_width': {'0': 0.2, '1': 0.2, '2': 0.2, '3': 0.2}, 'species': {'0': 'Iris-setosa', '1': 'Iris-setosa', '2': 'Iris-setosa', '3': 'Iris-setosa'}}
<end_description>
| 385 | 0 | 1,084 | 385 |
129444800
|
<jupyter_start><jupyter_text>timm-master
Kaggle dataset identifier: timmmaster
<jupyter_script>import albumentations
import cv2
import os, os.path
from PIL import Image  # for converting from RGB to YCbCr
import sys, timeit, math, copy, random
sys.path.append("../input/timmmaster/")
# Basics
import pandas as pd
import numpy as np
from numpy import pi # for DCT
from numpy import r_ # for DCT
import scipy # for cosine similarity
from scipy import fftpack # for DCT
import random
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.image as mpimg # to check images
from tqdm.notebook import tqdm  # progress bar
# SKlearn
from sklearn.model_selection import KFold
from sklearn import metrics
# PyTorch
import torch
import torch.nn as nn
import torch.optim as optim
from torch import FloatTensor, LongTensor
from torch.utils.data import Dataset, DataLoader
from torch.optim.lr_scheduler import ReduceLROnPlateau
import torch.nn.functional as F
# Data Augmentation for Image Preprocessing
from albumentations import (
ToFloat,
Normalize,
VerticalFlip,
HorizontalFlip,
Compose,
Resize,
RandomBrightness,
RandomContrast,
HueSaturationValue,
Blur,
GaussNoise,
)
# from albumentations.pytorch import ToTensorV2, ToTensor
from torchvision.models import resnet34
# from torchvision import transforms as transforms
import warnings
warnings.filterwarnings("ignore")
import transformers
from albumentations.pytorch import ToTensorV2
import timm
from sklearn.metrics import average_precision_score
import jpegio as jio
# model_names = timm.list_models(pretrained=True)
# print(model_names)
def set_seed(seed=1234):
"""Sets the seed of the entire notebook so results are the same every time we run.
This is for REPRODUCIBILITY."""
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
# When running on the CuDNN backend, two further options must be set
torch.backends.cudnn.deterministic = True
# Set a fixed value for the hash seed
os.environ["PYTHONHASHSEED"] = str(seed)
set_seed()
device = "cuda"
print("Device available now:", device)
"""path1='../input/alaska2-image-steganalysis'
def append_path(pre):
return np.vectorize(lambda file: os.path.join(path1, pre, file))
train_filenames_cover = np.array(os.listdir("../input/alaska2-image-steganalysis/Cover"))
train_filenames_cover=sorted(train_filenames_cover)
positives = train_filenames_cover.copy()
neg_train_path1 = append_path('Cover')(positives[:8000])
neg_train_path2 = append_path('Cover')(positives[10000:18000])
neg_train_path3 = append_path('Cover')(positives[20000:28000])
ned_valid_path1 = append_path('Cover')(positives[8000:9000])
ned_valid_path2 = append_path('Cover')(positives[18000:19000])
ned_valid_path3 = append_path('Cover')(positives[28000:29000])
neg_test_path1 = append_path('Cover')(positives[9000:10000])
neg_test_path2 = append_path('Cover')(positives[19000:20000])
neg_test_path3 = append_path('Cover')(positives[29000:30000])
jmi_train_path=append_path('JMiPOD')(positives[:8000])
jmi_valid_path=append_path('JMiPOD')(positives[8000:9000])
jmi_test_path=append_path('JMiPOD')(positives[9000:10000])
jun_train_path=append_path('JUNIWARD')(positives[10000:18000])
jun_valid_path=append_path('JUNIWARD')(positives[18000:19000])
jun_test_path=append_path('JUNIWARD')(positives[19000:20000])
ue_train_path=append_path('UERD')(positives[20000:28000])
ue_valid_path=append_path('UERD')(positives[28000:29000])
ue_test_path=append_path('UERD')(positives[29000:30000])
train_paths = np.concatenate([neg_train_path1, neg_train_path2, neg_train_path3, jmi_train_path, jun_train_path, ue_train_path])
valid_paths= np.concatenate([ned_valid_path1, ned_valid_path2, ned_valid_path3, jmi_valid_path, jun_valid_path, ue_valid_path])
test_paths=np.concatenate([neg_test_path1, neg_test_path2, neg_test_path3, jmi_test_path, jun_test_path, ue_test_path])
train_labels = np.array([0] * len(neg_train_path1) + [0] * len(neg_train_path2)+[0] * len(neg_train_path3)+[1] * len(jmi_train_path)+ [1] * len(jun_train_path)+[1] * len(ue_train_path))
valid_labels = np.array([0] * len(ned_valid_path1) + [0] * len(ned_valid_path2)+[0] * len(ned_valid_path3)+[1] * len(jmi_valid_path)+ [1] * len(jun_valid_path)+[1] * len(ue_valid_path))
test_labels = np.array([0] * len(neg_test_path1) + [0] * len(neg_test_path2)+[0] * len(neg_test_path3)+[1] * len(jmi_test_path)+ [1] * len(jun_test_path)+[1] * len(ue_test_path))"""
path = "../input/alaska2-image-steganalysis"
def append_path(pre):
return np.vectorize(lambda file: os.path.join(path, pre, file))
train_filenames_cover = np.array(
os.listdir("../input/alaska2-image-steganalysis/Cover")
)
train_filenames_cover = sorted(train_filenames_cover)
positives = train_filenames_cover.copy()
neg_train_path = append_path("Cover")(positives[:12000])
neg_valid_path = append_path("Cover")(positives[12000:13500])
neg_test_path = append_path("Cover")(positives[13500:15000])
jun_train_path = append_path("JUNIWARD")(positives[:12000])
jun_valid_path = append_path("JUNIWARD")(positives[12000:13500])
jun_test_path = append_path("JUNIWARD")(positives[13500:15000])
train_paths = np.concatenate([neg_train_path, jun_train_path])
valid_paths = np.concatenate([neg_valid_path, jun_valid_path])
test_paths = np.concatenate([neg_test_path, jun_test_path])
train_labels = np.array([0] * len(neg_train_path) + [1] * len(jun_train_path))
valid_labels = np.array([0] * len(neg_valid_path) + [1] * len(jun_valid_path))
test_labels = np.array([0] * len(neg_test_path) + [1] * len(jun_test_path))
train_df = pd.DataFrame({"Id": train_paths, "Label": train_labels})
valid_df = pd.DataFrame({"Id": valid_paths, "Label": valid_labels})
test_df = pd.DataFrame({"Id": test_paths, "Label": test_labels})
train_df.head(10)
path1 = "../input/cover-and-jun1"
def append_path1(pre):
return np.vectorize(lambda file: os.path.join(path1, pre, file))
path2 = "../input/cover-and-jun2"
def append_path2(pre):
return np.vectorize(lambda file: os.path.join(path2, pre, file))
path3 = "../input/cover-and-jun3"
def append_path3(pre):
return np.vectorize(lambda file: os.path.join(path3, pre, file))
path4 = "../input/cover-and-jun4"
def append_path4(pre):
return np.vectorize(lambda file: os.path.join(path4, pre, file))
path5 = "../input/cover-and-jun5"
def append_path5(pre):
return np.vectorize(lambda file: os.path.join(path5, pre, file))
train_filenames_cover1 = np.array(os.listdir("../input/cover-and-jun1/cover1"))
train_filenames_cover1 = sorted(train_filenames_cover1)
train_filenames_cover2 = np.array(os.listdir("../input/cover-and-jun2/cover2"))
train_filenames_cover2 = sorted(train_filenames_cover2)
train_filenames_cover3 = np.array(os.listdir("../input/cover-and-jun3/cover3"))
train_filenames_cover3 = sorted(train_filenames_cover3)
train_filenames_cover4 = np.array(os.listdir("../input/cover-and-jun4/cover4"))
train_filenames_cover4 = sorted(train_filenames_cover4)
train_filenames_cover5 = np.array(os.listdir("../input/cover-and-jun5/cover5"))
train_filenames_cover5 = sorted(train_filenames_cover5)
positives1 = train_filenames_cover1.copy()
positives2 = train_filenames_cover2.copy()
positives3 = train_filenames_cover3.copy()
positives4 = train_filenames_cover4.copy()
positives5 = train_filenames_cover5.copy()
neg_train_path1 = append_path1("cover1")(positives1[:3000])
neg_train_path2 = append_path2("cover2")(positives2[:3000])
neg_train_path3 = append_path3("cover3")(positives3[:3000])
neg_train_path4 = append_path4("cover4")(positives4[:3000])
ned_valid_path = append_path5("cover5")(positives5[:1500])
neg_test_path = append_path5("cover5")(positives5[1500:3000])
jun_train_path1 = append_path1("JUN1")(positives1[:3000])
jun_train_path2 = append_path2("JUN2")(positives2[:3000])
jun_train_path3 = append_path3("JUN3")(positives3[:3000])
jun_train_path4 = append_path4("JUN4")(positives4[:3000])
jun_valid_path = append_path5("JUN5")(positives5[:1500])
jun_test_path = append_path5("JUN5")(positives5[1500:3000])
train_paths1 = np.concatenate(
[
neg_train_path1,
neg_train_path2,
neg_train_path3,
neg_train_path4,
jun_train_path1,
jun_train_path2,
jun_train_path3,
jun_train_path4,
]
)
valid_paths1 = np.concatenate([ned_valid_path, jun_valid_path])
test_paths1 = np.concatenate([neg_test_path, jun_test_path])
train_labels1 = np.array(
[0] * len(neg_train_path1)
+ [0] * len(neg_train_path2)
+ [0] * len(neg_train_path3)
+ [0] * len(neg_train_path4)
+ [1] * len(jun_train_path1)
+ [1] * len(jun_train_path2)
+ [1] * len(jun_train_path3)
+ [1] * len(jun_train_path4)
)
valid_labels1 = np.array([0] * len(ned_valid_path) + [1] * len(jun_valid_path))
test_labels1 = np.array([0] * len(neg_test_path) + [1] * len(jun_test_path))
train1_df = pd.DataFrame({"Id": train_paths1, "Label": train_labels1})
valid1_df = pd.DataFrame({"Id": valid_paths1, "Label": valid_labels1})
test1_df = pd.DataFrame({"Id": test_paths1, "Label": test_labels1})
train1_df.head(10)
"""train1_df.to_csv('train1.csv', index=False)
valid1_df.to_csv('valid1.csv', index=False)
test1_df.to_csv('test1.csv', index=False)"""
sample_size = 128
num_classes = 1
"""train1_df = pd.read_csv('/kaggle/working/train1.csv',
header=0, names=['Path', 'Label'], dtype = {'Label':np.int32})
valid1_df = pd.read_csv('/kaggle/working/valid1.csv',
header=0, names=['Path', 'Label'], dtype = {'Label':np.int32})"""
"""train1_df['Label'] = train1_df['Label'].astype('float32')
train1_df.dtypes"""
# from torchvision.io import read_image
"""class AlaskaDatasetTrain(Dataset):
def __init__(self, dataframe):
self.dataframe = dataframe
def __len__(self):
return len(self.dataframe)
def __getitem__(self, idx):
img_path = self.dataframe.iloc[idx, 0]
image = read_image(img_path)
label = self.dataframe.iloc[idx, 1]
return image, label"""
"""prom = np.zeros(shape = (512, 512, 3), dtype="float32")
koef = np.zeros(shape = (512, 512, 3), dtype="float32")"""
"""20it [00:12, 1.58it/s]
torch.Size([672, 3, 512, 512])
tensor(-0.3711) tensor(21.0777)
tensor(-0.2116) tensor(6.0352)
tensor(0.1129) tensor(5.8410)"""
class CustomDatasetTrain(Dataset):
def __init__(self, dataframe1, dataframe2):
self.dataframe1 = dataframe1
self.dataframe2 = dataframe2
self.transform_image_dct = Compose(
[
Normalize(
mean=[-0.3711, -0.2116, 0.1129],
std=[21.0777, 6.0352, 5.8410],
max_pixel_value=1.0,
),
ToTensorV2(),
]
)
self.transform_image = Compose(
[
Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
max_pixel_value=255.0,
),
ToTensorV2(),
]
)
def __len__(self):
return len(self.dataframe1)
def __getitem__(self, idx):
# koef = np.zeros(shape = (512, 512, 3), dtype="float32")
img_path1 = self.dataframe1.iloc[idx, 0]
# jpeg = jio.read(img_path)
image = cv2.imread(img_path1)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
img_path2 = self.dataframe2.iloc[idx, 0]
koef = np.load(img_path2)
# koef[:,:,0] = jpeg.coef_arrays[0] ; koef[:,:,1] = jpeg.coef_arrays[1] ; koef[:,:,2] = jpeg.coef_arrays[2]
# koef = np.array(koef, dtype=np.float32)
koef = self.transform_image_dct(image=koef)["image"]
label = self.dataframe2.iloc[idx, 1]
label = torch.tensor(label, dtype=torch.float32)
image = self.transform_image(image=image)["image"]
# image_dct=torch.concat([image,koef],dim=0)
image = np.array(image)
# koef=np.array(koef)
image_dct = np.concatenate([image, koef], axis=0)
image_dct = torch.tensor(image_dct)
# koef=torch.tensor(koef)
return image_dct, label
# CustomDatasetTrain expects two dataframes: image paths (train_df) and
# DCT-coefficient paths with labels (train1_df), matching the training setup below.
ex_data = CustomDatasetTrain(train_df, train1_df)
ex_loader = torch.utils.data.DataLoader(
ex_data, batch_size=1, shuffle=True, num_workers=4
)
# example_data = AlaskaDatasetTrain(train_df)
# example_loader = torch.utils.data.DataLoader(example_data, batch_size = 1, shuffle=True)
"""for image, label in example_loader:
images_example = image
label_example = torch.tensor(label, dtype=torch.long)
break
print('Images shape:', images_example.shape)
print('Labels:', label, '\n')"""
num_workers = 2
images = []
for i, (image, label) in tqdm(enumerate(ex_loader)):
images.append(image)
if i == 20:
break
images_res = torch.concat(images, dim=0)
print(images_res.shape)
for i in range(3):
print(images_res[:, i, :, :].mean(), images_res[:, i, :, :].std())
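# Added note: this loop estimates per-channel mean/std from a small sample of loader
# outputs; the values recorded in the docstring above (mean -0.3711, std 21.0777, ...)
# appear to be the source of the hard-coded Normalize statistics in CustomDatasetTrain.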
prom1 = np.zeros(shape=(512, 512, 3), dtype="float32")
norm1 = np.zeros(shape=(512, 512, 3), dtype="float32")
class CustomDatasetTest(Dataset):
def __init__(self, dataframe):
self.dataframe = dataframe
self.transform_image = Compose([ToTensorV2()])
def __len__(self):
return len(self.dataframe)
def __getitem__(self, idx):
koef = np.zeros(shape=(512, 512, 3), dtype="float32")
img_path = self.dataframe.iloc[idx, 0]
jpeg = jio.read(img_path)
# prom=np.load(img_path)
# prom = np.array(prom, dtype=np.float32)
# image=(prom-693)/(693-(-676)) #normalization
# image = cv2.imread(img_path)
# image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
koef[:, :, 0] = jpeg.coef_arrays[0]
koef[:, :, 1] = jpeg.coef_arrays[1]
koef[:, :, 2] = jpeg.coef_arrays[2]
koef = np.array(koef, dtype=np.float32)
# image = (koef-693)/(693-(-676)) #normalization
        image = self.transform_image(image=koef)["image"]
return image
"""class AlaskaDatasetTest(Dataset):
def __init__(self, dataframe):
self.dataframe = dataframe
def __len__(self):
return len(self.dataframe)
def __getitem__(self, idx):
img_path = self.dataframe.iloc[idx, 0]
image = read_image(img_path)
return image"""
print(
"Train Data Size:",
len(train1_df),
"\n" + "Valid Data Size:",
len(valid1_df),
"\n" + "----------------------",
"\n" + "Total:",
len(train1_df) + len(valid1_df),
)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 5))
sns.countplot(x=train1_df["Label"], ax=ax1, palette=sns.color_palette("GnBu_d", 10))
sns.countplot(x=valid1_df["Label"], ax=ax2, palette=sns.color_palette("YlOrRd", 10))
ax1.set_title("Train Data", fontsize=16)
ax2.set_title("Valid Data", fontsize=16)
# Note: despite its name, BaseSwin wraps a timm EfficientNet backbone by default (name="tf_efficientnet_b0_ns").
class BaseSwin(nn.Module):
def __init__(self, name="tf_efficientnet_b0_ns", pretrained=True, n_classes=1):
super(BaseSwin, self).__init__()
self.model = timm.create_model(name, pretrained=pretrained, in_chans=6)
self.n_classes = n_classes
self.n_features = self.model.classifier.in_features
self.model.classifier = nn.Identity()
self.fc = nn.Linear(self.n_features, self.n_classes)
def forward(self, x):
x = self.model(x)
x = self.fc(x)
if self.n_classes == 1:
return x.ravel()
else:
return x
"""class EfficientNetwork(nn.Module):
def __init__(self):
super().__init__()
# Define Feature part
self.features = EfficientNet.from_pretrained('efficientnet-b3')
# Define Classification part
self.classification = nn.Linear(1408, 1)
#self.avg_pool=nn.AvgPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=False,
#count_include_pad=False)
def forward(self, image, prints=False):
if prints: print('Input Image shape:', image.shape)
image = self.features.extract_features(image)
print('Features Image shape:', image.shape)
image = F.avg_pool2d(image, image.size()[2:]).reshape(-1, 1408)
if prints: print('Image Reshaped shape:', image.shape)
out = self.classification(image)
if prints: print('Out shape:', out.shape)
return torch.sigmoid(out)"""
# Create an example model (default backbone: tf_efficientnet_b0_ns with 6 input channels)
model_example = BaseSwin()
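# A hedged sanity check (not in the original notebook): run a dummy forward pass through
# the 6-channel model on CPU. The input size (2, 6, 256, 256) is an arbitrary choice;
# the EfficientNet backbone accepts variable spatial sizes.
with torch.no_grad():
    dummy = torch.randn(2, 6, 256, 256)
    print("Dummy output shape:", model_example.eval()(dummy).shape)  # expected: torch.Size([2])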
train1_data = CustomDatasetTrain(train_df, train1_df)
valid1_data = CustomDatasetTrain(valid_df, valid1_df)
train1_data[0]
"""path1="/kaggle/input/jpegdctpng"
def append_path(pre):
return np.vectorize(lambda file: os.path.join(path1, pre, file))
train_filenames = np.array(os.listdir("/kaggle/input/jpegdctpng/cover"))
train_filenames=sorted(train_filenames)
jmi_path_train=append_path('JMI')(train_filenames[:400])
jmi_path_valid=append_path('JMI')(train_filenames[400:450])
jmi_path_test=append_path('JMI')(train_filenames[450:500])
neg_path_train=append_path('cover')(train_filenames[:400])
neg_path_valid=append_path('cover')(train_filenames[400:450])
neg_path_test=append_path('cover')(train_filenames[450:500])
train_paths = np.concatenate([jmi_path_train, neg_path_train])
valid_paths=np.concatenate([jmi_path_valid, neg_path_valid])
test_paths=np.concatenate([jmi_path_test, neg_path_test])
train_labels=np.array([1] * len(jmi_path_train) + [0] * len(neg_path_train))
test_labels=np.array([1] * len(jmi_path_test) + [0] * len(neg_path_test))
valid_labels=np.array([1] * len(jmi_path_valid) + [0] * len(neg_path_valid))"""
"""train_jmi_df=pd.DataFrame({'Id': train_paths, 'Label': train_labels})
valid_jmi_df=pd.DataFrame({'Id': valid_paths, 'Label': valid_labels})
test_jmi_df=pd.DataFrame({'Id': test_paths, 'Label': test_labels})"""
# train_jmi_data = CustomDatasetTrain(train_jmi_df)
# valid_jmi_data = CustomDatasetTrain(valid_jmi_df)
import gc
def train(
model, epochs, batch_size, num_workers, learning_rate, weight_decay, version="vx"
):
# Create file to save logs
f = open(f"logs_{version}.txt", "w+")
# Data Loaders
train_loader = torch.utils.data.DataLoader(
train1_data,
batch_size=batch_size,
num_workers=num_workers,
drop_last=True,
shuffle=True,
)
valid_loader = torch.utils.data.DataLoader(
valid1_data,
batch_size=batch_size,
num_workers=num_workers,
drop_last=True,
shuffle=True,
)
# Criterion
criterion = torch.nn.BCEWithLogitsLoss()
# Optimizer
optimizer = optim.AdamW(
model.parameters(), lr=learning_rate, weight_decay=weight_decay
)
# train_losses = []
evaluation_losses = []
for epoch in range(epochs):
# Sets the model in training mode
model.train()
train_loss = 0
for images, labels in tqdm(train_loader):
# Need to access the images
images = images.to(device)
labels = labels.to(device)
# Clear gradients
optimizer.zero_grad()
# Make prediction
out = model(images)
# Compute loss and Backpropagate
loss = criterion(out, labels)
loss.backward()
optimizer.step()
# train_loss += loss.item()
# gc.collect()
# Compute average epoch loss
# epoch_loss_train = train_loss / batch_size
# train_losses.append(epoch_loss_train)
# gc.collect()
# ===== Evaluate =====
model.eval()
evaluation_loss = 0
actuals, predictions = [], []
num_correct = 0
num_samples = 0
# To disable gradients
with torch.no_grad():
for images, labels in tqdm(valid_loader):
images = images.to(device)
labels = labels.to(device)
# Prediction
out = model(images)
loss = criterion(out, labels)
# predictions = (out > 0.5).long()
# num_correct += (predictions == labels).sum()
# num_samples += predictions.size(0)
evaluation_loss += loss.item()
actuals.append(labels.cpu())
predictions.append(out.cpu())
# gc.collect()
        # Compute mean validation loss over batches
        epoch_loss_eval = evaluation_loss / len(valid_loader)
evaluation_losses.append(epoch_loss_eval)
print(average_precision_score(torch.concat(actuals), torch.concat(predictions)))
# accuracy=float(num_correct)/float(num_samples)*100
with open(f"logs_{version}.txt", "a+") as f:
print(
"Epoch: {}/{} | Train Loss: {:.3f} | Eval Loss: {:.3f}".format(
epoch + 1, epochs, 1, epoch_loss_eval
),
file=f,
)
print(
"Epoch: {}/{} | Train Loss: {:.3f} | Eval Loss: {:.3f}".format(
epoch + 1, epochs, 1, epoch_loss_eval
)
)
version = "v8"
epochs = 5
batch_size = 16
num_workers = 2
learning_rate = 0.0001
weight_decay = 0.00001
eff_net5 = BaseSwin().to(device)
from tqdm import tqdm
train(
    model=eff_net5,
    epochs=epochs,
    batch_size=batch_size,
    num_workers=num_workers,
    learning_rate=learning_rate,
    weight_decay=weight_decay,
    version=version,
)
# test_jmi_data = CustomDatasetTest(test_jmi_df)
# test_loader = torch.utils.data.DataLoader(
#     test_jmi_data, batch_size=batch_size, shuffle=False
# )
test_df = pd.read_csv(
"../input/path-for-jdp/test.csv",
header=0,
names=["Path", "Label"],
dtype={"Label": np.int32},
)
test_data = CustomDatasetTest(test_df)
test_loader = torch.utils.data.DataLoader(
test_data, batch_size=batch_size, shuffle=False
)
eff_net5.eval()
out = []
out_res = []
with torch.no_grad():
for images in tqdm(test_loader):
images = images.to(device)
out = eff_net5(images)
out_res.append(out.detach().cpu())
out_res
test_df
pred = torch.concat(out_res)
pred
def sigmoid(x):
    # element-wise logistic function; equivalent to torch.sigmoid(x)
    return 1 / (1 + torch.exp(-x))
pred_s = []
pred_s = sigmoid(pred)
pred_s
test_df["Pred_value"] = ""
test_df.Pred_value = pred_s
test_df
y_test = test_df.Label.astype(int)
y_pred = (test_df.Pred_value > 0.5).astype(int)
from sklearn.metrics import (
classification_report,
roc_curve,
precision_recall_curve,
roc_auc_score,
accuracy_score,
)
# print("Accuracy: %f" % accuracy_score(y_test, y_pred))
print(accuracy_score(y_test, y_pred))
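# A hedged addition: roc_auc_score is already imported and is a more informative metric
# than thresholded accuracy for this binary steganalysis task.
print("ROC AUC:", roc_auc_score(y_test, test_df.Pred_value))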
test_df["Label"].hist(bins=100)
test_df["Pred_value"].hist(bins=100)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/444/129444800.ipynb
|
timmmaster
|
abhishek
|
[{"Id": 129444800, "ScriptId": 36289397, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7021721, "CreationDate": "05/13/2023 21:58:15", "VersionNumber": 19.0, "Title": "DCT eff", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 568.0, "LinesInsertedFromPrevious": 141.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 427.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185500436, "KernelVersionId": 129444800, "SourceDatasetVersionId": 5633313}, {"Id": 185500437, "KernelVersionId": 129444800, "SourceDatasetVersionId": 5672135}, {"Id": 185500438, "KernelVersionId": 129444800, "SourceDatasetVersionId": 5672405}, {"Id": 185500439, "KernelVersionId": 129444800, "SourceDatasetVersionId": 5672802}, {"Id": 185500440, "KernelVersionId": 129444800, "SourceDatasetVersionId": 5672937}, {"Id": 185500441, "KernelVersionId": 129444800, "SourceDatasetVersionId": 5673103}]
|
[{"Id": 5633313, "DatasetId": 1607245, "DatasourceVersionId": 5708580, "CreatorUserId": 5309, "LicenseName": "Unknown", "CreationDate": "05/08/2023 13:08:13", "VersionNumber": 79.0, "Title": "timm-master", "Slug": "timmmaster", "Subtitle": "pytorch image models - master branch", "Description": NaN, "VersionNotes": "Automatic Update 2023-05-08", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1607245, "CreatorUserId": 5309, "OwnerUserId": 5309.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 6275918.0, "CurrentDatasourceVersionId": 6355834.0, "ForumId": 1627662, "Type": 2, "CreationDate": "09/23/2021 15:29:41", "LastActivityDate": "09/23/2021", "TotalViews": 5546, "TotalDownloads": 279, "TotalVotes": 45, "TotalKernels": 91}]
|
[{"Id": 5309, "UserName": "abhishek", "DisplayName": "Abhishek Thakur", "RegisterDate": "01/12/2011", "PerformanceTier": 4}]
|
import albumentations
import cv2
import os, os.path
from PIL import Image  # for converting from RGB to YCbCr
import sys, timeit, math, copy, random
sys.path.append("../input/timmmaster/")
# Basics
import pandas as pd
import numpy as np
from numpy import pi # for DCT
from numpy import r_ # for DCT
import scipy # for cosine similarity
from scipy import fftpack # for DCT
import random
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.image as mpimg # to check images
from tqdm.notebook import tqdm  # progress bar
# SKlearn
from sklearn.model_selection import KFold
from sklearn import metrics
# PyTorch
import torch
import torch.nn as nn
import torch.optim as optim
from torch import FloatTensor, LongTensor
from torch.utils.data import Dataset, DataLoader
from torch.optim.lr_scheduler import ReduceLROnPlateau
import torch.nn.functional as F
# Data Augmentation for Image Preprocessing
from albumentations import (
ToFloat,
Normalize,
VerticalFlip,
HorizontalFlip,
Compose,
Resize,
RandomBrightness,
RandomContrast,
HueSaturationValue,
Blur,
GaussNoise,
)
# from albumentations.pytorch import ToTensorV2, ToTensor
from torchvision.models import resnet34
# from torchvision import transforms as transforms
import warnings
warnings.filterwarnings("ignore")
import transformers
from albumentations.pytorch import ToTensorV2
import timm
from sklearn.metrics import average_precision_score
import jpegio as jio
# model_names = timm.list_models(pretrained=True)
# print(model_names)
def set_seed(seed=1234):
"""Sets the seed of the entire notebook so results are the same every time we run.
This is for REPRODUCIBILITY."""
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
# When running on the CuDNN backend, two further options must be set
torch.backends.cudnn.deterministic = True
# Set a fixed value for the hash seed
os.environ["PYTHONHASHSEED"] = str(seed)
set_seed()
device = "cuda"
print("Device available now:", device)
"""path1='../input/alaska2-image-steganalysis'
def append_path(pre):
return np.vectorize(lambda file: os.path.join(path1, pre, file))
train_filenames_cover = np.array(os.listdir("../input/alaska2-image-steganalysis/Cover"))
train_filenames_cover=sorted(train_filenames_cover)
positives = train_filenames_cover.copy()
neg_train_path1 = append_path('Cover')(positives[:8000])
neg_train_path2 = append_path('Cover')(positives[10000:18000])
neg_train_path3 = append_path('Cover')(positives[20000:28000])
ned_valid_path1 = append_path('Cover')(positives[8000:9000])
ned_valid_path2 = append_path('Cover')(positives[18000:19000])
ned_valid_path3 = append_path('Cover')(positives[28000:29000])
neg_test_path1 = append_path('Cover')(positives[9000:10000])
neg_test_path2 = append_path('Cover')(positives[19000:20000])
neg_test_path3 = append_path('Cover')(positives[29000:30000])
jmi_train_path=append_path('JMiPOD')(positives[:8000])
jmi_valid_path=append_path('JMiPOD')(positives[8000:9000])
jmi_test_path=append_path('JMiPOD')(positives[9000:10000])
jun_train_path=append_path('JUNIWARD')(positives[10000:18000])
jun_valid_path=append_path('JUNIWARD')(positives[18000:19000])
jun_test_path=append_path('JUNIWARD')(positives[19000:20000])
ue_train_path=append_path('UERD')(positives[20000:28000])
ue_valid_path=append_path('UERD')(positives[28000:29000])
ue_test_path=append_path('UERD')(positives[29000:30000])
train_paths = np.concatenate([neg_train_path1, neg_train_path2, neg_train_path3, jmi_train_path, jun_train_path, ue_train_path])
valid_paths= np.concatenate([ned_valid_path1, ned_valid_path2, ned_valid_path3, jmi_valid_path, jun_valid_path, ue_valid_path])
test_paths=np.concatenate([neg_test_path1, neg_test_path2, neg_test_path3, jmi_test_path, jun_test_path, ue_test_path])
train_labels = np.array([0] * len(neg_train_path1) + [0] * len(neg_train_path2)+[0] * len(neg_train_path3)+[1] * len(jmi_train_path)+ [1] * len(jun_train_path)+[1] * len(ue_train_path))
valid_labels = np.array([0] * len(ned_valid_path1) + [0] * len(ned_valid_path2)+[0] * len(ned_valid_path3)+[1] * len(jmi_valid_path)+ [1] * len(jun_valid_path)+[1] * len(ue_valid_path))
test_labels = np.array([0] * len(neg_test_path1) + [0] * len(neg_test_path2)+[0] * len(neg_test_path3)+[1] * len(jmi_test_path)+ [1] * len(jun_test_path)+[1] * len(ue_test_path))"""
path = "../input/alaska2-image-steganalysis"
def append_path(pre):
return np.vectorize(lambda file: os.path.join(path, pre, file))
train_filenames_cover = np.array(
os.listdir("../input/alaska2-image-steganalysis/Cover")
)
train_filenames_cover = sorted(train_filenames_cover)
positives = train_filenames_cover.copy()
neg_train_path = append_path("Cover")(positives[:12000])
neg_valid_path = append_path("Cover")(positives[12000:13500])
neg_test_path = append_path("Cover")(positives[13500:15000])
jun_train_path = append_path("JUNIWARD")(positives[:12000])
jun_valid_path = append_path("JUNIWARD")(positives[12000:13500])
jun_test_path = append_path("JUNIWARD")(positives[13500:15000])
train_paths = np.concatenate([neg_train_path, jun_train_path])
valid_paths = np.concatenate([neg_valid_path, jun_valid_path])
test_paths = np.concatenate([neg_test_path, jun_test_path])
train_labels = np.array([0] * len(neg_train_path) + [1] * len(jun_train_path))
valid_labels = np.array([0] * len(neg_valid_path) + [1] * len(jun_valid_path))
test_labels = np.array([0] * len(neg_test_path) + [1] * len(jun_test_path))
train_df = pd.DataFrame({"Id": train_paths, "Label": train_labels})
valid_df = pd.DataFrame({"Id": valid_paths, "Label": valid_labels})
test_df = pd.DataFrame({"Id": test_paths, "Label": test_labels})
train_df.head(10)
path1 = "../input/cover-and-jun1"
def append_path1(pre):
return np.vectorize(lambda file: os.path.join(path1, pre, file))
path2 = "../input/cover-and-jun2"
def append_path2(pre):
return np.vectorize(lambda file: os.path.join(path2, pre, file))
path3 = "../input/cover-and-jun3"
def append_path3(pre):
return np.vectorize(lambda file: os.path.join(path3, pre, file))
path4 = "../input/cover-and-jun4"
def append_path4(pre):
return np.vectorize(lambda file: os.path.join(path4, pre, file))
path5 = "../input/cover-and-jun5"
def append_path5(pre):
return np.vectorize(lambda file: os.path.join(path5, pre, file))
train_filenames_cover1 = np.array(os.listdir("../input/cover-and-jun1/cover1"))
train_filenames_cover1 = sorted(train_filenames_cover1)
train_filenames_cover2 = np.array(os.listdir("../input/cover-and-jun2/cover2"))
train_filenames_cover2 = sorted(train_filenames_cover2)
train_filenames_cover3 = np.array(os.listdir("../input/cover-and-jun3/cover3"))
train_filenames_cover3 = sorted(train_filenames_cover3)
train_filenames_cover4 = np.array(os.listdir("../input/cover-and-jun4/cover4"))
train_filenames_cover4 = sorted(train_filenames_cover4)
train_filenames_cover5 = np.array(os.listdir("../input/cover-and-jun5/cover5"))
train_filenames_cover5 = sorted(train_filenames_cover5)
positives1 = train_filenames_cover1.copy()
positives2 = train_filenames_cover2.copy()
positives3 = train_filenames_cover3.copy()
positives4 = train_filenames_cover4.copy()
positives5 = train_filenames_cover5.copy()
neg_train_path1 = append_path1("cover1")(positives1[:3000])
neg_train_path2 = append_path2("cover2")(positives2[:3000])
neg_train_path3 = append_path3("cover3")(positives3[:3000])
neg_train_path4 = append_path4("cover4")(positives4[:3000])
ned_valid_path = append_path5("cover5")(positives5[:1500])
neg_test_path = append_path5("cover5")(positives5[1500:3000])
jun_train_path1 = append_path1("JUN1")(positives1[:3000])
jun_train_path2 = append_path2("JUN2")(positives2[:3000])
jun_train_path3 = append_path3("JUN3")(positives3[:3000])
jun_train_path4 = append_path4("JUN4")(positives4[:3000])
jun_valid_path = append_path5("JUN5")(positives5[:1500])
jun_test_path = append_path5("JUN5")(positives5[1500:3000])
train_paths1 = np.concatenate(
[
neg_train_path1,
neg_train_path2,
neg_train_path3,
neg_train_path4,
jun_train_path1,
jun_train_path2,
jun_train_path3,
jun_train_path4,
]
)
valid_paths1 = np.concatenate([ned_valid_path, jun_valid_path])
test_paths1 = np.concatenate([neg_test_path, jun_test_path])
train_labels1 = np.array(
[0] * len(neg_train_path1)
+ [0] * len(neg_train_path2)
+ [0] * len(neg_train_path3)
+ [0] * len(neg_train_path4)
+ [1] * len(jun_train_path1)
+ [1] * len(jun_train_path2)
+ [1] * len(jun_train_path3)
+ [1] * len(jun_train_path4)
)
valid_labels1 = np.array([0] * len(ned_valid_path) + [1] * len(jun_valid_path))
test_labels1 = np.array([0] * len(neg_test_path) + [1] * len(jun_test_path))
train1_df = pd.DataFrame({"Id": train_paths1, "Label": train_labels1})
valid1_df = pd.DataFrame({"Id": valid_paths1, "Label": valid_labels1})
test1_df = pd.DataFrame({"Id": test_paths1, "Label": test_labels1})
train1_df.head(10)
"""train1_df.to_csv('train1.csv', index=False)
valid1_df.to_csv('valid1.csv', index=False)
test1_df.to_csv('test1.csv', index=False)"""
sample_size = 128
num_classes = 1
"""train1_df = pd.read_csv('/kaggle/working/train1.csv',
header=0, names=['Path', 'Label'], dtype = {'Label':np.int32})
valid1_df = pd.read_csv('/kaggle/working/valid1.csv',
header=0, names=['Path', 'Label'], dtype = {'Label':np.int32})"""
"""train1_df['Label'] = train1_df['Label'].astype('float32')
train1_df.dtypes"""
# from torchvision.io import read_image
"""class AlaskaDatasetTrain(Dataset):
def __init__(self, dataframe):
self.dataframe = dataframe
def __len__(self):
return len(self.dataframe)
def __getitem__(self, idx):
img_path = self.dataframe.iloc[idx, 0]
image = read_image(img_path)
label = self.dataframe.iloc[idx, 1]
return image, label"""
"""prom = np.zeros(shape = (512, 512, 3), dtype="float32")
koef = np.zeros(shape = (512, 512, 3), dtype="float32")"""
"""20it [00:12, 1.58it/s]
torch.Size([672, 3, 512, 512])
tensor(-0.3711) tensor(21.0777)
tensor(-0.2116) tensor(6.0352)
tensor(0.1129) tensor(5.8410)"""
class CustomDatasetTrain(Dataset):
def __init__(self, dataframe1, dataframe2):
self.dataframe1 = dataframe1
self.dataframe2 = dataframe2
self.transform_image_dct = Compose(
[
Normalize(
mean=[-0.3711, -0.2116, 0.1129],
std=[21.0777, 6.0352, 5.8410],
max_pixel_value=1.0,
),
ToTensorV2(),
]
)
self.transform_image = Compose(
[
Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
max_pixel_value=255.0,
),
ToTensorV2(),
]
)
def __len__(self):
return len(self.dataframe1)
def __getitem__(self, idx):
# koef = np.zeros(shape = (512, 512, 3), dtype="float32")
img_path1 = self.dataframe1.iloc[idx, 0]
# jpeg = jio.read(img_path)
image = cv2.imread(img_path1)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
img_path2 = self.dataframe2.iloc[idx, 0]
koef = np.load(img_path2)
# koef[:,:,0] = jpeg.coef_arrays[0] ; koef[:,:,1] = jpeg.coef_arrays[1] ; koef[:,:,2] = jpeg.coef_arrays[2]
# koef = np.array(koef, dtype=np.float32)
koef = self.transform_image_dct(image=koef)["image"]
label = self.dataframe2.iloc[idx, 1]
label = torch.tensor(label, dtype=torch.float32)
image = self.transform_image(image=image)["image"]
# image_dct=torch.concat([image,koef],dim=0)
image = np.array(image)
# koef=np.array(koef)
image_dct = np.concatenate([image, koef], axis=0)
image_dct = torch.tensor(image_dct)
# koef=torch.tensor(koef)
return image_dct, label
# CustomDatasetTrain expects two dataframes: image paths (train_df) and
# DCT-coefficient paths with labels (train1_df), matching the training setup below.
ex_data = CustomDatasetTrain(train_df, train1_df)
ex_loader = torch.utils.data.DataLoader(
ex_data, batch_size=1, shuffle=True, num_workers=4
)
# example_data = AlaskaDatasetTrain(train_df)
# example_loader = torch.utils.data.DataLoader(example_data, batch_size = 1, shuffle=True)
"""for image, label in example_loader:
images_example = image
label_example = torch.tensor(label, dtype=torch.long)
break
print('Images shape:', images_example.shape)
print('Labels:', label, '\n')"""
num_workers = 2
images = []
for i, (image, label) in tqdm(enumerate(ex_loader)):
images.append(image)
if i == 20:
break
images_res = torch.concat(images, dim=0)
print(images_res.shape)
for i in range(3):
print(images_res[:, i, :, :].mean(), images_res[:, i, :, :].std())
prom1 = np.zeros(shape=(512, 512, 3), dtype="float32")
norm1 = np.zeros(shape=(512, 512, 3), dtype="float32")
class CustomDatasetTest(Dataset):
def __init__(self, dataframe):
self.dataframe = dataframe
self.transform_image = Compose([ToTensorV2()])
def __len__(self):
return len(self.dataframe)
def __getitem__(self, idx):
koef = np.zeros(shape=(512, 512, 3), dtype="float32")
img_path = self.dataframe.iloc[idx, 0]
jpeg = jio.read(img_path)
# prom=np.load(img_path)
# prom = np.array(prom, dtype=np.float32)
# image=(prom-693)/(693-(-676)) #normalization
# image = cv2.imread(img_path)
# image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
koef[:, :, 0] = jpeg.coef_arrays[0]
koef[:, :, 1] = jpeg.coef_arrays[1]
koef[:, :, 2] = jpeg.coef_arrays[2]
koef = np.array(koef, dtype=np.float32)
# image = (koef-693)/(693-(-676)) #normalization
        image = self.transform_image(image=koef)["image"]
return image
"""class AlaskaDatasetTest(Dataset):
def __init__(self, dataframe):
self.dataframe = dataframe
def __len__(self):
return len(self.dataframe)
def __getitem__(self, idx):
img_path = self.dataframe.iloc[idx, 0]
image = read_image(img_path)
return image"""
print(
"Train Data Size:",
len(train1_df),
"\n" + "Valid Data Size:",
len(valid1_df),
"\n" + "----------------------",
"\n" + "Total:",
len(train1_df) + len(valid1_df),
)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(14, 5))
sns.countplot(x=train1_df["Label"], ax=ax1, palette=sns.color_palette("GnBu_d", 10))
sns.countplot(x=valid1_df["Label"], ax=ax2, palette=sns.color_palette("YlOrRd", 10))
ax1.set_title("Train Data", fontsize=16)
ax2.set_title("Valid Data", fontsize=16)
# Note: despite its name, BaseSwin wraps a timm EfficientNet backbone by default (name="tf_efficientnet_b0_ns").
class BaseSwin(nn.Module):
def __init__(self, name="tf_efficientnet_b0_ns", pretrained=True, n_classes=1):
super(BaseSwin, self).__init__()
self.model = timm.create_model(name, pretrained=pretrained, in_chans=6)
self.n_classes = n_classes
self.n_features = self.model.classifier.in_features
self.model.classifier = nn.Identity()
self.fc = nn.Linear(self.n_features, self.n_classes)
def forward(self, x):
x = self.model(x)
x = self.fc(x)
if self.n_classes == 1:
return x.ravel()
else:
return x
"""class EfficientNetwork(nn.Module):
def __init__(self):
super().__init__()
# Define Feature part
self.features = EfficientNet.from_pretrained('efficientnet-b3')
# Define Classification part
self.classification = nn.Linear(1408, 1)
#self.avg_pool=nn.AvgPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=False,
#count_include_pad=False)
def forward(self, image, prints=False):
if prints: print('Input Image shape:', image.shape)
image = self.features.extract_features(image)
print('Features Image shape:', image.shape)
image = F.avg_pool2d(image, image.size()[2:]).reshape(-1, 1408)
if prints: print('Image Reshaped shape:', image.shape)
out = self.classification(image)
if prints: print('Out shape:', out.shape)
return torch.sigmoid(out)"""
# Create an example model (default backbone: tf_efficientnet_b0_ns with 6 input channels)
model_example = BaseSwin()
train1_data = CustomDatasetTrain(train_df, train1_df)
valid1_data = CustomDatasetTrain(valid_df, valid1_df)
train1_data[0]
"""path1="/kaggle/input/jpegdctpng"
def append_path(pre):
return np.vectorize(lambda file: os.path.join(path1, pre, file))
train_filenames = np.array(os.listdir("/kaggle/input/jpegdctpng/cover"))
train_filenames=sorted(train_filenames)
jmi_path_train=append_path('JMI')(train_filenames[:400])
jmi_path_valid=append_path('JMI')(train_filenames[400:450])
jmi_path_test=append_path('JMI')(train_filenames[450:500])
neg_path_train=append_path('cover')(train_filenames[:400])
neg_path_valid=append_path('cover')(train_filenames[400:450])
neg_path_test=append_path('cover')(train_filenames[450:500])
train_paths = np.concatenate([jmi_path_train, neg_path_train])
valid_paths=np.concatenate([jmi_path_valid, neg_path_valid])
test_paths=np.concatenate([jmi_path_test, neg_path_test])
train_labels=np.array([1] * len(jmi_path_train) + [0] * len(neg_path_train))
test_labels=np.array([1] * len(jmi_path_test) + [0] * len(neg_path_test))
valid_labels=np.array([1] * len(jmi_path_valid) + [0] * len(neg_path_valid))"""
"""train_jmi_df=pd.DataFrame({'Id': train_paths, 'Label': train_labels})
valid_jmi_df=pd.DataFrame({'Id': valid_paths, 'Label': valid_labels})
test_jmi_df=pd.DataFrame({'Id': test_paths, 'Label': test_labels})"""
# train_jmi_data = CustomDatasetTrain(train_jmi_df)
# valid_jmi_data = CustomDatasetTrain(valid_jmi_df)
import gc
def train(
model, epochs, batch_size, num_workers, learning_rate, weight_decay, version="vx"
):
# Create file to save logs
f = open(f"logs_{version}.txt", "w+")
# Data Loaders
train_loader = torch.utils.data.DataLoader(
train1_data,
batch_size=batch_size,
num_workers=num_workers,
drop_last=True,
shuffle=True,
)
valid_loader = torch.utils.data.DataLoader(
valid1_data,
batch_size=batch_size,
num_workers=num_workers,
drop_last=True,
shuffle=True,
)
# Criterion
criterion = torch.nn.BCEWithLogitsLoss()
# Optimizer
optimizer = optim.AdamW(
model.parameters(), lr=learning_rate, weight_decay=weight_decay
)
# train_losses = []
evaluation_losses = []
for epoch in range(epochs):
# Sets the model in training mode
model.train()
train_loss = 0
for images, labels in tqdm(train_loader):
# Need to access the images
images = images.to(device)
labels = labels.to(device)
# Clear gradients
optimizer.zero_grad()
# Make prediction
out = model(images)
# Compute loss and Backpropagate
loss = criterion(out, labels)
loss.backward()
optimizer.step()
# train_loss += loss.item()
# gc.collect()
# Compute average epoch loss
# epoch_loss_train = train_loss / batch_size
# train_losses.append(epoch_loss_train)
# gc.collect()
# ===== Evaluate =====
model.eval()
evaluation_loss = 0
actuals, predictions = [], []
num_correct = 0
num_samples = 0
# To disable gradients
with torch.no_grad():
for images, labels in tqdm(valid_loader):
images = images.to(device)
labels = labels.to(device)
# Prediction
out = model(images)
loss = criterion(out, labels)
# predictions = (out > 0.5).long()
# num_correct += (predictions == labels).sum()
# num_samples += predictions.size(0)
evaluation_loss += loss.item()
actuals.append(labels.cpu())
predictions.append(out.cpu())
# gc.collect()
        # Compute mean validation loss over batches
        epoch_loss_eval = evaluation_loss / len(valid_loader)
evaluation_losses.append(epoch_loss_eval)
print(average_precision_score(torch.concat(actuals), torch.concat(predictions)))
# accuracy=float(num_correct)/float(num_samples)*100
with open(f"logs_{version}.txt", "a+") as f:
print(
"Epoch: {}/{} | Train Loss: {:.3f} | Eval Loss: {:.3f}".format(
epoch + 1, epochs, 1, epoch_loss_eval
),
file=f,
)
print(
"Epoch: {}/{} | Train Loss: {:.3f} | Eval Loss: {:.3f}".format(
epoch + 1, epochs, 1, epoch_loss_eval
)
)
version = "v8"
epochs = 5
batch_size = 16
num_workers = 2
learning_rate = 0.0001
weight_decay = 0.00001
eff_net5 = BaseSwin().to(device)
from tqdm import tqdm
train(
    model=eff_net5,
    epochs=epochs,
    batch_size=batch_size,
    num_workers=num_workers,
    learning_rate=learning_rate,
    weight_decay=weight_decay,
    version=version,
)
# test_jmi_data = CustomDatasetTest(test_jmi_df)
# test_loader = torch.utils.data.DataLoader(
#     test_jmi_data, batch_size=batch_size, shuffle=False
# )
test_df = pd.read_csv(
"../input/path-for-jdp/test.csv",
header=0,
names=["Path", "Label"],
dtype={"Label": np.int32},
)
test_data = CustomDatasetTest(test_df)
test_loader = torch.utils.data.DataLoader(
test_data, batch_size=batch_size, shuffle=False
)
eff_net5.eval()
out = []
out_res = []
with torch.no_grad():
for images in tqdm(test_loader):
images = images.to(device)
out = eff_net5(images)
out_res.append(out.detach().cpu())
out_res
test_df
pred = torch.concat(out_res)
pred
def sigmoid(x):
    # element-wise logistic function; equivalent to torch.sigmoid(x)
    return 1 / (1 + torch.exp(-x))
pred_s = []
pred_s = sigmoid(pred)
pred_s
test_df["Pred_value"] = ""
test_df.Pred_value = pred_s
test_df
y_test = test_df.Label.astype(int)
y_pred = (test_df.Pred_value > 0.5).astype(int)
from sklearn.metrics import (
classification_report,
roc_curve,
precision_recall_curve,
roc_auc_score,
accuracy_score,
)
# print("Accuracy: %f" % accuracy_score(y_test, y_pred))
print(accuracy_score(y_test, y_pred))
test_df["Label"].hist(bins=100)
test_df["Pred_value"].hist(bins=100)
| false | 1 | 7,709 | 0 | 7,730 | 7,709 |
||
129444135
|
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.compose import ColumnTransformer
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import (
RandomForestClassifier,
GradientBoostingClassifier,
RandomForestRegressor,
GradientBoostingRegressor,
)
from sklearn.model_selection import (
validation_curve,
RepeatedStratifiedKFold,
StratifiedKFold,
cross_val_predict,
)
from sklearn.metrics import auc, f1_score, accuracy_score, roc_auc_score
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.model_selection import StratifiedKFold, GridSearchCV
from sklearn.metrics import mean_squared_error as mse
from tqdm import tqdm_notebook
from sklearn.preprocessing import OneHotEncoder
from catboost import CatBoostRegressor
train = pd.read_csv("/kaggle/input/spring-2023-property-prices/Train.csv")
test = pd.read_csv("/kaggle/input/spring-2023-property-prices/Test.csv")
train["metro_dist"] = train["metro_dist"].fillna(30)
test["metro_dist"] = test["metro_dist"].fillna(30)
train = train.fillna(0)
test = test.fillna(0)
Y = train["price"]
date0 = list(map(lambda x: int(x.split("-")[0]), train["date"]))
date1 = list(map(lambda x: int(x.split("-")[1]), train["date"]))
del train["price"]
del train["date"]
del train["id"]
train["date0"] = date0
train["date1"] = date1
X = train.select_dtypes([np.number])
date0 = list(map(lambda x: int(x.split("-")[0]), test["date"]))
date1 = list(map(lambda x: int(x.split("-")[1]), test["date"]))
del test["date"]
del test["id"]
test["date0"] = date0
test["date1"] = date1
X_test = test
num_feat = ["street_id", "balcon", "rooms", "date0", "date1"]
cat_feat = [
"g_lift",
"n_photos",
"build_tech",
"floor",
"area",
"metro_dist",
"kw1",
"kw2",
"kw3",
"kw4",
"kw5",
"kw6",
"kw7",
"kw8",
"kw9",
"kw10",
"kw11",
"kw12",
"kw13",
]
model = Pipeline(
[
(
"scaler",
ColumnTransformer(
[
("num_feat", "passthrough", num_feat),
("cat_feat", OneHotEncoder(), cat_feat),
]
),
),
("model", GradientBoostingRegressor(n_estimators=1000, learning_rate=0.8)),
]
)
model.fit(X, Y)
mse(Y, model.predict(X))  # training-set MSE; optimistic compared to held-out error
Y_test_pred = model.predict(X_test)
Y_train_pred = model.predict(X)
file = pd.read_csv("/kaggle/input/spring-2023-property-prices/SampleSubmission.csv")
file.price = Y_test_pred
file.to_csv("Output.csv", index=False)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/444/129444135.ipynb
| null | null |
[{"Id": 129444135, "ScriptId": 38487517, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14626289, "CreationDate": "05/13/2023 21:45:35", "VersionNumber": 2.0, "Title": "notebookaff51c9547", "EvaluationDate": "05/13/2023", "IsChange": true, "TotalLines": 79.0, "LinesInsertedFromPrevious": 15.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 64.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import LogisticRegression
from sklearn.compose import ColumnTransformer
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import (
RandomForestClassifier,
GradientBoostingClassifier,
RandomForestRegressor,
GradientBoostingRegressor,
)
from sklearn.model_selection import (
validation_curve,
RepeatedStratifiedKFold,
StratifiedKFold,
cross_val_predict,
)
from sklearn.metrics import auc, f1_score, accuracy_score, roc_auc_score
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.model_selection import StratifiedKFold, GridSearchCV
from sklearn.metrics import mean_squared_error as mse
from tqdm import tqdm_notebook
from sklearn.preprocessing import OneHotEncoder
from catboost import CatBoostRegressor
train = pd.read_csv("/kaggle/input/spring-2023-property-prices/Train.csv")
test = pd.read_csv("/kaggle/input/spring-2023-property-prices/Test.csv")
train["metro_dist"] = train["metro_dist"].fillna(30)
test["metro_dist"] = test["metro_dist"].fillna(30)
train = train.fillna(0)
test = test.fillna(0)
Y = train["price"]
date0 = list(map(lambda x: int(x.split("-")[0]), train["date"]))
date1 = list(map(lambda x: int(x.split("-")[1]), train["date"]))
del train["price"]
del train["date"]
del train["id"]
train["date0"] = date0
train["date1"] = date1
X = train.select_dtypes([np.number])
date0 = list(map(lambda x: int(x.split("-")[0]), test["date"]))
date1 = list(map(lambda x: int(x.split("-")[1]), test["date"]))
del test["date"]
del test["id"]
test["date0"] = date0
test["date1"] = date1
X_test = test
num_feat = ["street_id", "balcon", "rooms", "date0", "date1"]
cat_feat = [
"g_lift",
"n_photos",
"build_tech",
"floor",
"area",
"metro_dist",
"kw1",
"kw2",
"kw3",
"kw4",
"kw5",
"kw6",
"kw7",
"kw8",
"kw9",
"kw10",
"kw11",
"kw12",
"kw13",
]
model = Pipeline(
[
(
"scaler",
ColumnTransformer(
[
("num_feat", "passthrough", num_feat),
("cat_feat", OneHotEncoder(), cat_feat),
]
),
),
("model", GradientBoostingRegressor(n_estimators=1000, learning_rate=0.8)),
]
)
model.fit(X, Y)
mse(Y, model.predict(X))  # training-set MSE; optimistic compared to held-out error
Y_test_pred = model.predict(X_test)
Y_train_pred = model.predict(X)
file = pd.read_csv("/kaggle/input/spring-2023-property-prices/SampleSubmission.csv")
file.price = Y_test_pred
file.to_csv("Output.csv", index=False)
| false | 0 | 875 | 0 | 875 | 875 |
||
129444918
|
<jupyter_start><jupyter_text>CIFAR-10 PNGs in folders
### Context
This dataset is only here for convenience. The original dataset in binary form can be found at https://www.cs.toronto.edu/~kriz/cifar.html
And the dataset in ImageNet format (each class is a subfolder) can be found at
https://course.fast.ai/datasets
### Content
From the description on the dataset's home page,
"The CIFAR-10 dataset consists of 60000 32x32 colour images in 10 classes, with 6000 images per class. There are 50000 training images and 10000 test images. Here are the classes in the dataset: airplane, automobile, bird, cat, deer, dog, frog, horse, ship, truck "
Cover photo by Ethan McArthur on Unsplash
Kaggle dataset identifier: cifar10-pngs-in-folders
<jupyter_script>f = open("../input/poetry/Kanye_West.txt")
help(open)
# **Reading the file**
# f.read()
# **print function**
"""
print("My name is Rakshit", end = '\n')
print("We are learning Python")
"""
# help(print)
# f.close()
# **read file using print function**
# print(f.read())
# **read file using loop**
# for i in f:
# print(i, end='\n')
# **Read line by line**
# f.readline()
# **Other Important methods**
# f = open("../input/poetry/Kanye_West.txt",encoding='utf-8-sig')
# f.close()
# help(open)
# f.read(8)
# f.read(4)
# f.seek(0)
# f.close()
# **Default Argumets in open()**
help(open)
# **'r' - for reading the file (default)**
# **'w' - for writing to the file. Creates a new file if it does not exist - overwrites the existing content**
# **'a' - append content to the end of the file**
# **'t' - opens in text mode(default)**
# **'b' - opens in binary mode**
# f.close()
# **Write to the file**
# f = open("test.txt",mode='r')
# f.write("This is my second line")
# print(f.read())
# f.close()
# **Append to the file**
# f = open("test.txt",mode='r')
# f.write("Going good")
# print(f.read())
# f.close()
# **Binary Files** - Non-text file. Images are stored as binary files.
# The mode in the open function should be 'rb' (read binary)
# img = open('../input/cifar10-pngs-in-folders/cifar10/test/airplane/0001.png','rb')
# img.readline()
# store = img.read() #store the content of img to variable store
# a = open('airplane.png','wb')
# a.write(store)
# a.close()
# img.close()
# **Exception handling** - It is important to close our file to make sure that the resources get freed up. Also, there might be data loss if we don't close the file
"""
try:
f = open('../input/poetry/Kanye_West.txt')
1/0
f.close() # this will not get executed
except:
print("There is an issue in the try block")
finally:
f.close()
"""
# f.readline()
"""
with open('python.txt','w') as f: #f is the file handler/variable
    f.write("Hi i am learning python") #once we get out of this block, the file is automatically closed
1+2
"""
"""
x = open('./python.txt','w')
x.write("Hi i am learning pythin")
x.close()
"""
f = open("../input/poetry/Kanye_West.txt")
help(open)
# **Reading the file**
# f.read()
# **print function**
"""
print("My name is Rakshit", end = '\n')
print("We are learning Python")
"""
# help(print)
# f.close()
# **read file using print function**
# print(f.read())
# **read file using loop**
# for i in f:
# print(i, end='\n')
# **Read line by line**
# f.readline()
# **Other Important methods**
# f = open("../input/poetry/Kanye_West.txt",encoding='utf-8-sig')
# f.close()
# help(open)
# f.read(8)
# f.read(4)
# f.seek(0)
# f.close()
# **Default Argumets in open()**
help(open)
# **'r' - for reading the file (default)**
# **'w' - for writing to the file. Creates a new file if does not exist - overrides the existing content**
# **'a' - append content to the end of the file**
# **'t' - opens in text mode(default)**
# **'b' - opens in binary mode**
# f.close()
# **Write to the file**
# f = open("test.txt",mode='r')
# f.write("This is my second line")
# print(f.read())
# f.close()
# **Append to the file**
# f = open("test.txt",mode='r')
# f.write("Going good")
# print(f.read())
# f.close()
# **Binary Files** - Non-text file. Images are stored as binary files.
# The mode in the open function should be 'rb' (read binary)
# img = open('../input/cifar10-pngs-in-folders/cifar10/test/airplane/0001.png','rb')
# img.readline()
# store = img.read() #store the content of img to variable store
# a = open('airplane.png','wb')
# a.write(store)
# a.close()
# img.close()
# **Exception handling** - It is important to close our file to make sure that the resources get free up. Also, there might be data loss in case we don't close the file
"""
try:
f = open('../input/poetry/Kanye_West.txt')
1/0
f.close() # this will not get executed
except:
print("There is an issue in the try block")
finally:
f.close()
"""
# f.readline()
"""
with open('python.txt','w') as f: #f is the file handler/variabkle
f.write("Hi i am learning python") #once we get out of this block, the file is automatically close
1+2
"""
"""
x = open('./python.txt','w')
x.write("Hi i am learning pythin")
x.close()
"""
| false | 0 | 760 | 1 | 994 | 760 |
<jupyter_start><jupyter_text>Diabetes Dataset
### Context
This dataset is originally from the National Institute of Diabetes and Digestive and Kidney Diseases. The objective is to predict based on diagnostic measurements whether a patient has diabetes.
### Content
Several constraints were placed on the selection of these instances from a larger database. In particular, all patients here are females at least 21 years old of Pima Indian heritage.
- Pregnancies: Number of times pregnant
- Glucose: Plasma glucose concentration at 2 hours in an oral glucose tolerance test
- BloodPressure: Diastolic blood pressure (mm Hg)
- SkinThickness: Triceps skin fold thickness (mm)
- Insulin: 2-Hour serum insulin (mu U/ml)
- BMI: Body mass index (weight in kg/(height in m)^2)
- DiabetesPedigreeFunction: Diabetes pedigree function
- Age: Age (years)
- Outcome: Class variable (0 or 1)
#### Sources:
(a) Original owners: National Institute of Diabetes and Digestive and
Kidney Diseases
(b) Donor of database: Vincent Sigillito ([email protected])
Research Center, RMI Group Leader
Applied Physics Laboratory
The Johns Hopkins University
Johns Hopkins Road
Laurel, MD 20707
(301) 953-6231
(c) Date received: 9 May 1990
#### Past Usage:
1. Smith,~J.~W., Everhart,~J.~E., Dickson,~W.~C., Knowler,~W.~C., \&
Johannes,~R.~S. (1988). Using the ADAP learning algorithm to forecast
the onset of diabetes mellitus. In {\it Proceedings of the Symposium
on Computer Applications and Medical Care} (pp. 261--265). IEEE
Computer Society Press.
The diagnostic, binary-valued variable investigated is whether the
patient shows signs of diabetes according to World Health Organization
criteria (i.e., if the 2 hour post-load plasma glucose was at least
200 mg/dl at any survey examination or if found during routine medical
care). The population lives near Phoenix, Arizona, USA.
Results: Their ADAP algorithm makes a real-valued prediction between
0 and 1. This was transformed into a binary decision using a cutoff of
0.448. Using 576 training instances, the sensitivity and specificity
of their algorithm was 76% on the remaining 192 instances.
#### Relevant Information:
Several constraints were placed on the selection of these instances from
a larger database. In particular, all patients here are females at
least 21 years old of Pima Indian heritage. ADAP is an adaptive learning
routine that generates and executes digital analogs of perceptron-like
devices. It is a unique algorithm; see the paper for details.
#### Number of Instances: 768
#### Number of Attributes: 8 plus class
#### For Each Attribute: (all numeric-valued)
1. Number of times pregnant
2. Plasma glucose concentration at 2 hours in an oral glucose tolerance test
3. Diastolic blood pressure (mm Hg)
4. Triceps skin fold thickness (mm)
5. 2-Hour serum insulin (mu U/ml)
6. Body mass index (weight in kg/(height in m)^2)
7. Diabetes pedigree function
8. Age (years)
9. Class variable (0 or 1)
#### Missing Attribute Values: Yes
#### Class Distribution: (class value 1 is interpreted as "tested positive for
diabetes")
Kaggle dataset identifier: diabetes-data-set
<jupyter_code>import pandas as pd
df = pd.read_csv('diabetes-data-set/diabetes.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 768 entries, 0 to 767
Data columns (total 9 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Pregnancies 768 non-null int64
1 Glucose 768 non-null int64
2 BloodPressure 768 non-null int64
3 SkinThickness 768 non-null int64
4 Insulin 768 non-null int64
5 BMI 768 non-null float64
6 DiabetesPedigreeFunction 768 non-null float64
7 Age 768 non-null int64
8 Outcome 768 non-null int64
dtypes: float64(2), int64(7)
memory usage: 54.1 KB
<jupyter_text>Examples:
{
"Pregnancies": 6.0,
"Glucose": 148.0,
"BloodPressure": 72.0,
"SkinThickness": 35.0,
"Insulin": 0.0,
"BMI": 33.6,
"DiabetesPedigreeFunction": 0.627,
"Age": 50.0,
"Outcome": 1.0
}
{
"Pregnancies": 1.0,
"Glucose": 85.0,
"BloodPressure": 66.0,
"SkinThickness": 29.0,
"Insulin": 0.0,
"BMI": 26.6,
"DiabetesPedigreeFunction": 0.35100000000000003,
"Age": 31.0,
"Outcome": 0.0
}
{
"Pregnancies": 8.0,
"Glucose": 183.0,
"BloodPressure": 64.0,
"SkinThickness": 0.0,
"Insulin": 0.0,
"BMI": 23.3,
"DiabetesPedigreeFunction": 0.672,
"Age": 32.0,
"Outcome": 1.0
}
{
"Pregnancies": 1.0,
"Glucose": 89.0,
"BloodPressure": 66.0,
"SkinThickness": 23.0,
"Insulin": 94.0,
"BMI": 28.1,
"DiabetesPedigreeFunction": 0.167,
"Age": 21.0,
"Outcome": 0.0
}
<jupyter_script># # Predict Diabetes with Machine Learning
# Import Packages
import pandas as pd # Used to work with datasets
import numpy as np # Used to work with arrays
# Visualization
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.neighbors import (
KNeighborsClassifier,
) # Classifier implementing the k-nearest neighbors vote
from sklearn.tree import (
DecisionTreeClassifier,
) ## is a class capable of performing multiclass classification on a dataset.
from sklearn.neural_network import (
MLPClassifier,
) # Iteratively trains because at each time step the partial derivatives of the loss function with respect to the model parameters are computed.
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import cross_val_score, KFold
from sklearn.preprocessing import (
StandardScaler,
) ## Removes the average and scales each feature/variable for unit variance. This process is carried out in an independent manner
from sklearn.model_selection import (
train_test_split,
) # divide the data into training data and test data
from sklearn.metrics import (
r2_score,
confusion_matrix,
mean_squared_error,
classification_report,
)
import warnings
warnings.filterwarnings("ignore")
# Data
# Pregnancies: Number of times pregnant
# Glucose: Plasma glucose concentration at 2 hours in an oral glucose tolerance test
# BloodPressure: Diastolic blood pressure (mm Hg)
# SkinThickness: Triceps skin fold thickness (mm)
# Insulin: 2-Hour serum insulin (mu U/ml)
# BMI: Body mass index (weight in kg/(height in m)^2)
# DiabetesPedigreeFunction: Diabetes pedigree function
# Age: Age (years)
# Outcome: Class variable (0 or 1)
# read data
diabetes = pd.read_csv("/kaggle/input/diabetes-data-set/diabetes.csv")
# name columns
print(diabetes.columns)
diabetes.head() # Show part of the data
# shape data
print("dimension of data: {}".format(diabetes.shape))
# The diabetes dataset consists of 768 data points, with 9 features each:
## print about information
diabetes.info()
# check is null data
diabetes.isnull().sum()
## print description
diabetes.describe()
# "outcome" Is the feature that I will expect, 0 means no diabetes, 1 means presence
print(diabetes.groupby("Outcome").size())
# 500 samples are labeled 0 and 268 are labeled 1:
## The number of observations in each class, shown as bars.
sns.countplot(data=diabetes, x="Outcome", label="Count")
sns.countplot(data=diabetes, x="Pregnancies", hue="Outcome")
# create dataframes for Outcome == 0 and Outcome == 1
diabetes_0 = diabetes[diabetes["Outcome"] == 0]
diabetes_1 = diabetes[diabetes["Outcome"] == 1]
# histogram of the "Age" variable in the "Outcome=0" dataset
plt.hist(diabetes_0["Age"])
plt.xlabel("Age")
plt.ylabel("Count")
plt.show()
# histogram of the "Age" variable in the "Outcome=1" dataset
plt.hist(diabetes_1["Age"])
plt.xlabel("Age")
plt.ylabel("Count")
plt.show()
# histogram of the "Age"
sns.histplot(data=diabetes, x="Age", hue="Outcome")
plt.xlabel("Age")
plt.ylabel("Count")
plt.show()
diabetes_0["Age"].mean()
diabetes_1["Age"].mean()
# ###### The incidence of diabetes increases from the age of 38
# histogram of the "SkinThickness"
sns.histplot(data=diabetes, x="SkinThickness", hue="Outcome")
plt.xlabel("SkinThickness")
plt.ylabel("Count")
plt.show()
# average healthy people SkinThickness
diabetes_0["SkinThickness"].mean()
# max healthy people SkinThickness
diabetes_0["SkinThickness"].max()
# average diabetics SkinThickness
diabetes_1["SkinThickness"].mean()
# max diabetics SkinThickness
diabetes_1["SkinThickness"].max()
# ###### The thickness of the skin of diabetics is higher than that of healthy people
## histogram of the "BMi"
sns.histplot(data=diabetes, x="BMI", hue="Outcome")
plt.xlabel("BMI")
plt.ylabel("Count")
plt.show()
# average healthy people BMI
diabetes_0["BMI"].mean()
# max healthy people BMI
diabetes_0["BMI"].max()
# average diabetics BMI
diabetes_1["BMI"].mean()
# max diabetics BMI
diabetes_1["BMI"].max()
# ###### BMI is higher in affected patients than in healthy people.
## histogram of the "Pregnancies"
sns.histplot(data=diabetes, x="Pregnancies", hue="Outcome")
plt.xlabel("Pregnancies")
plt.ylabel("Count")
plt.xticks([1, 3, 5, 7, 9])
plt.show()
# average healthy people Pregnancies
diabetes_0["Pregnancies"].mean()
# max healthy people Pregnancies
diabetes_0["Pregnancies"].max()
# average diabetics Pregnancies
diabetes_1["Pregnancies"].mean()
# max diabetics Pregnancies
diabetes_1["Pregnancies"].max()
# ###### The Number of times pregnant of diabetics is higher than that of healthy people
# scatter plot of the relationship between Age and BMI
plt.scatter(diabetes["BMI"], diabetes["Age"])
plt.title("The relationship between Age and BMI")
plt.xlabel("BMI")
plt.ylabel("Age")
plt.show()
# to compare correlation between a target and other features in absolute
correlations = diabetes.corr()["Outcome"].drop("Outcome")
sorted_correlations = correlations.abs().sort_values(ascending=False)
sorted_correlations
# show bar to compare correlation between a target and other features in absolute
# to be organized and easy to compare
sns.barplot(x=sorted_correlations.index, y=sorted_correlations)
plt.xticks(rotation=90)
plt.xlabel("Features")
plt.ylabel("Absolute Correlation")
plt.show()
# visualizing the correlation between the variables in the diabetes
plt.figure(figsize=(15, 15))
sns.heatmap(np.abs(diabetes.corr()), annot=True)
plt.title("Correlation data ", fontsize=12)
# split data
X = diabetes.drop(columns=["Outcome"]) # data
y = diabetes["Outcome"] # target
# StandardScaler in dataframe mean=0 , Std=1
Stand = StandardScaler()
X = pd.DataFrame(Stand.fit_transform(X), columns=X.columns)
def evaluate(model, X, target, n_folds=5):
"""
Evaluate the performance of the model
Inputs:
Model ,
Data ,
Target
Number folds optional
Outputs:
Mean cross score,
Mean Squared Error
Root Mean Squared Error
classification report
"""
# split the data into training and testing
X_train, X_test, y_train, y_test = train_test_split(X, target, test_size=0.25)
model.fit(X_train, y_train) # fit model
pred_y = model.predict(X_test)
kfold = KFold(n_splits=n_folds, shuffle=True, random_state=42)
scores = cross_val_score(model, X, target, cv=kfold)
print("model :", model)
print("Mean cross score:", scores.mean())
# print(" Score" ,model.score(X_test,y_test))
print("MSE: ", mean_squared_error(y_test, pred_y))
print("RMSE:", np.sqrt(mean_squared_error(y_test, pred_y)))
print("Classification Report")
print(classification_report(y_test, pred_y))
# ## K Nearest Neighbours prediction
# k-Nearest Neighbours is arguably the simplest machine learning algorithm: building the model consists only of storing the training data set. To make a prediction for a new data point, the algorithm finds the closest data points in the training set.
# First, let's see if we can confirm the relationship between model complexity and accuracy:
# split data into train and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=66)
training_accuracy = []
test_accuracy = []
# try n_neighbors from 1 to 10
neighbors_settings = range(1, 11)
for n_neighbors in neighbors_settings:
    # building the model
knn = KNeighborsClassifier(n_neighbors=n_neighbors)
knn.fit(X_train, y_train)
# record training set accuracy
training_accuracy.append(knn.score(X_train, y_train))
# record test set accuracy
test_accuracy.append(knn.score(X_test, y_test))
plt.plot(neighbors_settings, training_accuracy, label="training accuracy")
plt.plot(neighbors_settings, test_accuracy, label="test accuracy")
plt.ylabel("Accuracy")
plt.xlabel("n_neighbors")
plt.legend()
## We check accuracy of the k-nearest neighbors algorithm for predicting diabetes
evaluate(knn, X, y)
#
# ## Decision tree classifier
tree = DecisionTreeClassifier(random_state=0)
tree.fit(X_train, y_train)
print(
"Accuracy on training set: {:.3f}".format(tree.score(X_train, y_train))
) # To calculate the accuracy of the training data
print(
"Accuracy on test set: {:.3f}".format(tree.score(X_test, y_test))
) # To calculate the accuracy of the test data
# The accuracy on the training set using the decision tree classifier is 100%,
# while the accuracy on the test set is much worse. This indicates that the tree is
# overfitting: it does not generalize well to new data. Therefore, we need to apply
# pre-pruning to the tree.
# Now I will fit it again with max_depth=3, which limits the depth of the tree.
# This leads to a lower accuracy on the training set, but improves the test set accuracy.
## We check accuracy of the Decision tree classifier algorithm for predicting diabetes
tree = DecisionTreeClassifier(max_depth=3)
evaluate(tree, X, y)
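# A hedged sketch (not in the original notebook) of how max_depth could be tuned with
# cross-validation instead of hard-coding 3:
from sklearn.model_selection import GridSearchCV

grid = GridSearchCV(
    DecisionTreeClassifier(random_state=0),
    param_grid={"max_depth": list(range(1, 11))},
    cv=5,
)
grid.fit(X_train, y_train)
print("Best max_depth:", grid.best_params_["max_depth"])
print("Best cross-validated accuracy:", grid.best_score_)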
# ## LogisticRegression for predicting diabetes
logistic = LogisticRegression()
logistic.fit(X_train, y_train)
print(
"Accuracy on training set: {:.2f}".format(logistic.score(X_train, y_train))
) # To calculate the accuracy of the training data
print(
"Accuracy on test set: {:.2f}".format(logistic.score(X_test, y_test))
) # To calculate the accuracy of the testing data
## We check accuracy of the Logistic Regression algorithm for predicting diabetes
evaluate(LogisticRegression(), X, y)
# ## Neural networks for predicting diabetes
mlp = MLPClassifier(max_iter=1000, alpha=1, random_state=0)
mlp.fit(X_train, y_train)
print("Accuracy on training set: {:.3f}".format(mlp.score(X_train, y_train)))
print("Accuracy on test set: {:.3f}".format(mlp.score(X_test, y_test)))
## We check accuracy of the MLPClassifier algorithm for predicting diabetes
evaluate(MLPClassifier(max_iter=100, alpha=1), X, y)
# So far this is essentially the default neural network model on the scaled data. Now I will draw a heat map of the first-layer weights of the trained neural network used to predict diabetes on this dataset.
plt.figure(figsize=(20, 5)) #
plt.imshow(mlp.coefs_[0])
plt.yticks(range(8))
plt.xlabel("Columns in weight matrix")
plt.ylabel("Input feature")
plt.colorbar()
plt.show()
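# confusion_matrix was imported above but never used; a minimal sketch of how it could be
# inspected for the trained MLP on the same held-out split as above:
plt.figure(figsize=(5, 4))
cm = confusion_matrix(y_test, mlp.predict(X_test))
sns.heatmap(cm, annot=True, fmt="d", cmap="Blues")
plt.xlabel("Predicted label")
plt.ylabel("True label")
plt.title("Confusion matrix - MLP")
plt.show()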
# # Generate CIFAR10 FID statistics
# ### Step 1: Extract the train images to a directory
# ### Step 2: Generate the npz file
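# The notebook only lists the two steps; a hedged sketch of how they might be done.
# torchvision (for the extraction) and the pytorch-fid CLI (for the statistics) are
# assumptions -- the original does not show its actual code, and Step 1 needs either
# internet access or a local copy of CIFAR10.
import os
from torchvision.datasets import CIFAR10

out_dir = "cifar10_train_images"  # hypothetical output directory
os.makedirs(out_dir, exist_ok=True)
train_set = CIFAR10(root="./data", train=True, download=True)
for i, (img, _) in enumerate(train_set):  # with no transform, img is a PIL.Image
    img.save(os.path.join(out_dir, f"{i:05d}.png"))
# Step 2: precompute the Inception statistics npz, e.g. with the pytorch-fid CLI
# (assumed to be installed):
#   python -m pytorch_fid --save-stats cifar10_train_images cifar10_train_stats.npz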
# # Chart Classifier using Fastai
import json
import os
import pandas as pd
from pathlib import Path
import random
from fastai import *
train_images_path = Path("/kaggle/input/benetech-making-graphs-accessible/train/images")
annotations_path = Path(
"/kaggle/input/benetech-making-graphs-accessible/train/annotations"
)
test_images_path = Path("/kaggle/input/benetech-making-graphs-accessible/test/images")
nested_dict = dict()
for file_name in os.listdir(annotations_path):
image_idx = file_name.split(".")[0]
nested_dict[image_idx] = json.load(open(annotations_path / file_name))
df = pd.DataFrame.from_dict(nested_dict, orient="index")
df.head()
df.rename(
columns={
"chart-type": "chart_type",
"plot-bb": "plot_bb",
"data-series": "data_series",
"axes": "xy_axes",
},
inplace=True,
)
df = df.reset_index()
df["fname"] = df["index"].map(lambda x: x + ".jpg")
df = df.set_index("index")
df.head(3)
df.chart_type.unique()
vbar_idx = df[df.chart_type == "vertical_bar"].index
hbar_idx = df[df.chart_type == "horizontal_bar"].index
line_idx = df[df.chart_type == "line"].index
scatter_idx = df[df.chart_type == "scatter"].index
dot_idx = df[df.chart_type == "dot"].index
# random.sample() needs a sequence (sampling from a set raises an error on Python 3.11+)
vbar_idx_val = random.sample(list(vbar_idx), int(len(vbar_idx) * 0.2))
hbar_idx_val = random.sample(list(hbar_idx), int(len(hbar_idx) * 0.2))
line_idx_val = random.sample(list(line_idx), int(len(line_idx) * 0.2))
scatter_idx_val = random.sample(list(scatter_idx), int(len(scatter_idx) * 0.2))
dot_idx_val = random.sample(list(dot_idx), int(len(dot_idx) * 0.2))
val_idx = vbar_idx_val + hbar_idx_val + line_idx_val + scatter_idx_val + dot_idx_val
df["is_valid"] = False
for idx in val_idx:
df.loc[idx, "is_valid"] = True
df.is_valid.value_counts()
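# The manual per-class sampling above amounts to a stratified 80/20 split; a hedged
# alternative sketch with scikit-learn, written into a separate column so it does not
# interfere with the split actually used below:
from sklearn.model_selection import train_test_split

_, val_fnames = train_test_split(
    df["fname"], test_size=0.2, stratify=df["chart_type"], random_state=42
)
df["is_valid_alt"] = df["fname"].isin(val_fnames)
print(df.groupby("chart_type")["is_valid_alt"].mean())  # ~0.2 for every chart type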
data = df[["fname", "chart_type", "is_valid"]]
data = data.rename(columns={"chart_type": "label"})
data.head()
from fastai.vision.all import *
# path = "C:\\Users\\ASUS TUF GAMING\\Documents\\benetech_competition\\Data\\kaggle_data\\train"
dls = ImageDataLoaders.from_df(
data, train_images_path, valid_col="is_valid", item_tfms=Resize(224)
)
dls.show_batch(max_n=9)
dls.valid.show_batch(max_n=6)
learn = vision_learner(dls, resnet34, metrics=error_rate)
learn.fine_tune(1)
example_image = Image.open(
"/kaggle/input/benetech-making-graphs-accessible/test/images/000b92c3b098.jpg"
)
example_image
learn.predict(
"/kaggle/input/benetech-making-graphs-accessible/test/images/000b92c3b098.jpg"
)[0]
second_example_image = Image.open(
"/kaggle/input/benetech-making-graphs-accessible/test/images/00dcf883a459.jpg"
)
second_example_image
learn.predict(
"/kaggle/input/benetech-making-graphs-accessible/test/images/00dcf883a459.jpg"
)[0]
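# A hedged follow-up sketch (not in the original notebook): classify every test image in
# one pass with a test DataLoader instead of calling learn.predict() file by file.
test_files = get_image_files(test_images_path)  # get_image_files comes from fastai.vision.all
test_dl = learn.dls.test_dl(test_files)
preds, _ = learn.get_preds(dl=test_dl)
pred_labels = [learn.dls.vocab[int(i)] for i in preds.argmax(dim=1)]
chart_types = pd.DataFrame(
    {"image": [f.name for f in test_files], "chart_type": pred_labels}
)
chart_types.head()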
# # image-processing-project
import cv2
import numpy as np
from matplotlib import pyplot as plt
# **Load the blurred image**
image_path = "/kaggle/input/blurred-image2/blurred_image.jpg"
image = cv2.imread(image_path)
# **Display the original blurred image**
if image is None:
    raise ValueError(f"Failed to load image at '{image_path}'")
plt.subplot(2, 3, 1)
plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
plt.title("Original Blurred Image")
plt.axis("off")
# **Apply gamma correction**
gamma = 1.5
gamma_corrected = np.power(image / 255.0, gamma)
gamma_corrected = np.uint8(gamma_corrected * 255.0)
# **Display the gamma-corrected image**
plt.subplot(2, 3, 2)
plt.imshow(cv2.cvtColor(gamma_corrected, cv2.COLOR_BGR2RGB))
plt.title("Gamma Corrected")
plt.axis("off")
# **Apply Gaussian blur**
gaussian_blur = cv2.GaussianBlur(image, (5, 5), 0)
# **Display the Gaussian blurred image**
plt.subplot(2, 3, 3)
plt.imshow(cv2.cvtColor(gaussian_blur, cv2.COLOR_BGR2RGB))
plt.title("Gaussian Blur")
plt.axis("off")
# **Apply median filter**
median_filtered = cv2.medianBlur(image, 5)
# **Display the median filtered image**
plt.subplot(2, 3, 4)
plt.imshow(cv2.cvtColor(median_filtered, cv2.COLOR_BGR2RGB))
plt.title("Median Filter")
plt.axis("off")
# **Apply bilateral filter**
bilateral_filtered = cv2.bilateralFilter(image, 9, 75, 75)
# **Display the bilateral filtered image**
plt.subplot(2, 3, 5)
plt.imshow(cv2.cvtColor(bilateral_filtered, cv2.COLOR_BGR2RGB))
plt.title("Bilateral Filter")
plt.axis("off")
# **Apply Fourier transform** (on a grayscale copy, so the spectrum is a 2-D image)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
f = np.fft.fft2(gray)
fshift = np.fft.fftshift(f)
magnitude_spectrum = 20 * np.log(np.abs(fshift) + 1)  # +1 avoids log(0)
# **Display the magnitude spectrum**
plt.subplot(2, 3, 6)
plt.imshow(magnitude_spectrum, cmap="gray")
plt.title("Magnitude Spectrum")
plt.axis("off")
# **Show the plot**
plt.tight_layout()
plt.show()
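# The steps above only re-smooth the already blurred image; a common way to actually
# sharpen it is unsharp masking. A minimal sketch (not in the original):
sharpened = cv2.addWeighted(image, 1.5, gaussian_blur, -0.5, 0)  # image + 0.5*(image - blur)
plt.figure()
plt.imshow(cv2.cvtColor(sharpened, cv2.COLOR_BGR2RGB))
plt.title("Unsharp Masking (Sharpened)")
plt.axis("off")
plt.show()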
# # Data Exploration and PCA
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.decomposition import PCA
import plotly.express as px
# Load the data
train_df = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/train.csv")
test_df = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/test.csv")
greeks_df = pd.read_csv("/kaggle/input/icr-identify-age-related-conditions/greeks.csv")
sample_submission_df = pd.read_csv(
"/kaggle/input/icr-identify-age-related-conditions/sample_submission.csv"
)
train_df.head()
# Merge the train_data and greeks_data based on a common identifier
merged_data = pd.merge(train_df, greeks_df, on="Id")
# Save the merged dataset to a new CSV file
merged_data.to_csv("data.csv", index=False)
# The code loads "train.csv" and "greeks.csv" into the train_df and greeks_df DataFrames. Then, it merges the two DataFrames based on the common identifier column "Id" using the pd.merge() function. The resulting merged dataset is stored in the merged_data DataFrame.
# Finally, the merged dataset is saved to a new CSV file named "data.csv" using the to_csv() function. The index=False argument ensures that the row index is not included in the saved file.
#
# Load the combined_dataset.csv
data = pd.read_csv("data.csv")
data.head()
import plotly.graph_objects as go
# Value counts for categorical columns
columns = ["Alpha", "Beta", "Gamma", "Delta", "EJ"]
figs = []
for col in columns:
counts = data[col].value_counts()
fig = go.Figure(
data=[
go.Bar(
x=counts.index,
y=counts.values,
marker=dict(color="skyblue"),
opacity=0.8,
)
]
)
fig.update_layout(
title=f"Value Counts for '{col}'",
xaxis_title=col,
yaxis_title="Count",
showlegend=False,
)
figs.append(fig)
# Display the bar charts
for fig in figs:
fig.show()
# Preprocessing the Data
# Preprocessing the data
# Drop irrelevant columns
data.drop(["Id"], axis=1, inplace=True)
data.drop(["Epsilon"], axis=1, inplace=True)
# Convert categorical variable to numeric using one-hot encoding
data = pd.get_dummies(data, columns=["Alpha", "Beta", "Gamma", "Delta", "EJ"])
# Visualize the class distribution
import plotly.express as px
# Class distribution
class_distribution = data["Class"].value_counts()
# Create a bar chart using Plotly
fig = px.bar(
class_distribution, x=class_distribution.index, y=class_distribution.values
)
# Update the layout of the figure for better visualization
fig.update_layout(title="Class Distribution", xaxis_title="Class", yaxis_title="Count")
# Show the bar chart
fig.show()
# Display the first few rows of the preprocessed data
print(data.head())
# Check if dataset is empty
if len(data) == 0:
print("Empty dataset after preprocessing. Please check your data.")
exit()
import plotly.express as px
# Summary statistics of numeric columns
summary_stats = data.describe()
# Transpose the summary statistics DataFrame for easier plotting
summary_stats = summary_stats.T
# Create a bar chart using Plotly
fig = px.bar(
summary_stats,
x=summary_stats.index,
y=["mean", "std", "min", "25%", "50%", "75%", "max"],
)
# Update the layout of the figure for better visualization
fig.update_layout(
title="Summary Statistics of Numeric Columns",
xaxis_title="Columns",
yaxis_title="Values",
barmode="group",
)
# Show the bar chart
fig.show()
# Check for missing values
print(data.isnull().sum())
import plotly.express as px
# Check for missing values
missing_values = data.isnull().sum()
# Create a bar chart using Plotly
fig = px.bar(missing_values, x=missing_values.index, y=missing_values.values)
# Update the layout of the figure for better visualization
fig.update_layout(title="Missing Values", xaxis_title="Columns", yaxis_title="Count")
# Show the bar chart
fig.show()
# Normalize numerical columns (excluding the target "Class", which should stay 0/1)
scaler = StandardScaler()
numerical_cols = data.select_dtypes(include=[np.number]).columns.drop("Class")
data[numerical_cols] = scaler.fit_transform(data[numerical_cols])
# Preprocessing the data
# Drop irrelevant columns
data.drop(["BQ"], axis=1, inplace=True)
data.drop(["EL"], axis=1, inplace=True)
# Fill null values with the mean
data = data.fillna(data.mean())
# Check if there are any remaining null values
print(data.isnull().sum())
# Perform PCA
pca = PCA(n_components=20) # Increase the number of components to 20
pca_result = pca.fit_transform(data.drop(columns=["Class"]))
# Create the PCA DataFrame
pca_df = pd.DataFrame(data=pca_result, columns=[f"PC{i}" for i in range(1, 21)])
pca_df["Class"] = data["Class"]
# Explained variance ratio
explained_variance = pca.explained_variance_ratio_
print(f"Explained variance ratio: {explained_variance}")
import plotly.graph_objects as go
explained_variance = pca.explained_variance_ratio_
fig = go.Figure(
data=go.Bar(
x=[f"PC{i+1}" for i in range(len(explained_variance))],
y=explained_variance,
marker=dict(color="skyblue"),
opacity=0.8,
)
)
fig.update_layout(
title="Explained Variance Ratio",
xaxis_title="Principal Components",
yaxis_title="Explained Variance",
showlegend=False,
)
fig.show()
# Scatter plot of PCA components
fig = px.scatter(pca_df, x="PC1", y="PC2", color="Class")
fig.show()
# Perform PCA
# Get the explained variance ratio
explained_variance_ratio = pca.explained_variance_ratio_
# Calculate the cumulative explained variance
cumulative_explained_variance = np.cumsum(explained_variance_ratio)
# Plot explained variance ratio
plt.figure(figsize=(8, 6))
plt.bar(
range(1, len(explained_variance_ratio) + 1), explained_variance_ratio, alpha=0.8
)
plt.xlabel("Principal Components")
plt.ylabel("Explained Variance Ratio")
plt.title("Explained Variance Ratio by Principal Components")
plt.xticks(range(1, len(explained_variance_ratio) + 1))
plt.show()
# Plot cumulative explained variance
plt.figure(figsize=(8, 6))
plt.plot(
range(1, len(cumulative_explained_variance) + 1),
cumulative_explained_variance,
marker="o",
linestyle="--",
)
plt.xlabel("Principal Components")
plt.ylabel("Cumulative Explained Variance")
plt.title("Cumulative Explained Variance by Principal Components")
plt.xticks(range(1, len(cumulative_explained_variance) + 1))
plt.show()
# In this code, the n_components parameter of the PCA class is set to 20, indicating that you want to retain 20 principal components. The column names in the pca_df DataFrame are updated to reflect the 20 principal components (PC1 to PC20).
# Please note that with an increased number of principal components, the dimensionality of the dataset will also increase, and visualizing all 20 components on a scatter plot may not be practical. You may need to explore other visualization techniques or focus on specific subsets of principal components for analysis.
#
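# As suggested above, one way to inspect more than two components at once is a scatter
# matrix of the first few PCs -- a hedged sketch, not part of the original notebook:
fig = px.scatter_matrix(
    pca_df,
    dimensions=["PC1", "PC2", "PC3", "PC4"],
    color="Class",
    title="Scatter matrix of the first four principal components",
)
fig.update_traces(diagonal_visible=False)
fig.show()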
# Perform PCA
pca = PCA(n_components=20)
pca_result = pca.fit_transform(data.drop(columns=["Class"]))
# Create the PCA DataFrame
pca_df = pd.DataFrame(data=pca_result, columns=[f"PC{i}" for i in range(1, 21)])
pca_df["Class"] = data["Class"]
# Variance explained by each PC
explained_variance_ratio = pca.explained_variance_ratio_
variance_df = pd.DataFrame(
{
"PC": [f"PC{i}" for i in range(1, 21)],
"Explained Variance": explained_variance_ratio,
}
)
# Display table
print(variance_df)
# Bar plot of explained variance
plt.figure(figsize=(10, 6))
sns.barplot(x="PC", y="Explained Variance", data=variance_df)
plt.xlabel("Principal Component")
plt.ylabel("Explained Variance")
plt.title("Explained Variance by Principal Component")
plt.show()
129887166
# # 0. Imports and predefines
import os
import random
import warnings
from pathlib import Path
from dataclasses import dataclass, asdict
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import (
f1_score,
accuracy_score,
precision_score,
recall_score,
roc_auc_score,
)
from tqdm.notebook import tqdm
import torch
import transformers
# Hardware
num_workers = 2
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
# seed all
SEED = 42
torch.manual_seed(SEED)
np.random.seed(SEED)
random.seed(SEED)
g = torch.Generator()
g.manual_seed(SEED)
def seed_dataloader_worker(worker_id):
worker_seed = torch.initial_seed() % 2**32
np.random.seed(worker_seed)
random.seed(worker_seed)
from kaggle_secrets import UserSecretsClient
import wandb
user_secrets = UserSecretsClient()
# I have saved my API token with "WANDB_KEY" as the Label.
# If you use some other Label, make sure to change it in get_secret below.
wandb_api = user_secrets.get_secret("WANDB_KEY")
wandb.login(key=wandb_api)
# # 1. Prepare data
import gdown
gdown.download_folder(
"https://drive.google.com/drive/folders/1YWTFh0So7mVLVZcW-HPepULjJ86NO1Ev",
quiet=True,
)
def read_tsv(path):
return pd.read_csv(path, delimiter="\t", index_col=0)
# final_folder = Path("/content/drive/MyDrive/NLP_project_2023/data/final")
final_folder = Path("/kaggle/working/data/final")
final_train = read_tsv(final_folder / "train.tsv")
final_val = read_tsv(final_folder / "dev.tsv")
final_test = read_tsv(final_folder / "test.tsv")
swap_train = read_tsv("/kaggle/working/data/swap/train.tsv")
unlabeled_train = read_tsv("/kaggle/working/data/unlabeled/final/train.tsv")
unlabeled_val = read_tsv("/kaggle/working/data/unlabeled/final/dev.tsv")
# print some info
for df in [
final_train,
final_val,
final_test,
swap_train,
unlabeled_train,
unlabeled_val,
]:
print("+" * 30)
print(df.info())
# merge into train, val and test
train_df = pd.concat([final_train, swap_train])
val_df = pd.concat([final_val])
test_df = pd.concat([final_test])
print("Train:", train_df.shape)
print("Val:", val_df.shape)
print("Test:", test_df.shape)
train_df.head(5)
class PairedSentenceDataset(torch.utils.data.Dataset):
def __init__(
self,
table: pd.DataFrame,
tokenizer: transformers.PreTrainedTokenizer,
max_length: int,
):
super().__init__()
self.first_sentences = table["sentence1"].values
self.second_sentences = table["sentence2"].values
self.labels = table["label"].values
self.tokenizer = tokenizer
self.max_length = max_length
def __len__(self):
return len(self.first_sentences)
def __getitem__(self, index: int):
first_sentence = self.first_sentences[index]
second_sentence = self.second_sentences[index]
label = self.labels[index]
tokenizer_output = self.tokenizer(
first_sentence,
second_sentence,
return_tensors="pt",
return_token_type_ids=True,
max_length=self.max_length,
padding="max_length",
truncation=True,
)
return {"labels": torch.LongTensor([label]), **tokenizer_output}
def build_tokenizer(model: str):
return transformers.AutoTokenizer.from_pretrained(model, use_fast=False)
tokenizer = build_tokenizer("microsoft/deberta-v3-large")
dataset = PairedSentenceDataset(train_df, tokenizer, 128)
assert dataset[0]["labels"].shape == (1,)
assert dataset[0]["input_ids"].shape == (1, 128)
assert dataset[0]
dataset = PairedSentenceDataset(train_df, tokenizer, 20)
assert dataset[0]["input_ids"].shape == (1, 20)
# # 2. Model side
@dataclass
class TrainConfig:
model: str
checkpoints_folder: str
device: torch.device
batch_size: int
epochs: int
max_length: int
lr: float
def train_model(
    model: transformers.DebertaModel,
    optimizer: torch.optim.Optimizer,
    train_dataloader: torch.utils.data.DataLoader,
    val_dataloader: torch.utils.data.DataLoader,
    config: TrainConfig,
    wandb_project: str,
):
    # Standalone training loop; the Trainer class below implements the same logic as a class.
    wandb.init(project=wandb_project, config=asdict(config))
    for epoch in range(config.epochs):
        # train
        model.train()
        for batch in tqdm(train_dataloader):
            optimizer.zero_grad()
            batch = {key: batch[key].squeeze().to(device) for key in batch}
            outputs = model(**batch)
            outputs["loss"].backward()
            wandb.log({"train_loss": outputs["loss"].detach().cpu().numpy()})
            optimizer.step()
        # val
        model.eval()
        predicts_batches = []
        ground_truths_batches = []
        with torch.no_grad():
            for batch in tqdm(val_dataloader):
                ground_truths_batches.append(batch["labels"].squeeze().numpy())
                batch = {key: batch[key].squeeze().to(device) for key in batch}
                outputs = model(**batch)
                predicts_batches.append(
                    torch.argmax(outputs["logits"].cpu(), dim=-1).numpy()
                )
        predicts = np.concatenate(predicts_batches)
        ground_truths = np.concatenate(ground_truths_batches)
        wandb.log(
            {
                "accuracy": accuracy_score(ground_truths, predicts),
                "f1": f1_score(ground_truths, predicts),
                "recall": recall_score(ground_truths, predicts),
                "precision": precision_score(ground_truths, predicts),
            }
        )
    wandb.finish()
class Trainer:
checkpoint_field_model: str = "model"
checkpoint_field_optimizer: str = "optimizer"
    checkpoint_field_epoch: str = "epoch"
checkpoint_last_name: str = "last.tar"
# TODO add logging of the best
checkpoint_best_name: str = "best.tar"
def __init__(
self, model: transformers.DebertaModel, optimizer: torch.optim.Optimizer
) -> None:
self.model = model
self.optimizer = optimizer
def train(
self,
train_dataloader: torch.utils.data.DataLoader,
val_dataloader: torch.utils.data.DataLoader,
config: TrainConfig,
wandb_project: str,
) -> None:
wandb.init(project=wandb_project, config=asdict(config))
        self.model.to(config.device)
if not os.path.exists(config.checkpoints_folder):
os.makedirs(config.checkpoints_folder)
start_epoch = self.load_checkpoint(config.checkpoints_folder) + 1
for epoch in range(start_epoch, config.epochs):
self.make_train_step(train_dataloader)
self.make_evaluation_step(val_dataloader)
self.save_checkpoint(config.checkpoints_folder, epoch)
wandb.finish()
def make_inference(self, dataloader: torch.utils.data.DataLoader) -> tuple:
self.model.eval()
with torch.no_grad():
predicts = []
labels = []
for batch in tqdm(dataloader):
labels.append(batch["labels"])
batch = Trainer._move_dict_items_to_device(batch, self.model.device)
outputs = self.model(**batch)
predicts.append(outputs["logits"].cpu())
return torch.cat(predicts), torch.cat(labels).squeeze()
@staticmethod
def _move_dict_items_to_device(target_dict: dict, device: str):
return {key: target_dict[key].squeeze().to(device) for key in target_dict}
# TODO: get rid of manual metric specification
def make_evaluation_step(
self, dataloader: torch.utils.data.DataLoader, return_labels: bool = True
):
logits, labels = self.make_inference(dataloader)
predicted_probas = torch.softmax(logits, dim=-1).numpy()
predicted_labels = torch.argmax(logits, dim=-1).numpy()
wandb.log(
{
"accuracy": accuracy_score(labels, predicted_labels),
"f1": f1_score(labels, predicted_labels),
"recall": recall_score(labels, predicted_labels),
"precision": precision_score(labels, predicted_labels),
"auc_score": roc_auc_score(labels, predicted_probas[:, 1]),
}
)
def make_train_step(self, dataloader: torch.utils.data.DataLoader):
self.model.train()
for batch in tqdm(dataloader):
self.optimizer.zero_grad()
batch = Trainer._move_dict_items_to_device(batch, self.model.device)
outputs = self.model(**batch)
loss = outputs["loss"]
loss.backward()
wandb.log({"train_loss": loss.detach().cpu().numpy()})
            self.optimizer.step()
def load_checkpoint(self, folder: str) -> int:
checkpoint_path = Path(folder) / Trainer.checkpoint_last_name
if not os.path.exists(checkpoint_path):
warnings.warn(
"No checkpoints found. Start epoch 0 with given model and optimizer."
)
return -1
checkpoint = torch.load(checkpoint_path)
self.model.load_state_dict(checkpoint[Trainer.checkpoint_field_model])
self.optimizer.load_state_dict(checkpoint[Trainer.checkpoint_field_optimizer])
return checkpoint[Trainer.checkpoint_field_epoch]
def save_checkpoint(self, folder: str, epoch: int) -> None:
checkpoint_name = Path(folder) / Trainer.checkpoint_last_name
torch.save(
{
Trainer.checkpoint_field_model: self.model.state_dict(),
Trainer.checkpoint_field_optimizer: self.optimizer.state_dict(),
Trainer.checkpoint_field_epoch: epoch,
},
checkpoint_name,
)
# # 3. Experiment
checkpoints_folder = "/artifacts/init_exp"
config = TrainConfig(
model="microsoft/deberta-v3-large",
checkpoints_folder=checkpoints_folder,
batch_size=8,
epochs=1,
max_length=64,
lr=6e-6,
device=str(device),
)
tokenizer = build_tokenizer(config.model)
train_loader = torch.utils.data.DataLoader(
PairedSentenceDataset(train_df, tokenizer, config.max_length),
batch_size=config.batch_size,
shuffle=True,
num_workers=num_workers,
worker_init_fn=seed_dataloader_worker,
generator=g,
)
val_loader = torch.utils.data.DataLoader(
PairedSentenceDataset(val_df, tokenizer, config.max_length),
batch_size=config.batch_size,
shuffle=False,
num_workers=num_workers,
worker_init_fn=seed_dataloader_worker,
generator=g,
)
test_loader = torch.utils.data.DataLoader(
PairedSentenceDataset(test_df, tokenizer, config.max_length),
batch_size=config.batch_size,
shuffle=False,
num_workers=num_workers,
worker_init_fn=seed_dataloader_worker,
generator=g,
)
model = transformers.DebertaV2ForSequenceClassification.from_pretrained(
config.model, num_labels=2
).to(device)
optimizer = torch.optim.Adam(model.parameters(), lr=config.lr)
trainer = Trainer(model, optimizer)
trainer.train(train_loader, val_loader, config, wandb_project="nlp_project_2023")
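# (Added sketch, not part of the original run.) The test_loader built above is never used;
# the existing Trainer.make_inference can score the held-out test split with the weights
# from the last checkpoint, for example:
test_logits, test_labels = trainer.make_inference(test_loader)
test_labels = test_labels.numpy()
test_probas = torch.softmax(test_logits, dim=-1).numpy()
test_preds = test_probas.argmax(axis=-1)
print("test accuracy:", accuracy_score(test_labels, test_preds))
print("test f1      :", f1_score(test_labels, test_preds))
print("test roc-auc :", roc_auc_score(test_labels, test_probas[:, 1]))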
129887385
# # Predicting the rating of a movie using machine learning algorithm.
# In this notebook, we're going to go through an example machine learning project with the goal of predicting the rating of a movie.
# ## 1.Problem definition
# Movie rating prediction from reviews, using machine learning and deep learning models.
# ## 2.Data
# This data set is downloaded from IMDb movie reviews dataset.
# Link: https://ieee-dataport.org/open-access/imdb-movie-reviews-dataset
# ### Primary Target:
# Predict the rating of a single movie based on its review data.
# ### Further Target:
# Predict ratings for different movies.
# ## 3.Steps
# Step 1: download the data; it contains one CSV file per movie, from which we extract the review text and the rating.
# Step 2: after extracting these features, apply a word-embedding process to create the train and test data (see the sketch after the links below).
# Word embedding process:
# * 1.https://www.turing.com/kb/guide-on-word-embeddings-in-nlp
# * 2.https://www.geeksforgeeks.org/word-embeddings-in-nlp/
# * 3.https://towardsdatascience.com/introduction-to-word-embedding-and-word2vec-652d0c2060fa
# * 4.https://machinelearningmastery.com/what-are-word-embeddings/
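# (Added illustrative sketch; gensim and the toy reviews below are assumptions, not part of
# the original plan.) A word-embedding model such as Word2Vec learns a dense vector per
# token from tokenised reviews; one simple review representation is the mean of its word vectors.
from gensim.models import Word2Vec
import numpy as np
toy_reviews = [["great", "movie", "loved", "it"], ["boring", "plot", "bad", "acting"]]
w2v = Word2Vec(sentences=toy_reviews, vector_size=50, window=3, min_count=1, seed=42)
review_vec = np.mean([w2v.wv[word] for word in toy_reviews[0]], axis=0)  # 50-dim review vector
print(review_vec.shape)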
# ## 4.Modeling
# Design models using machine learning algorithms (a minimal SVM sketch follows this list):
# 1. Use ML algorithms like SVM
# 2. Use RNN model like LSTM
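# (Added illustrative sketch with made-up toy data.) One way the SVM option above could look:
# TF-IDF features feeding a linear SVM that treats each rating value as a class.
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
toy_texts = ["loved every minute", "terrible and boring", "decent but forgettable"]
toy_ratings = [9, 2, 6]
svm_baseline = make_pipeline(TfidfVectorizer(), LinearSVC())
svm_baseline.fit(toy_texts, toy_ratings)
print(svm_baseline.predict(["boring waste of time"]))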
# ## 5. Evaluating
# ## 6. Improving
# Improving the machine learning model using:
# * `Grid Search CV`
#
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# ### Preparing the tools:
# We're going to use `pandas`, `numpy` and `matplotlib` for data manipulation, analysis and plotting.
# Import all the tools we need
# Regular EDA(exploratory data analysis) and plotting libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# ## Load Data
# Load the data from directory
df = pd.read_csv("/kaggle/input/dataset/3 Idiots 2009.csv")
df.head(3)
test = df.review[0]
test
# ## Cleaning up the text
# Using Python's `replace()` method to remove unwanted markup and characters from the reviews.
# A function to clean a paragraph.
def clean_text(test):
"""
Clean's up the unnecessary thiings of a text by replacing them with proper symbols.
"""
test = test.replace("<br/>", "")
test = test.replace("--", " ")
test = test.replace("'", "")
test = test.replace('"', "")
return test
test_li = df.review
new_list = test_li.apply(clean_text)
new_list[1]
df.head(2)
df.review = new_list
df.review[1]
df.shape
df.drop(["date", "title", "username"], axis=1, inplace=True)
df.head(2)
df.rating.value_counts().plot(kind="bar", cmap="winter", title="Rating count")
df.isna().sum()
df.describe()
df.rating.value_counts()
df.dtypes
# ## Now change the text to vector using `spaCy`
df.review[1]
import spacy
np.random.seed(42)
nlp = spacy.load("en_core_web_sm")
doc = nlp("My girlfriend is Dr. Tareen. She loves to eat burger.")
for sentence in doc.sents:
print(sentence)
for sentence in doc.sents:
for word in sentence:
print(word, end="\n")
# ### Write a function to convert a paragraph to sentences.
def to_sentence(para):
"""
    Splits the given paragraph into sentences and prints them.
"""
doc = nlp(para)
for sentence in doc.sents:
print(sentence)
to_sentence(df.review[1])
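# (Added sketch of a possible next step; not from the original notebook.) spaCy can also
# turn a whole review into a fixed-length vector via doc.vector. Note that en_core_web_sm
# ships no pretrained static word vectors, so doc.vector falls back to the model's context
# tensors; en_core_web_md or en_core_web_lg provide proper word vectors.
def to_vector(para):
    """
    Returns a fixed-length spaCy vector for the given paragraph.
    """
    return nlp(para).vector
review_vectors = np.vstack([to_vector(review) for review in df.review[:5]])  # first 5 reviews
print(review_vectors.shape)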
129913271
# # Linear SVM - Email Spam Classifier
# - We will build a linear svm classifier to classify emails into spam and ham.
# - Support Vector Machines (SVM) and Logistic Regression are both popular machine learning models that can be used for classification tasks.
# - SVM and logistic regression are both linear models, but SVM can be used in cases where the data is not linearly separable by mapping the data into a higher-dimensional space using kernel functions. SVM tries to find the hyperplane that maximizes the margin between the two classes, which can lead to better classification performance in some cases.
# - Logistic regression, on the other hand, models the probability of the binary outcome directly, by fitting a logistic function to the data. Logistic regression assumes a linear relationship between the predictors and the logit of the probability of the binary outcome.
# - In some cases, SVM can perform better than logistic regression when the data is not linearly separable, or when there are outliers in the data. However, in some cases, logistic regression may perform better when the data is linearly separable and the relationship between the predictors and the outcome is linear.
# - In general, the choice between SVM and logistic regression depends on the specific characteristics of the data and the problem at hand. It is recommended to try both models and compare their performance on a validation set or through cross-validation to determine which model is the best fit for the particular problem.
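# (Added sketch on synthetic data; the real comparison would use the scaled X and y built
# later in this notebook.) A quick cross-validated comparison of the two models mentioned above:
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
from sklearn.model_selection import cross_val_score
X_demo, y_demo = make_classification(n_samples=500, n_features=20, random_state=42)
for clf in (SVC(C=1), LogisticRegression(max_iter=1000)):
    scores = cross_val_score(clf, X_demo, y_demo, cv=5, scoring="accuracy")
    print(type(clf).__name__, "mean CV accuracy:", round(scores.mean(), 3))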
# # Data Understanding
# - Let's first load the data and understand the attributes meanings, shape of the dataset etc.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# load the dataset
pd.set_option("display.max_columns", None)
email = pd.read_csv("/Users/sakshimunde/Downloads/Spam.csv")
email.head()
# Let's see the dimension of the data frame
email.shape
# Let's look at the summary
email.info()
# Let's look at null values
email.isnull().sum()
# let's look at spam emails
email.spam.describe()
# - The dataset has no null or missing values. It has 58 columns (the 'spam' target plus 57 features), and approximately 40% of the emails are spam, so the classes are reasonably balanced.
# ## Data Preparation
email.describe()
# #### Splitting data into X and y
X = email.drop("spam", axis=1)
y = email.spam.values.astype(int)
# scaling the features : x - mean(x)/std(x)
from sklearn.preprocessing import scale
X = scale(X)
# ### Splitting data into train test split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.7, test_size=0.3, random_state=100
)
# confirm that splitting has similar distribution of spam and ham
print(y_train.mean())
print(y_test.mean())
# ## Model Building
# - Let's build a linear SVM model. The SVC() class does that in sklearn.
# - Maximal margin classifier has certain limitations. For instance, it will not find a separator if the classes are not linearly separable.
# - The soft margin classifier overcomes the drawbacks of the maximal margin classifier by allowing certain points to be misclassified. You can control the number of misclassifications using the cost of misclassification C, where C is the maximum value of the summation of the slack variable epsilon(ϵ), i.e., ∑ϵi ≤ C.
# ---
# - If the value of C is high, more points are allowed to be misclassified or to violate the margin. In this case, the model is flexible, more generalisable and less likely to overfit. In other words, it has a high bias.
# - On the other hand, if the value of C is low, fewer points are allowed to be misclassified or to violate the margin. In this case, the model is less flexible, less generalisable and more likely to overfit. In other words, it has a high variance.
# - So, C represents the ‘liberty of misclassification’ that you provide to the model.
#
# - Note:
# - The C defined above and the hyperparameter C used in the SVC() function in Python are the inverse of each other. In SVC(), C represents the ‘penalty for misclassification’.
# - Penalty of misclassification and liberty of misclassification are related to the cost of making classification errors in a machine learning model.
# - The penalty of misclassification refers to the cost or "penalty" associated with misclassifying a data point. For example, in a medical diagnosis task, misclassifying a patient with a disease as healthy could result in serious health consequences for the patient. The penalty of such a misclassification is therefore high.
# - In contrast, the liberty of misclassification refers to the degree of tolerance that is allowed for classification errors. In some tasks, such as spam filtering, it may be acceptable to misclassify some legitimate emails as spam in order to avoid missing any actual spam emails. This means that the liberty of misclassification is high in such tasks.
# - In machine learning, the choice of penalty of misclassification and liberty of misclassification can be controlled through hyperparameters of the model. For example, in a support vector machine (SVM), the penalty of misclassification is controlled by the parameter C, which determines the tradeoff between maximizing the margin between classes and minimizing the misclassification of training samples. Similarly, in decision trees, the liberty of misclassification can be controlled through the choice of the decision threshold.
# - Choosing the appropriate penalty of misclassification and liberty of misclassification depends on the specific task and the consequences of classification errors. In some cases, a high penalty of misclassification and low liberty of misclassification may be necessary to minimize the risk of errors, while in other cases, a more relaxed approach may be acceptable.
from sklearn.svm import SVC
# instantiate an object of class SVC()
# we will use c=1
model = SVC(C=1)
# fit the model
model.fit(X_train, y_train)
# predict
y_pred = model.predict(X_train)
# evaluate the model using confusion matrix
from sklearn import metrics
metrics.confusion_matrix(y_train, y_pred)
print("Accuracy", metrics.accuracy_score(y_train, y_pred))
print("Precision : ", metrics.precision_score(y_train, y_pred))
print("Recall : ", metrics.recall_score(y_train, y_pred))
# specificity (% of hams correctly classified)
1919 / (1919 + 58)
# - The SVM we have built gives decent results on the training data, with an accuracy of 94%, sensitivity/recall of 90% and specificity of 97%.
# - 94% of all emails are correctly classified.
# - 90% of spams are identified correctly.
# - 97% of hams are identified correctly.
# # Hyperparameter Tuning
# ### K- fold cross validation
# - K-fold cross-validation is a method of evaluating the performance of a machine learning model. It involves splitting the data into K equal subsets, or folds, then using K-1 folds as the training data and the remaining 1 fold as the validation data. This process is repeated K times, with each fold used once as the validation data.
# - The main advantage of K-fold cross-validation is that it provides a more reliable estimate of the model's performance than a simple train-test split, especially when the amount of data is limited. It also allows for a more thorough evaluation of the model's ability to generalize to new data, as all data points are used for both training and validation.
# - After performing K-fold cross-validation, one can average the performance metrics (e.g. accuracy, precision, recall) across the K folds to get an estimate of the model's overall performance. This can be used to compare the performance of different models or to tune the hyperparameters of the model.
# - We have used the default value of C=1 for the SVM model.
# - `We can evaluate the performance of the model using cross-validation and assess whether the model is underfitting or overfitting`.
# - If the model is underfitting, we may need to decrease the value of C to increase the flexibility of the model. Conversely, if the model is overfitting, we may need to increase the value of C to reduce the complexity of the model.
# - Overfitting: good performance on the training data, poor generalization to other data.
# - Underfitting: poor performance on the training data and poor generalization to other data.
#
from sklearn.model_selection import validation_curve
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
# Creating an object of KFold with 5 splits
# help(KFold)
folds = KFold(n_splits=5, shuffle=True, random_state=4)
folds
# instantiating model with cost =1
model = SVC(C=1)
# Computing the cross validation score
# Note that the argument cv takes the 'folds' object and we have specified 'accuracy' as the metric
# cv: cross validation
cv = cross_val_score(model, X_train, y_train, cv=folds, scoring="accuracy")
cv
# average of accuracy
cv.mean()
# # Grid search to find optimal Hyperparameter C
# - estimator: the estimator needs to provide a ``score`` function, or ``scoring`` must be passed.
#
# specify range of parameters (C) as a list
params = {"C": [0.01, 1, 10, 100, 1000]}
model = SVC()
# set up grid search scheme
model_cv = GridSearchCV(
estimator=model,
param_grid=params,
scoring="accuracy",
cv=folds,
verbose=1,
return_train_score=True,
)
model_cv
# fit the model - it will fit 5 folds across all values of c
model_cv.fit(X_train, y_train)
model_cv.best_score_
model_cv.best_params_
# - The highest test accuracy is 93% at C = 10.
#
# Results of grid search CV
cv = pd.DataFrame(model_cv.cv_results_)
cv
#
# - To get a better sense of how training and test accuracy vary with C, let's plot the training and test accuracies against C.
# plot C versus train-test scores
plt.plot(cv["param_C"], cv["mean_test_score"])
plt.plot(cv["param_C"], cv["mean_train_score"])
plt.xlabel("C")
plt.ylabel("Accuracy")
plt.legend(["test accuracy", "train accuracy"], loc="upper left")
plt.xscale("log")
# - Let's now look at the metrics corresponding to C = 10
model_cv.best_params_
# Building a model with best value of C i.e, optimal C = 10
model = SVC(C=10)  # or SVC(**model_cv.best_params_)
# fit the model
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
y_pred
# metrics on the training data: print accuracy, precision and recall
print("accuracy_score :", metrics.accuracy_score(y_train, y_pred))
print("precision_score :", metrics.precision_score(y_train, y_pred))
print("recall_score : ", metrics.recall_score(y_train, y_pred))
# # Linear SVM - Email Spam Classifier
# - We will build a linear svm classifier to classify emails into spam and ham.
# - Support Vector Machines (SVM) and Logistic Regression are both popular machine learning models that can be used for classification tasks.
# - SVM and logistic regression are both linear models, but SVM can be used in cases where the data is not linearly separable by mapping the data into a higher-dimensional space using kernel functions. SVM tries to find the hyperplane that maximizes the margin between the two classes, which can lead to better classification performance in some cases.
# - Logistic regression, on the other hand, models the probability of the binary outcome directly, by fitting a logistic function to the data. Logistic regression assumes a linear relationship between the predictors and the logit of the probability of the binary outcome.
# - In some cases, SVM can perform better than logistic regression when the data is not linearly separable, or when there are outliers in the data. However, in some cases, logistic regression may perform better when the data is linearly separable and the relationship between the predictors and the outcome is linear.
# - In general, the choice between SVM and logistic regression depends on the specific characteristics of the data and the problem at hand. It is recommended to try both models and compare their performance on a validation set or through cross-validation to determine which model is the best fit for the particular problem.
# # Data Understanding
# - Let's first load the data and understand the attributes meanings, shape of the dataset etc.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# load the dataset
pd.set_option("display.max_columns", None)
email = pd.read_csv("/Users/sakshimunde/Downloads/Spam.csv")
email.head()
# Let's see the dimension of the data frame
email.shape
# Let's look at the summary
email.info()
# Let's look at null values
email.isnull().sum()
# let's look at spam emails
email.spam.describe()
# - We can see that the dataset is balanced, as it has no null or missing values.The dataset has 58 columns of feature to classify, and there are approximately 40% of spam emails.
# ## Data Preparation
email.describe()
# #### Splitting data into X and y
X = email.drop("spam", axis=1)
y = email.spam.values.astype(int)
# scaling the features : x - mean(x)/std(x)
from sklearn.preprocessing import scale
X = scale(X)
# ### Splitting data into train test split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.7, test_size=0.3, random_state=100
)
# confirm that splitting has similar distribution of spam and ham
print(y_train.mean())
print(y_test.mean())
# ## Model Building
# - Let's build a linear SVM model. The SVC() class does that in sklearn.
# - Maximal margin classifier has certain limitations. For instance, it will not find a separator if the classes are not linearly separable.
# - The soft margin classifier overcomes the drawbacks of the maximal margin classifier by allowing certain points to be misclassified. You can control the number of misclassifications using the cost of misclassification C, where C is the maximum value of the summation of the slack variable epsilon(ϵ), i.e., ∑ϵi ≤ C.
# ---
# - If the value of C is high, a higher number of points will be allowed to misclassify or violate the margin. In this case, the model is flexible, more generalisable and less likely to overfit. In other words, it has a high bias.
# - On the other hand, if the value of C is low, a lesser number of points will be allowed to misclassify or violate the margin. In this case, the model is less flexible, less generalisable and more likely to overfit. In other words, it has a high variance.
# - So, C represents the ‘liberty of misclassification’ that you provide to the model.
#
# - Note:
# - The C defined above and the hyperparameter C used in the SVC() function in Python are the inverse of each other. In SVC(), C represents the ‘penalty for misclassification’.
# - Penalty of misclassification and liberty of misclassification are related to the cost of making classification errors in a machine learning model.
# - The penalty of misclassification refers to the cost or "penalty" associated with misclassifying a data point. For example, in a medical diagnosis task, misclassifying a patient with a disease as healthy could result in serious health consequences for the patient. The penalty of such a misclassification is therefore high.
# - In contrast, the liberty of misclassification refers to the degree of tolerance that is allowed for classification errors. In some tasks, such as spam filtering, it may be acceptable to misclassify some legitimate emails as spam in order to avoid missing any actual spam emails. This means that the liberty of misclassification is high in such tasks.
# - In machine learning, the choice of penalty of misclassification and liberty of misclassification can be controlled through hyperparameters of the model. For example, in a support vector machine (SVM), the penalty of misclassification is controlled by the parameter C, which determines the tradeoff between maximizing the margin between classes and minimizing the misclassification of training samples. Similarly, in decision trees, the liberty of misclassification can be controlled through the choice of the decision threshold.
# - Choosing the appropriate penalty of misclassification and liberty of misclassification depends on the specific task and the consequences of classification errors. In some cases, a high penalty of misclassification and low liberty of misclassification may be necessary to minimize the risk of errors, while in other cases, a more relaxed approach may be acceptable.
from sklearn.svm import SVC
# instantiate an object of class SVC()
# we will use c=1
model = SVC(C=1)
# fit the model
model.fit(X_train, y_train)
# predict
y_pred = model.predict(X_train)
# evaluate the model using confusion matrix
from sklearn import metrics
metrics.confusion_matrix(y_train, y_pred)
print("Accuracy", metrics.accuracy_score(y_train, y_pred))
print("Precision : ", metrics.precision_score(y_train, y_pred))
print("Recall : ", metrics.recall_score(y_train, y_pred))
# specificity (% of hams correctly classified)
1919 / (1919 + 58)
# - The SVM we have built gives descent results with an accuracy of 94% ,sensitivity/recall of 90% and specificity of 97%.
# - 94% of all emails are correctly classified.
# - 90% of spams are identified correctly.
# - 97% of hams are identified correctly.
# # Hyperparameter Tuning
# ### K- fold cross validation
# - K-fold cross-validation is a method of evaluating the performance of a machine learning model. It involves splitting the data into K equal subsets, or folds, then using K-1 folds as the training data and the remaining 1 fold as the validation data. This process is repeated K times, with each fold used once as the validation data.
# - The main advantage of K-fold cross-validation is that it provides a more reliable estimate of the model's performance than a simple train-test split, especially when the amount of data is limited. It also allows for a more thorough evaluation of the model's ability to generalize to new data, as all data points are used for both training and validation.
# - After performing K-fold cross-validation, one can average the performance metrics (e.g. accuracy, precision, recall) across the K folds to get an estimate of the model's overall performance. This can be used to compare the performance of different models or to tune the hyperparameters of the model.
# - We have used the default value of C=1 for the SVM model.
# - `We can evaluate the performance of the model using cross-validation and assess whether the model is underfitting or overfitting`.
# - If the model is underfitting, we may need to decrease the value of C to increase the flexibility of the model. Conversely, if the model is overfitting, we may need to increase the value of C to reduce the complexity of the model.
# - Overfitting: Good performance on the training data, poor generliazation to other data.
# - Underfitting: Poor performance on the training data and poor generalization to other data
#
from sklearn.model_selection import validation_curve
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
# Creating an object of KFold with 5 splits
# help(KFold)
folds = KFold(n_splits=5, shuffle=True, random_state=4)
folds
# instantiating model with cost =1
model = SVC(C=1)
# Computing the cross validation score
# Note that the argument cv takes the 'folds' object and we have specified 'accuracy' as the metric
# cv: cross validation
cv = cross_val_score(model, X_train, y_train, cv=folds, scoring="accuracy")
cv
# average of accuracy
cv.mean()
# # Grid search to find optimal Hyperparameter C
# - esyimator : Either estimator needs to provide a ``score`` function, or ``scoring`` must be passed.
#
# specify range of parameters (C) as a list
params = {"C": [0.01, 1, 10, 100, 1000]}
model = SVC()
# set up grid search scheme
model_cv = GridSearchCV(
estimator=model,
param_grid=params,
scoring="accuracy",
cv=folds,
verbose=1,
return_train_score=True,
)
model_cv
# fit the model - it will fit 5 folds across all values of c
model_cv.fit(X_train, y_train)
model_cv.best_score_
model_cv.best_params_
# - The highest test accuracy is 93% at C = 10.
#
# Results of grid search CV
cv = pd.DataFrame(model_cv.cv_results_)
cv
#
# - To get a better sense of how training and test accuracy vary with C, let's plot the training and test accuracies against C.
# plot C versus train-test scores
plt.plot(cv["param_C"], cv["mean_test_score"])
plt.plot(cv["param_C"], cv["mean_train_score"])
plt.xlabel("C")
plt.ylabel("Accuracy")
plt.legend(["test accuracy", "train accuracy"], loc="upper left")
plt.xscale("log")
# - Let's now look at the metrics corresponding to C = 10
model_cv.best_params_
# Building a model with the best value of C, i.e. the optimal C = 10
model = SVC(C=10)  # equivalently: SVC(**model_cv.best_params_)
# fit the model
model.fit(X_train, y_train)
y_pred = model.predict(X_train)
y_pred
# metrics : print accuracy , precision and recall
print("accuracy_score :", metrics.accuracy_score(y_train, y_pred))
print("precision_score :", metrics.precision_score(y_train, y_pred))
print("recall_score : ", metrics.recall_score(y_train, y_pred))
| false | 0 | 2,770 | 1 | 2,770 | 2,770 |
||
129913105
|
# # Non Linear SVM Email Spam Classifier
# - We'll build a non-linear SVM classifier to classify emails and compare the performance with the linear SVM model.
#
import pandas as pd, numpy as np
import matplotlib.pyplot as plt, seaborn as sns
# suppress warnings
import warnings
warnings.filterwarnings("ignore")
# ### Data understanding
pd.set_option("display.max_columns", None)
email = pd.read_csv("/Users/sakshimunde/Downloads/Spam.csv")
email.head()
email.info()
# let's check for null values
email.isnull().sum()
# - There are no null values.
# ## Data Preparation
email.describe()
# - Around 40% of the emails are spam.
#
# splitting data into x and y
X = email.drop("spam", axis=1)
y = email.spam.values.astype(int)
y
# ### Splitting data into train test split
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, train_size=0.7, test_size=0.3, random_state=100
)
print(X_train.shape)
print(y_train.shape)
print(X_test.shape)
print(y_test.shape)
# ## Model building
# - In SVC()
#     - If the value of C is low, more points are allowed to be misclassified or to violate the margin.
#     - On the other hand, if the value of C is high, fewer points are allowed to be misclassified or to violate the margin.
from sklearn.svm import SVC
# Build model using rbf kernel and c=1
model = SVC(C=1, kernel="rbf")
# fit the model
model.fit(X_train, y_train)
# predict the model
y_pred = model.predict(X_train)
y_pred
# ## Model evaluation
# - In nonlinear kernels like the RBF, you use the parameter ‘gamma’ to control the amount of nonlinearity in the model. Higher values of ‘gamma’ result in more nonlinearity, while lower values of ‘gamma’ result in less nonlinearity. It is related to the kernel width sigma used in some texts and packages (gamma = 1 / (2 * sigma^2)).
# - The original notebook displays a plot of three RBF kernels with different ‘gamma’ values (image not reproduced here); the sketch below illustrates the same idea.
#
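# - A small sketch (added by the editor, not part of the original notebook): the RBF kernel
#   value k(x, x') = exp(-gamma * ||x - x'||^2) plotted against the distance ||x - x'|| for a
#   few gamma values, to show how larger gamma makes the kernel more local.
import numpy as np
import matplotlib.pyplot as plt

distance = np.linspace(0, 5, 200)
for gamma_value in [0.1, 1, 10]:
    plt.plot(distance, np.exp(-gamma_value * distance**2), label=f"gamma = {gamma_value}")
plt.xlabel("distance ||x - x'||")
plt.ylabel("kernel value")
plt.title("RBF kernel for different gamma values")
plt.legend()
plt.show()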
from sklearn import metrics
# confusion matrix
metrics.confusion_matrix(y_train, y_pred)
# - 1763 emails are correctly predicted as 'ham'. 537 emails are correctly predicted as spam.
# accuracy
print(metrics.accuracy_score(y_train, y_pred))
# precision
print(metrics.precision_score(y_train, y_pred))
# recall
print(metrics.recall_score(y_train, y_pred))
# ## Hyperparameter Tuning
# - Now we have multiple parameters to optimise
# - The choice of kernel(linear, rbf, polynomial etc)
# - C
# - gamma
# - We will use the `GridSearchCV()` to tune the hyperparameters
# #### Kfolds cross validation
from sklearn.model_selection import KFold
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import GridSearchCV
folds = KFold(n_splits=5, shuffle=True, random_state=4)
# #### Grid search to find the optimal hyperparameters C and gamma
# Specify range of hyperparameters
params = [{"gamma": [1e-2, 1e-3, 1e-4], "C": [1, 10, 100, 1000]}]
# build model
model = SVC(kernel="rbf")
# setup grid search scheme
model_cv = GridSearchCV(
estimator=model,
param_grid=params,
scoring="accuracy",
cv=folds,
verbose=1,
return_train_score=True,
)
model_cv
# fit the model
model_cv.fit(X_train, y_train)
# CV results
cv = pd.DataFrame(model_cv.cv_results_)
cv.head()
cv.info()
# optimal accuracy score of the model
print(model_cv.best_score_)
# optimal hyperparameters
print(model_cv.best_params_)
# - The best score is 91%, corresponding to hyperparameters C = 1000 and gamma = 0.0001.
# - To achieve high accuracy there is a trade off between:
# - High gamma(i.e., high non-linearity) and average value of C.
# - Low gamma (i.e., less non-linearity) and high value of C.
# Plotting C versus train-test score with different gamma values
plt.figure(figsize=[15, 6])
cv["param_C"] = cv["param_C"].astype(int)
plt.subplot(1, 3, 1)
gamma_01 = cv[cv["param_gamma"] == 0.01]
plt.plot(gamma_01["param_C"], gamma_01["mean_test_score"])
plt.plot(gamma_01["param_C"], gamma_01["mean_train_score"])
plt.xscale("log")
plt.xlabel("C")
plt.ylabel("Accuracy")
plt.title("Gamma = 0.01")
plt.ylim([0.80, 1])
plt.legend(["test accuracy", "train accuracy"], loc="upper left")
plt.subplot(1, 3, 2)
gamma_001 = cv[cv["param_gamma"] == 0.001]
plt.plot(gamma_001["param_C"], gamma_001["mean_test_score"])
plt.plot(gamma_001["param_C"], gamma_001["mean_train_score"])
plt.xscale("log")
plt.xlabel("C")
plt.ylabel("Accuracy")
plt.title("Gamma = 0.001")
plt.ylim([0.80, 1])
plt.legend(["test accuracy", "train accuracy"], loc="upper left")
plt.subplot(1, 3, 3)
gamma_0001 = cv[cv["param_gamma"] == 0.0001]
plt.plot(gamma_0001["param_C"], gamma_0001["mean_test_score"])
plt.plot(gamma_0001["param_C"], gamma_0001["mean_train_score"])
plt.xscale("log")
plt.xlabel("C")
plt.ylabel("Accuracy")
plt.title("Gamma = 0.0001")
plt.ylim([0.80, 1])
plt.legend(["test accuracy", "train accuracy"], loc="upper left")
plt.tight_layout()
plt.show()
# ## Building and Evaluating the final model
# let's build and evaluate the final model with the optimal hyperparameters
# found by the grid search (C = 1000, gamma = 0.0001) and kernel = 'rbf'
model = SVC(kernel="rbf", **model_cv.best_params_)
# fit the model
model.fit(X_train, y_train)
# predict
y_pred = model.predict(X_train)
# metrics
print("Confusion matrix", metrics.confusion_matrix(y_train, y_pred))
print("Accuracy Score", metrics.accuracy_score(y_train, y_pred))
print("Precision score", metrics.precision_score(y_train, y_pred))
print("Recall score", metrics.recall_score(y_train, y_pred))
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/913/129913105.ipynb
| null | null |
[{"Id": 129913105, "ScriptId": 38643890, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 12439749, "CreationDate": "05/17/2023 11:44:49", "VersionNumber": 1.0, "Title": "Non Linear SVM Email Spam Classifier", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 201.0, "LinesInsertedFromPrevious": 201.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
| null | null | null | null |
| false | 0 | 1,852 | 1 | 1,852 | 1,852 |
||
129197802
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
bully = pd.read_csv("/kaggle/input/bullying/Bullying.csv")
bully
bully
bully.drop("record", axis=1, inplace=True)
bully
bully["Custom_Age"].unique()
bully.info()
bully["Custom_Age"] = bully["Custom_Age"].str.extract("(\d+)").astype(float)
bully["Custom_Age"].replace(" ", 14, inplace=True)
bully["Custom_Age"].fillna(14, inplace=True)
bully
bully["Custom_Age"].unique()
bully["OnSchool_Bullying_12mo"].unique()
bully["OnSchool_Bullying_12mo"].replace(" ", "Yes", inplace=True)
bully["OnSchool_Bullying_12mo"].unique()
bully["OffSchool_Bullying_12mo"].unique()
bully["OffSchool_Bullying_12mo"].replace(" ", "Yes", inplace=True)
bully["OffSchool_Bullying_12mo"].unique()
bully["Cyberbullying_12mo"].unique()
bully["Cyberbullying_12mo"].replace(" ", "Yes", inplace=True)
bully["Cyberbullying_12mo"].unique()
bully["Sex"].unique()
bully["Sex"].value_counts()
bully["Sex"].replace(" ", "Male", inplace=True)
bully["Sex"].unique()
bully["Physically_attacked"].unique()
bully["Physically_attacked"].value_counts()
bully["Physically_attacked"].replace(1, "1 time", inplace=True)
bully["Physically_attacked"].value_counts()
bully["Physically_attacked"] = (
bully["Physically_attacked"].str.extract("^(\d+)").astype(int)
)
bully["Physically_attacked"].unique()
bully["Physical_fighting"].unique()
bully["Physical_fighting"].value_counts()
bully["Physical_fighting"].replace(" ", "0 times", inplace=True)
bully["Physical_fighting"].value_counts()
bully["Physical_fighting"] = (
bully["Physical_fighting"].str.extract("^(\d+)").astype(int)
)
bully["Physical_fighting"].unique()
bully["Felt_lonely"].unique()
bully["Felt_lonely"].value_counts()
bully["Felt_lonely"].replace(" ", "Never", inplace=True)
bully["Felt_lonely"].value_counts()
bully["Close_friends"].value_counts()
bully["Close_friends"].replace(" ", "3 or more", inplace=True)
bully["Close_friends"] = bully["Close_friends"].str.extract("^(\d+)").astype(int)
bully["Close_friends"].unique()
bully["Days_Unexcused_Absence"].value_counts()
bully["Days_Unexcused_Absence"].replace(" ", "0 days", inplace=True)
bully["Days_Unexcused_Absence"] = (
bully["Days_Unexcused_Absence"].str.extract("^(\d+)").astype(int)
)
bully["Days_Unexcused_Absence"].unique()
bully["Supportive_Classmates"].value_counts()
bully["Supportive_Classmates"].replace(" ", "Sometimes", inplace=True)
bully["Supportive_Classmates"].unique()
bully["Supportive_Parents"].value_counts()
bully["Supportive_Parents"].replace(" ", "Always", inplace=True)
bully["Supportive_Parents"].unique()
bully["Persistent_Loneliness"].value_counts()
bully["Persistent_Loneliness"].replace(" ", "No", inplace=True)
bully["Persistent_Loneliness"].unique()
bully["Unexcused_Absence"].value_counts()
bully["Unexcused_Absence"].replace(" ", "No", inplace=True)
bully["Unexcused_Absence"].unique()
bully["Underweight"].value_counts()
bully["Underweight"].replace(" ", "Unknown", inplace=True)
bully["Underweight"].unique()
bully["Overweight"].value_counts()
bully["Overweight"].replace(" ", "Unknown", inplace=True)
bully["Overweight"].unique()
bully["Obese"].value_counts()
bully["Obese"].replace(" ", "Unknown", inplace=True)
bully["Obese"].unique()
bully
bully.describe()
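# A possible refactor (editor's sketch, not part of the original notebook): most columns above
# follow the same "replace blank entries with a sensible default, then keep only the leading
# digits" pattern, so a small helper keeps that logic in one place. The example call is left
# commented out because the columns have already been cleaned in the cells above.
def clean_count_column(frame, column, default_text):
    frame[column] = frame[column].replace(" ", default_text)
    frame[column] = frame[column].str.extract("^(\d+)").astype(int)
    return frame

# clean_count_column(bully, "Physical_fighting", "0 times")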
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/197/129197802.ipynb
| null | null |
[{"Id": 129197802, "ScriptId": 38393769, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8605824, "CreationDate": "05/11/2023 18:28:17", "VersionNumber": 1.0, "Title": "Bullying_Analysis", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 139.0, "LinesInsertedFromPrevious": 139.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 1,291 | 0 | 1,291 | 1,291 |
||
129197003
|
<jupyter_start><jupyter_text>Flight Price Prediction
### INTRODUCTION
The objective of the study is to analyse the flight booking dataset obtained from “Ease My Trip” website and to conduct various statistical hypothesis tests in order to get meaningful information from it. The 'Linear Regression' statistical algorithm would be used to train the dataset and predict a continuous target variable. 'Easemytrip' is an internet platform for booking flight tickets, and hence a platform that potential passengers use to buy tickets. A thorough study of the data will aid in the discovery of valuable insights that will be of enormous value to passengers.
### Research Questions
The aim of our study is to answer the below research questions:
a) Does price vary with Airlines?
b) How is the price affected when tickets are bought in just 1 or 2 days before departure?
c) Does ticket price change based on the departure time and arrival time?
d) How the price changes with change in Source and Destination?
e) How does the ticket price vary between Economy and Business class?
### DATA COLLECTION AND METHODOLOGY
Octoparse scraping tool was used to extract data from the website. Data was collected in two parts: one for economy class tickets and another for business class tickets. A total of 300261 distinct flight booking options was extracted from the site. Data was collected for 50 days, from February 11th to March 31st, 2022.
Data source was secondary data and was collected from Ease my trip website.
### DATASET
Dataset contains information about flight booking options from the website Easemytrip for flight travel between India's top 6 metro cities. There are 300261 datapoints and 11 features in the cleaned dataset.
### FEATURES
The various features of the cleaned dataset are explained below:
1) Airline: The name of the airline company is stored in the airline column. It is a categorical feature having 6 different airlines.
2) Flight: Flight stores information regarding the plane's flight code. It is a categorical feature.
3) Source City: City from which the flight takes off. It is a categorical feature having 6 unique cities.
4) Departure Time: This is a derived categorical feature created by grouping time periods into bins. It stores information about the departure time and has 6 unique time labels.
5) Stops: A categorical feature with 3 distinct values that stores the number of stops between the source and destination cities.
6) Arrival Time: This is a derived categorical feature created by grouping time intervals into bins. It has six distinct time labels and keeps information about the arrival time.
7) Destination City: City where the flight will land. It is a categorical feature having 6 unique cities.
8) Class: A categorical feature that contains information on seat class; it has two distinct values: Business and Economy.
9) Duration: A continuous feature that displays the overall amount of time it takes to travel between cities in hours.
10) Days Left: This is a derived feature calculated as the difference between the trip date and the booking date.
11) Price: Target variable stores information of the ticket price.
To boost learning, try to create an end-to-end project using the dataset.
Kaggle dataset identifier: flight-price-prediction
<jupyter_script>import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
dataset = "/kaggle/input/flight-price-prediction/Clean_Dataset.csv"
df = pd.read_csv(dataset)
df.head()
df.drop(["Unnamed: 0"], axis=1, inplace=True)
df.info()
df.isnull().sum()
categorical_features = [feature for feature in df.columns if df[feature].dtype == "O"]
## print number of unique values
cat_number_of_unique_ = {}
for feature in categorical_features:
cat_number_of_unique_[feature] = df[feature].nunique()
cat_number_of_unique_
## Split the data into economy and business class dataframe
economy = df[df["class"] == "Economy"]
business = df[df["class"] == "Business"]
sns.barplot(
data=economy,
x="airline",
y="price",
hue="stops",
)
plt.show()
sns.barplot(data=business, x="airline", y="price", hue="stops")
plt.show()
sns.barplot(data=economy, x="airline", y="duration")
city_list = df["destination_city"].unique().tolist()
color_code = ["red", "blue", "green", "yellow", "orange", "violet"]
color_code = dict(zip(city_list, color_code))
required_lists = ["duration", "days_left", "price"]
sns.barplot(data=economy, x="airline", y="price", hue="source_city", palette=color_code)
sns.barplot(
data=economy, x="airline", y="price", hue="destination_city", palette=color_code
)
plt.show()
pd.crosstab(df["source_city"], df["destination_city"], margins=False)
pivot = pd.pivot_table(
economy,
values="duration",
index=["departure_time", "arrival_time"],
columns=["stops"],
aggfunc=[np.mean, np.std],
)
# # Different supporting functions
def list_of_same_value(list_of_number):
diff_number = []
for i in list_of_number:
if i not in diff_number:
diff_number.append(i)
return diff_number
def split_data_into_based_on_days(data):
    # split the bookings into the last ~two weeks before departure and the rest
    till_second_week = data[data["days_left"] < 16]
    rest_week = data[data["days_left"] > 15]
    # price values observed at exactly 15 days left
    week_last_number = till_second_week.loc[
        till_second_week["days_left"] == 15, "price"
    ].values
    return till_second_week, rest_week, week_last_number
def collect_dataframe_less_than_20(data):
if len(data) < 20:
return data.index.to_list()
else:
pass
def required_df_dep_and_indep_data_split(data):
# independent feature
x = data[["duration", "days_left"]]
# dependent features
y = data["price"]
return x, y
def varience_threshold(data, threshold):
    # these imports are needed for this helper and were missing in the original cell
    from sklearn.feature_selection import VarianceThreshold
    from sklearn.model_selection import train_test_split

    x, y = required_df_dep_and_indep_data_split(data)
    # train and test data split
    x_train, x_test, y_train, y_test = train_test_split(
        x, y, test_size=0.3, random_state=42
    )
    # fit the variance-threshold selector on the training features and return it
    model = VarianceThreshold(threshold=threshold)
    model.fit(x_train)
    return model
def outlier_removal_std(dict_name, std):
right_side = dict_name["mean"] + dict_name["std"] * std
left_side = dict_name["mean"] - dict_name["std"] * std
return right_side, left_side
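# Quick illustration (editor's addition, not in the original notebook): with mean 100 and
# std 10, a 2-sigma window keeps values between 80 and 120.
print(outlier_removal_std({"mean": 100, "std": 10}, std=2))  # -> (120, 80)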
# ## Plot Diagrams
def plot_line_based_on_airline_city(data, time_and_city):
my_colors = ["green", "blue", "orange"]
for airline, airline_df in data.groupby("airline"):
for time_city, time_city_df in airline_df.groupby(
["departure_time", "arrival_time", "source_city", "destination_city"]
):
try:
if time_city == tuple(time_and_city):
fig, axs = plt.subplots(ncols=2, figsize=(10, 4))
# Plot price vs days left for each stop
sns.lineplot(
data=airline_df,
x="days_left",
y="price",
hue="stops",
palette=my_colors,
ax=axs[0],
)
axs[0].set_title("Price vs Days Left")
axs[0].set_xlabel("Days Left")
axs[0].set_ylabel("Price")
# Plot price vs duration for each stop
sns.lineplot(
data=airline_df,
x="duration",
y="price",
hue="stops",
palette=my_colors,
ax=axs[1],
)
axs[1].set_title("Price vs Duration")
axs[1].set_xlabel("Duration")
axs[1].set_ylabel("Price")
axs[1].legend(loc="lower right")
plt.suptitle(
"{} : {} to {} ({} to {})".format(
airline,
time_city[2],
time_city[3],
time_city[0],
time_city[1],
)
)
plt.tight_layout()
plt.show()
except:
print("Combination of place and time is not applicable")
plot_line_based_on_airline_city(economy, ["Evening", "Morning", "Mumbai", "Delhi"])
def scatter_plot_on_airlines(data, hue):
hues = ["zero", "one", "two_or_more"]
my_colors = ["green", "tab:blue", "tab:orange"]
for airline, airline_df in economy.groupby("airline"):
fig, axs = plt.subplots(ncols=2, figsize=(10, 4))
if hue == "stops":
sns.scatterplot(
data=airline_df,
x="duration",
y="price",
hue=hue,
hue_order=hues,
ax=axs[0],
palette=my_colors,
alpha=0.3,
)
axs[0].set_title("Price vs Duration")
axs[0].set_xlabel("Duration")
axs[0].set_ylabel("Price")
axs[0].set_xlim([0, 60])
axs[0].set_ylim([0, 45000])
sns.scatterplot(
data=airline_df,
x="days_left",
y="price",
hue=hue,
hue_order=hues,
ax=axs[1],
palette=my_colors,
alpha=0.45,
)
axs[1].set_title("Price vs Days Left")
axs[1].set_xlabel("days_left")
axs[1].set_ylabel("Price")
axs[1].set_xlim([0, 60])
axs[1].set_ylim([0, 45000])
else:
sns.scatterplot(
data=airline_df, x="duration", y="price", hue=hue, ax=axs[0], alpha=0.3
)
axs[0].set_title("Price vs Duration")
axs[0].set_xlabel("Duration")
axs[0].set_ylabel("Price")
axs[0].set_xlim([0, 60])
axs[0].set_ylim([0, 45000])
sns.scatterplot(
data=airline_df,
x="days_left",
y="price",
hue=hue,
ax=axs[1],
alpha=0.45,
)
axs[1].set_title("Price vs Days Left")
axs[1].set_xlabel("days_left")
axs[1].set_ylabel("Price")
axs[1].set_xlim([0, 60])
axs[1].set_ylim([0, 45000])
plt.suptitle("{} ".format(airline))
plt.tight_layout()
plt.show()
scatter_plot_on_airlines(economy, "stops")
# In the days_left vs price plots, prices are much higher in the last ~15 days before departure and drop steeply once more days are left.
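# A quick check of that claim (editor's sketch, not in the original notebook): compare the mean
# economy fare for bookings made at most 15 days before departure with the rest.
print("<= 15 days left:", economy.loc[economy["days_left"] <= 15, "price"].mean())
print(" > 15 days left:", economy.loc[economy["days_left"] > 15, "price"].mean())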
# ## Outlier Remove
def remove_duration_outlier(data):
non_duration_df = pd.DataFrame()
for airline, airline_df in data.groupby("airline"):
city_dict = {}
for city, city_df in airline_df.groupby(
["source_city", "destination_city", "stops"]
):
city_dict[city] = {
"mean": np.mean(city_df["duration"]),
"std": np.std(city_df["duration"]),
"count": city_df.shape[0],
}
for city, city_df in airline_df.groupby(
["source_city", "destination_city", "stops"]
):
stats = city_dict.get(city)
right_side, left_side = outlier_removal_std(stats, std=2)
# duration_df =
city_out = city_df[
(city_df["duration"] < right_side) & (city_df["duration"] > left_side)
]
non_duration_df = pd.concat([non_duration_df, city_out])
return non_duration_df
business1 = remove_duration_outlier(business)
economy1 = remove_duration_outlier(economy)
# ## Radial polar diagram
## extract a list of colors for later use with each visualization.
def get_color(name, number):
pal = list(sns.color_palette(palette=name, n_colors=number))
return pal
pal_vi = get_color("viridis_r", len(df))
pal_plas = get_color("plasma_r", len(df))
pal_spec = get_color("Spectral", len(df))
pal_hsv = get_color("hsv", len(df))
def create_dataframe_based_on_duration(data, name):
for airline, airline_df in data.groupby("airline"):
if airline == name:
city_name = list()
duration_df = pd.DataFrame(columns=["city_name", "duration"])
plc_df = airline_df.groupby(["source_city", "destination_city"])
for i, (place, place_df) in enumerate(plc_df):
city2 = "%s %s" % (place[0], place[1])
##city_name.append(city2)
city_name = city2
duration = place_df["duration"].min()
duration_df.loc[i] = [city_name, duration]
duration_df = duration_df.sort_values(by="duration")
# color code
pal_vi = plt.get_cmap("viridis_r", len(duration_df))
plt.figure(figsize=(10, 10))
ax = plt.subplot(111, polar=True)
ax.set_theta_zero_location("E")
# set min and max value
lowerLimit = 0
max_v = duration_df["duration"].max()
# set heights and width
heights = duration_df["duration"]
width = 2 * np.pi / len(duration_df.index)
# set index and angle
indexes = list(range(1, len(duration_df.index) + 1))
angles = [element * width for element in indexes]
bars = ax.bar(
x=angles,
height=heights,
width=width,
bottom=lowerLimit,
linewidth=1,
edgecolor="white",
color=pal_vi(np.arange(len(duration_df))),
)
labelPadding = 0.5
for bar, angle, height, label in zip(
bars, angles, heights, duration_df["city_name"]
):
rotation = np.rad2deg(angle)
alignment = ""
if angle >= np.pi / 2 and angle <= 3 * np.pi / 2:
alignment = "right"
rotation = rotation + 180
else:
alignment = "left"
ax.text(
x=angle,
y=lowerLimit + height + labelPadding,
s=label,
ha=alignment,
va="center",
rotation=rotation,
rotation_mode="anchor",
)
ax.set_thetagrids([], [])
ax.title.set_rotation(180)
plt.title(f"{name}---- Duration - Places", rotation=0, fontsize=20)
plt.tight_layout(pad=2)
plt.show()
create_dataframe_based_on_duration(economy1, "AirAsia")
def remove_outlier(data):
df_out = pd.DataFrame()
for airline, airline_df in data.groupby("airline"):
second_week_dict = {}
rest_weak_dict = {}
place_dict = {}
for place, place_df in airline_df.groupby(
["source_city", "destination_city", "stops"]
):
second_week, rest_week, week_last_number = split_data_into_based_on_days(
place_df
)
second_week_dict[place] = {
"mean": np.mean(second_week["price"]),
"std": np.std(second_week["price"]),
"count": second_week.shape[0],
}
            # note: the original cell reused the second-week stats here; the intent is to
            # describe the remaining (rest-of-period) bookings and the whole route slice
            rest_weak_dict[place] = {
                "mean": np.mean(rest_week["price"]),
                "std": np.std(rest_week["price"]),
                "count": rest_week.shape[0],
            }
            place_dict[place] = {
                "mean": np.mean(place_df["price"]),
                "std": np.std(place_df["price"]),
                "count": place_df.shape[0],
            }
for place, place_df in airline_df.groupby(
["source_city", "destination_city", "stops"]
):
stat = place_dict.get(place)
if stat["std"] != 0 and stat["count"] > 15:
(
second_week,
rest_week,
week_last_number,
) = split_data_into_based_on_days(place_df)
# get place stat for second and rest dataframe
second_stat = second_week_dict.get(place)
rest_stat = rest_weak_dict.get(place)
# right and left hand side values, for outliers
second_right_side, second_left_side = outlier_removal_std(
second_stat, std=3
)
rest_right_side, rest_left_side = outlier_removal_std(rest_stat, std=3)
second_place_out = second_week[
(second_week["price"] < second_right_side)
& (second_week["price"] > second_left_side)
]
rest_place_out = rest_week[
(rest_week["price"] < rest_right_side)
& (rest_week["price"] > rest_left_side)
]
# concat the second and rest place_out data frame
place_df_concat = pd.concat([second_place_out, rest_place_out])
# concat final dataframe
df_out = pd.concat([df_out, place_df_concat])
return df_out
eco_df = remove_outlier(economy1)
buss_df = remove_outlier(business1)
# ## Statistical tests
from statsmodels.formula.api import ols
import statsmodels.api as sm
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import chi2
# ## Anova test
def is_there_any_association(data, target, feature):
replaced_place_df = pd.DataFrame()
eco_out = pd.DataFrame()
for airline, airline_df in data.groupby("airline"):
for place, place_df in airline_df.groupby(["source_city", "destination_city"]):
            # ANOVA test: does the target vary significantly with this feature?
            anova_0 = ols(target + "~" + feature, data=place_df).fit()
            # note: the statsmodels keyword is `typ`, not `type`
            stop_test = sm.stats.anova_lm(anova_0, typ=2)
# number of unique value
no_of_unique_value = place_df[feature].nunique()
if stop_test["PR(>F)"].loc[feature] > 0.05:
high_no_index = (
place_df[feature]
.value_counts()
.nlargest(no_of_unique_value)
.index[0]
)
replace_index = (
place_df[feature]
.value_counts()
.nlargest()
.index[1:no_of_unique_value]
)
                # replace the less frequent categories with the most frequent one
place_df[feature].replace(replace_index, high_no_index, inplace=True)
# concat the dataframe
replaced_place_df = pd.concat([replaced_place_df, place_df])
else:
eco_out = pd.concat([eco_out, place_df])
df_final = pd.concat([replaced_place_df, eco_out])
return df_final
# The ANOVA test above checks whether price is associated with stops (and, analogously, with duration). If there is no significant association between stops and price for a route, the stop categories are collapsed into a single value.
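# The helper above is defined but never called downstream; as a standalone illustration
# (editor's sketch, route chosen arbitrarily), the same ANOVA can be run on a single route to
# test whether price depends on the number of stops.
_route = eco_df[
    (eco_df["source_city"] == "Delhi") & (eco_df["destination_city"] == "Mumbai")
]
if len(_route) > 0:
    _anova = ols("price ~ stops", data=_route).fit()
    print(sm.stats.anova_lm(_anova, typ=2))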
df1 = pd.concat([eco_df, buss_df])
# # Feature selection
from category_encoders import TargetEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.pipeline import make_pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder, StandardScaler
# ## Target encoding
## target encoding of the high-cardinality `flight` column
df2 = df1.copy()
tar_en = TargetEncoder()
tar_en.fit(df2["flight"], df2["price"])
df2["flight_"] = tar_en.transform(df2["flight"])
df2.drop("flight", axis=1, inplace=True)
# ## One-hot encoding
df3 = df2.copy()
df3 = pd.get_dummies(df2, drop_first=True)
# # Model Building
# independent features
x = df3.drop(["price"], axis=1)
# dependent feature
y = df3["price"]
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.3, random_state=42, shuffle=True
)
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.neighbors import KNeighborsRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.svm import LinearSVR, SVR
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
from sklearn.metrics import r2_score, mean_absolute_error
### Define models
models = {
"Linear regression": LinearRegression(),
"Linear regression (L2)": Ridge(),
"Linear regression(L1)": Lasso(),
"Gradient boosting": GradientBoostingRegressor(),
"XGBoosting": XGBRegressor(),
"Light GBM": LGBMRegressor(),
"K-nearest neighbour": KNeighborsRegressor(),
"Neural Network": MLPRegressor(),
"Decision tree": DecisionTreeRegressor(),
"Random forest": RandomForestRegressor(),
}
### make a scaler
scaler = StandardScaler()
# fit each model once inside a scaling pipeline and report its held-out R^2 score
for name, model in models.items():
    ### construct a pipeline
    pipeline = Pipeline(steps=[("scaling", scaler), ("Modeling", model)])
    pipeline.fit(x_train, y_train)
    print(name + "___trained")
    print(name + "_R^2 score(test): {:.5f}".format(pipeline.score(x_test, y_test)))
    print("--" * 50)
# # Baseline model: Gradient boosting regression with all features
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, r2_score
gbr = GradientBoostingRegressor(max_depth=5, random_state=42)
# train regression using all features
gbr.fit(x_train, y_train)
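# - The baseline fit above is never scored in the original notebook; a quick hold-out check
#   (editor's sketch) on the test split:
baseline_pred = gbr.predict(x_test)
print("Baseline R^2 :", r2_score(y_test, baseline_pred))
print("Baseline RMSE:", np.sqrt(mean_squared_error(y_test, baseline_pred)))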
from sklearn.feature_selection import SelectKBest
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.feature_selection import mutual_info_regression
gbr = GradientBoostingRegressor()
r2_score_list = []
for k in range(1, 31):
selector = SelectKBest(mutual_info_regression, k=k)
selector.fit(x_train, y_train)
sel_x_train = selector.transform(x_train)
sel_x_test = selector.transform(x_test)
gbr.fit(sel_x_train, y_train)
y_pred = gbr.predict(sel_x_test)
r2 = r2_score(y_test, y_pred)
r2_score_list.append(r2)
best_k = r2_score_list.index(max(r2_score_list)) + 1
# Select the best k features
selector = SelectKBest(mutual_info_regression, k=best_k)
selector.fit(x_train, y_train)
sel_x_train = selector.transform(x_train)
sel_x_test = selector.transform(x_test)
gbr.fit(sel_x_train, y_train)
y_pred = gbr.predict(sel_x_test)
fig, ax = plt.subplots()
x = np.arange(1, 31)
y = np.round(r2_score_list, 5)
ax.bar(x, y, width=0.2)
ax.set_xlabel("Number_of features selected using mutual information")
ax.set_ylabel("mean_squared error ")
ax.set_ylim(0, 1.2)
ax.set_xticks(np.arange(1, 31))
ax.set_xticklabels(np.arange(1, 31), fontsize=12, rotation=90)
for i, v in enumerate(y):
plt.text(
x=i + 1, y=v + 0.05, s=str(v), rotation=90
) # x position, y position of text to be put
# ## Select the best performing algorithm
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import cross_val_score
cv = ShuffleSplit(n_splits=3, test_size=0.1, random_state=0)
cross_val_score(RandomForestRegressor(), x, y, cv=cv)
rand = RandomForestRegressor()
rand.fit(x_train, y_train)
rand.score(x_test, y_test)
# ## Performance Evaluation
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
y_pred = rand.predict(x_test)
print(f"MAE == {mean_absolute_error(y_test,y_pred)}")
print(f"MSE == {mean_squared_error(y_test,y_pred)}")
print(f"RMSE == {np.sqrt(mean_squared_error(y_test,y_pred))}")
# ## SAVE MODEL
import pickle
# save the model
with open("random_forest_class_model1", "wb") as model:
pickle.dump(rand, model)
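# - A matching load step (editor's sketch): read the pickled regressor back and reuse it for
#   prediction, assuming the same feature columns as x_test.
with open("random_forest_class_model1", "rb") as saved_model:
    loaded_model = pickle.load(saved_model)
print(loaded_model.predict(x_test[:5]))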
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/197/129197003.ipynb
|
flight-price-prediction
|
shubhambathwal
|
[{"Id": 129197003, "ScriptId": 38306985, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10187602, "CreationDate": "05/11/2023 18:19:26", "VersionNumber": 1.0, "Title": "flight price Prediction", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 574.0, "LinesInsertedFromPrevious": 574.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185028468, "KernelVersionId": 129197003, "SourceDatasetVersionId": 3228623}]
|
[{"Id": 3228623, "DatasetId": 1957837, "DatasourceVersionId": 3278670, "CreatorUserId": 5890725, "LicenseName": "CC0: Public Domain", "CreationDate": "02/25/2022 17:58:19", "VersionNumber": 2.0, "Title": "Flight Price Prediction", "Slug": "flight-price-prediction", "Subtitle": "Predict Fllight Price, practise feature engineering, implement ensemble models", "Description": "###INTRODUCTION\nThe objective of the study is to analyse the flight booking dataset obtained from \u201cEase My Trip\u201d website and to conduct various statistical hypothesis tests in order to get meaningful information from it. The 'Linear Regression' statistical algorithm would be used to train the dataset and predict a continuous target variable. 'Easemytrip' is an internet platform for booking flight tickets, and hence a platform that potential passengers use to buy tickets. A thorough study of the data will aid in the discovery of valuable insights that will be of enormous value to passengers.\n\n###Research Questions\nThe aim of our study is to answer the below research questions:\na) Does price vary with Airlines?\nb) How is the price affected when tickets are bought in just 1 or 2 days before departure?\nc) Does ticket price change based on the departure time and arrival time?\nd) How the price changes with change in Source and Destination?\ne) How does the ticket price vary between Economy and Business class?\n\n\n###DATA COLLECTION AND METHODOLOGY\nOctoparse scraping tool was used to extract data from the website. Data was collected in two parts: one for economy class tickets and another for business class tickets. A total of 300261 distinct flight booking options was extracted from the site. Data was collected for 50 days, from February 11th to March 31st, 2022.\nData source was secondary data and was collected from Ease my trip website.\n\n\n###DATASET\nDataset contains information about flight booking options from the website Easemytrip for flight travel between India's top 6 metro cities. There are 300261 datapoints and 11 features in the cleaned dataset.\n\n###FEATURES\nThe various features of the cleaned dataset are explained below:\n1) Airline: The name of the airline company is stored in the airline column. It is a categorical feature having 6 different airlines.\n2) Flight: Flight stores information regarding the plane's flight code. It is a categorical feature.\n3) Source City: City from which the flight takes off. It is a categorical feature having 6 unique cities.\n4) Departure Time: This is a derived categorical feature obtained created by grouping time periods into bins. It stores information about the departure time and have 6 unique time labels.\n5) Stops: A categorical feature with 3 distinct values that stores the number of stops between the source and destination cities.\n6) Arrival Time: This is a derived categorical feature created by grouping time intervals into bins. It has six distinct time labels and keeps information about the arrival time.\n7) Destination City: City where the flight will land. 
It is a categorical feature having 6 unique cities.\n8) Class: A categorical feature that contains information on seat class; it has two distinct values: Business and Economy.\n9) Duration: A continuous feature that displays the overall amount of time it takes to travel between cities in hours.\n10)Days Left: This is a derived characteristic that is calculated by subtracting the trip date by the booking date.\n11) Price: Target variable stores information of the ticket price.\n\n===================To boost learning, try to create an end-to-end project using the dataset.==================================", "VersionNotes": "Data Update 2022/02/25", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 1957837, "CreatorUserId": 5890725, "OwnerUserId": 5890725.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 3228623.0, "CurrentDatasourceVersionId": 3278670.0, "ForumId": 1981847, "Type": 2, "CreationDate": "02/25/2022 17:47:44", "LastActivityDate": "02/25/2022", "TotalViews": 125687, "TotalDownloads": 17545, "TotalVotes": 232, "TotalKernels": 112}]
|
[{"Id": 5890725, "UserName": "shubhambathwal", "DisplayName": "Shubham Bathwal", "RegisterDate": "10/05/2020", "PerformanceTier": 0}]
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
dataset = "/kaggle/input/flight-price-prediction/Clean_Dataset.csv"
df = pd.read_csv(dataset)
df.head()
df.drop(["Unnamed: 0"], axis=1, inplace=True)
df.info()
df.isnull().sum()
categorical_features = [feature for feature in df.columns if df[feature].dtype == "O"]
## print number of unique values
cat_number_of_unique_ = {}
for feature in categorical_features:
cat_number_of_unique_[feature] = df[feature].nunique()
cat_number_of_unique_
## Split the data into economy and business class dataframe
economy = df[df["class"] == "Economy"]
business = df[df["class"] == "Business"]
sns.barplot(
data=economy,
x="airline",
y="price",
hue="stops",
)
plt.show()
sns.barplot(data=business, x="airline", y="price", hue="stops")
plt.show()
sns.barplot(data=economy, x="airline", y="duration")
city_list = df["destination_city"].unique().tolist()
color_code = ["red", "blue", "green", "yellow", "orange", "violet"]
color_code = dict(zip(city_list, color_code))
required_lists = ["duration", "days_left", "price"]
sns.barplot(data=economy, x="airline", y="price", hue="source_city", palette=color_code)
sns.barplot(
data=economy, x="airline", y="price", hue="destination_city", palette=color_code
)
plt.show()
pd.crosstab(df["source_city"], df["destination_city"], margins=False)
pivot = pd.pivot_table(
economy,
values="duration",
index=["departure_time", "arrival_time"],
columns=["stops"],
aggfunc=[np.mean, np.std],
)
# # Different supporting functions
def list_of_same_value(list_of_number):
diff_number = []
for i in list_of_number:
if i not in diff_number:
diff_number.append(i)
return diff_number
def split_data_into_based_on_days(data):
# split the data into 2
till_second_week = data[data["days_left"] < 16]
rest_weak = data[data["days_left"] > 15]
# value of price on 15 days_left
weak_last_number = till_second_week.loc[
till_second_week["days_left"] == 15, "price"
].values
return till_second_week, rest_weak, weak_last_number
def collect_dataframe_less_than_20(data):
if len(data) < 20:
return data.index.to_list()
else:
pass
def required_df_dep_and_indep_data_split(data):
# independent feature
x = data[["duration", "days_left"]]
# dependent features
y = data["price"]
return x, y
def varience_threshold(data, threshold):
x, y = required_df_dep_and_indep_data_split(data)
# train and test data split
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.3, random_state=42
)
model = VarianceThreshold(threshold=threshold)
# fit the model
model.fit(x_train)
return model.fit(x_train)
def outlier_removal_std(dict_name, std):
right_side = dict_name["mean"] + dict_name["std"] * std
left_side = dict_name["mean"] - dict_name["std"] * std
return right_side, left_side
# ## Plot Diagrams
def plot_line_based_on_airline_city(data, time_and_city):
my_colors = ["green", "blue", "orange"]
for airline, airline_df in data.groupby("airline"):
for time_city, time_city_df in airline_df.groupby(
["departure_time", "arrival_time", "source_city", "destination_city"]
):
try:
if time_city == tuple(time_and_city):
fig, axs = plt.subplots(ncols=2, figsize=(10, 4))
# Plot price vs days left for each stop
sns.lineplot(
data=airline_df,
x="days_left",
y="price",
hue="stops",
palette=my_colors,
ax=axs[0],
)
axs[0].set_title("Price vs Days Left")
axs[0].set_xlabel("Days Left")
axs[0].set_ylabel("Price")
# Plot price vs duration for each stop
sns.lineplot(
data=airline_df,
x="duration",
y="price",
hue="stops",
palette=my_colors,
ax=axs[1],
)
axs[1].set_title("Price vs Duration")
axs[1].set_xlabel("Duration")
axs[1].set_ylabel("Price")
axs[1].legend(loc="lower right")
plt.suptitle(
"{} : {} to {} ({} to {})".format(
airline,
time_city[2],
time_city[3],
time_city[0],
time_city[1],
)
)
plt.tight_layout()
plt.show()
except:
print("Combination of place and time is not applicable")
plot_line_based_on_airline_city(economy, ["Evening", "Morning", "Mumbai", "Delhi"])
def scatter_plot_on_airlines(data, hue):
hues = ["zero", "one", "two_or_more"]
my_colors = ["green", "tab:blue", "tab:orange"]
for airline, airline_df in economy.groupby("airline"):
fig, axs = plt.subplots(ncols=2, figsize=(10, 4))
if hue == "stops":
sns.scatterplot(
data=airline_df,
x="duration",
y="price",
hue=hue,
hue_order=hues,
ax=axs[0],
palette=my_colors,
alpha=0.3,
)
axs[0].set_title("Price vs Duration")
axs[0].set_xlabel("Duration")
axs[0].set_ylabel("Price")
axs[0].set_xlim([0, 60])
axs[0].set_ylim([0, 45000])
sns.scatterplot(
data=airline_df,
x="days_left",
y="price",
hue=hue,
hue_order=hues,
ax=axs[1],
palette=my_colors,
alpha=0.45,
)
axs[1].set_title("Price vs Days Left")
axs[1].set_xlabel("days_left")
axs[1].set_ylabel("Price")
axs[1].set_xlim([0, 60])
axs[1].set_ylim([0, 45000])
else:
sns.scatterplot(
data=airline_df, x="duration", y="price", hue=hue, ax=axs[0], alpha=0.3
)
axs[0].set_title("Price vs Duration")
axs[0].set_xlabel("Duration")
axs[0].set_ylabel("Price")
axs[0].set_xlim([0, 60])
axs[0].set_ylim([0, 45000])
sns.scatterplot(
data=airline_df,
x="days_left",
y="price",
hue=hue,
ax=axs[1],
alpha=0.45,
)
axs[1].set_title("Price vs Days Left")
axs[1].set_xlabel("days_left")
axs[1].set_ylabel("Price")
axs[1].set_xlim([0, 60])
axs[1].set_ylim([0, 45000])
plt.suptitle("{} ".format(airline))
plt.tight_layout()
plt.show()
scatter_plot_on_airlines(economy, "stops")
# In days_left vs price , the prices are high steep drop after day 15
# ## Outlier Remove
def remove_duration_outlier(data):
non_duration_df = pd.DataFrame()
for airline, airline_df in data.groupby("airline"):
city_dict = {}
for city, city_df in airline_df.groupby(
["source_city", "destination_city", "stops"]
):
city_dict[city] = {
"mean": np.mean(city_df["duration"]),
"std": np.std(city_df["duration"]),
"count": city_df.shape[0],
}
for city, city_df in airline_df.groupby(
["source_city", "destination_city", "stops"]
):
stats = city_dict.get(city)
right_side, left_side = outlier_removal_std(stats, std=2)
# duration_df =
city_out = city_df[
(city_df["duration"] < right_side) & (city_df["duration"] > left_side)
]
non_duration_df = pd.concat([non_duration_df, city_out])
return non_duration_df
business1 = remove_duration_outlier(business)
economy1 = remove_duration_outlier(economy)
# ## Radial polar diagram
## extract a list of colors for later use with each visualization.
def get_color(name, number):
pal = list(sns.color_palette(palette=name, n_colors=number))
return pal
pal_vi = get_color("viridis_r", len(df))
pal_plas = get_color("plasma_r", len(df))
pal_spec = get_color("Spectral", len(df))
pal_hsv = get_color("hsv", len(df))
def create_dataframe_based_on_duration(data, name):
for airline, airline_df in data.groupby("airline"):
if airline == name:
city_name = list()
duration_df = pd.DataFrame(columns=["city_name", "duration"])
plc_df = airline_df.groupby(["source_city", "destination_city"])
for i, (place, place_df) in enumerate(plc_df):
city2 = "%s %s" % (place[0], place[1])
##city_name.append(city2)
city_name = city2
duration = place_df["duration"].min()
duration_df.loc[i] = [city_name, duration]
duration_df = duration_df.sort_values(by="duration")
# color code
pal_vi = plt.get_cmap("viridis_r", len(duration_df))
plt.figure(figsize=(10, 10))
ax = plt.subplot(111, polar=True)
ax.set_theta_zero_location("E")
# set min and max value
lowerLimit = 0
max_v = duration_df["duration"].max()
# set heights and width
heights = duration_df["duration"]
width = 2 * np.pi / len(duration_df.index)
# set index and angle
indexes = list(range(1, len(duration_df.index) + 1))
angles = [element * width for element in indexes]
bars = ax.bar(
x=angles,
height=heights,
width=width,
bottom=lowerLimit,
linewidth=1,
edgecolor="white",
color=pal_vi(np.arange(len(duration_df))),
)
labelPadding = 0.5
for bar, angle, height, label in zip(
bars, angles, heights, duration_df["city_name"]
):
rotation = np.rad2deg(angle)
alignment = ""
if angle >= np.pi / 2 and angle <= 3 * np.pi / 2:
alignment = "right"
rotation = rotation + 180
else:
alignment = "left"
ax.text(
x=angle,
y=lowerLimit + height + labelPadding,
s=label,
ha=alignment,
va="center",
rotation=rotation,
rotation_mode="anchor",
)
ax.set_thetagrids([], [])
ax.title.set_rotation(180)
plt.title(f"{name}---- Duration - Places", rotation=0, fontsize=20)
plt.tight_layout(pad=2)
plt.show()
create_dataframe_based_on_duration(economy1, "AirAsia")
def remove_outlier(data):
df_out = pd.DataFrame()
for airline, airline_df in data.groupby("airline"):
second_week_dict = {}
rest_weak_dict = {}
place_dict = {}
for place, place_df in airline_df.groupby(
["source_city", "destination_city", "stops"]
):
second_week, rest_week, week_last_number = split_data_into_based_on_days(
place_df
)
second_week_dict[place] = {
"mean": np.mean(second_week["price"]),
"std": np.std(second_week["price"]),
"count": second_week.shape[0],
}
rest_weak_dict[place] = {
"mean": np.mean(second_week["price"]),
"std": np.std(second_week["price"]),
"count": second_week.shape[0],
}
place_dict[place] = {
"mean": np.mean(place_df["price"]),
"std": np.std(place_df["price"]),
"count": second_week.shape[0],
}
for place, place_df in airline_df.groupby(
["source_city", "destination_city", "stops"]
):
stat = place_dict.get(place)
if stat["std"] != 0 and stat["count"] > 15:
(
second_week,
rest_week,
week_last_number,
) = split_data_into_based_on_days(place_df)
# get place stat for second and rest dataframe
second_stat = second_week_dict.get(place)
rest_stat = rest_weak_dict.get(place)
# right and left hand side values, for outliers
second_right_side, second_left_side = outlier_removal_std(
second_stat, std=3
)
rest_right_side, rest_left_side = outlier_removal_std(rest_stat, std=3)
second_place_out = second_week[
(second_week["price"] < second_right_side)
& (second_week["price"] > second_left_side)
]
rest_place_out = rest_week[
(rest_week["price"] < rest_right_side)
& (rest_week["price"] > rest_left_side)
]
# concat the second and rest place_out data frame
place_df_concat = pd.concat([second_place_out, rest_place_out])
# concat final dataframe
df_out = pd.concat([df_out, place_df_concat])
return df_out
eco_df = remove_outlier(economy1)
buss_df = remove_outlier(business1)
# ## Static tests
from statsmodels.formula.api import ols
import statsmodels.api as sm
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import chi2
# ## Anova test
def is_there_any_association(data, target, feature):
replaced_place_df = pd.DataFrame()
eco_out = pd.DataFrame()
for airline, airline_df in data.groupby("airline"):
for place, place_df in airline_df.groupby(["source_city", "destination_city"]):
# avova test for duration and stops
anova_0 = ols(target + "~" + feature, data=place_df).fit()
stop_test = sm.stats.anova_lm(anova_0, type=2)
# number of unique value
no_of_unique_value = place_df[feature].nunique()
if stop_test["PR(>F)"].loc[feature] > 0.05:
high_no_index = (
place_df[feature]
.value_counts()
.nlargest(no_of_unique_value)
.index[0]
)
replace_index = (
place_df[feature]
.value_counts()
.nlargest()
.index[1:no_of_unique_value]
)
# replce lesser count index to mainly used value
place_df[feature].replace(replace_index, high_no_index, inplace=True)
# concat the dataframe
replaced_place_df = pd.concat([replaced_place_df, place_df])
else:
eco_out = pd.concat([eco_out, place_df])
df_final = pd.concat([replaced_place_df, eco_out])
return df_final
# We conduct an ANOVA test to check whether there is any significant association between **Price vs. Stops** and **Price vs. Duration**. If there is no significant association between stops and price, we collapse the stops column into its single most frequent value; an illustrative call is sketched just below.
df1 = pd.concat([eco_df, buss_df])
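# Illustrative usage (not executed in the original pipeline): the ANOVA-based
# helper defined above could be applied to the combined frame to collapse the
# "stops" categories on routes where stops show no significant association
# with price, for example:
# df1 = is_there_any_association(df1, "price", "stops")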
# # Feature selection
from category_encoders import TargetEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.pipeline import Pipeline
from sklearn.pipeline import make_pipeline
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, OrdinalEncoder, StandardScaler
# ## Target encoding
## target encoding
df2 = df1.copy()
tar_en = TargetEncoder()
tar_en.fit(df2["flight"], df2["price"])
df2["flight_"] = tar_en.transform(df2["flight"])
df2.drop("flight", axis=1, inplace=True)
# ## One-hot encoding
df3 = df2.copy()
df3 = pd.get_dummies(df2, drop_first=True)
# # Model Building
# independent features
x = df3.drop(["price"], axis=1)
# dependent feature
y = df3["price"]
x_train, x_test, y_train, y_test = train_test_split(
x, y, test_size=0.3, random_state=42, shuffle=True
)
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.neighbors import KNeighborsRegressor
from sklearn.neural_network import MLPRegressor
from sklearn.svm import LinearSVR, SVR
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor
from xgboost import XGBRegressor
from lightgbm import LGBMRegressor
from sklearn.metrics import r2_score, mean_absolute_error
### Define models
models = {
"Linear regression": LinearRegression(),
"Linear regression (L2)": Ridge(),
"Linear regression(L1)": Lasso(),
"Gradient boosting": GradientBoostingRegressor(),
"XGBoosting": XGBRegressor(),
"Light GBM": LGBMRegressor(),
"K-nearest neighbour": KNeighborsRegressor(),
"Neural Network": MLPRegressor(),
"Decision tree": DecisionTreeRegressor(),
"Random forest": RandomForestRegressor(),
}
### make a scaler
scaler = StandardScaler()
for name, model in models.items():
### construct a pipeline
pipeline = Pipeline(steps=[("scaling", scaler), ("Modeling", model)])
pipeline.fit(x_train, y_train)
print(name + "___trained")
for name, model in models.items():
### construct a pipeline
pipeline = Pipeline(steps=[("scaling", scaler), ("Modeling", model)])
pipeline.fit(x_train, y_train)
print(name + "___trained")
print(name + "_accuracy score(test): {:.5f}".format(pipeline.score(x_test, y_test)))
print("--" * 50)
# # Baseline model: Gradient boosting regression with all features
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.metrics import mean_squared_error, r2_score
gbr = GradientBoostingRegressor(max_depth=5, random_state=42)
# train regression using all features
gbr.fit(x_train, y_train)
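# Illustrative addition (not in the original notebook): evaluate the baseline
# model on the held-out split using the metrics imported above.
baseline_pred = gbr.predict(x_test)
print("Baseline GBR R2 (test):", r2_score(y_test, baseline_pred))
print("Baseline GBR RMSE (test):", np.sqrt(mean_squared_error(y_test, baseline_pred)))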
from sklearn.feature_selection import SelectKBest
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.feature_selection import mutual_info_regression
gbr = GradientBoostingRegressor()
r2_score_list = []
for k in range(1, 31):
selector = SelectKBest(mutual_info_regression, k=k)
selector.fit(x_train, y_train)
sel_x_train = selector.transform(x_train)
sel_x_test = selector.transform(x_test)
gbr.fit(sel_x_train, y_train)
y_pred = gbr.predict(sel_x_test)
r2 = r2_score(y_test, y_pred)
r2_score_list.append(r2)
best_k = r2_score_list.index(max(r2_score_list)) + 1
# Select the best k features
selector = SelectKBest(mutual_info_regression, k=best_k)
selector.fit(x_train, y_train)
sel_x_train = selector.transform(x_train)
sel_x_test = selector.transform(x_test)
gbr.fit(sel_x_train, y_train)
y_pred = gbr.predict(sel_x_test)
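# Illustrative addition: report the test R2 achieved with the selected feature subset.
print(f"Best k = {best_k}, test R2 = {r2_score(y_test, y_pred):.5f}")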
fig, ax = plt.subplots()
# plotting arrays get their own names so the feature matrix x and target y stay intact
k_values = np.arange(1, 31)
r2_values = np.round(r2_score_list, 5)
ax.bar(k_values, r2_values, width=0.2)
ax.set_xlabel("Number of features selected using mutual information")
ax.set_ylabel("R2 score")
ax.set_ylim(0, 1.2)
ax.set_xticks(np.arange(1, 31))
ax.set_xticklabels(np.arange(1, 31), fontsize=12, rotation=90)
for i, v in enumerate(r2_values):
    plt.text(
        x=i + 1, y=v + 0.05, s=str(v), rotation=90
    )  # x position, y position of the text to be placed
# ## Select the best performing algorithm
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import cross_val_score
cv = ShuffleSplit(n_splits=3, test_size=0.1, random_state=0)
cross_val_score(RandomForestRegressor(), x, y, cv=cv)
rand = RandomForestRegressor()
rand.fit(x_train, y_train)
rand.score(x_test, y_test)
# ## Perfomance Evaluation
from sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error
y_pred = rand.predict(x_test)
print(f"MAE == {mean_absolute_error(y_test,y_pred)}")
print(f"MSE == {mean_squared_error(y_test,y_pred)}")
print(f"RMSE == {np.sqrt(mean_squared_error(y_test,y_pred))}")
# ## SAVE MODEL
import pickle
# save the model
with open("random_forest_class_model1", "wb") as model:
pickle.dump(rand, model)
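# Illustrative follow-up: reload the pickled model to verify it round-trips.
with open("random_forest_class_model1", "rb") as saved_model_file:
    loaded_model = pickle.load(saved_model_file)
print("Reloaded model test R2:", loaded_model.score(x_test, y_test))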
| false | 0 | 5,895 | 0 | 6,640 | 5,895 |
||
129197833
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
df = pd.read_csv("/kaggle/input/tabular-playground-series-feb-2022/train.csv")
df.head()
df.info()
df.shape
df.isnull().sum()
df.columns
x = df.drop(["target"], axis=1)
y = df["target"]
from sklearn.preprocessing import StandardScaler
x = StandardScaler().fit_transform(x)
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(
data=principalComponents, columns=["principal component 1", "principal component 2"]
)
principalDf.head()
finalDf = pd.concat([principalDf, y], axis=1)
finalDf.head()
pca.explained_variance_ratio_
finalDf.target.unique()
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(1, 1, 1)
ax.set_xlabel("Principal Component 1", fontsize=15)
ax.set_ylabel("Principal Component 2", fontsize=15)
ax.set_title("2 component PCA", fontsize=20)
targets = [
"Streptococcus_pyogenes",
"Salmonella_enterica",
"Enterococcus_hirae",
"Escherichia_coli",
"Campylobacter_jejuni",
"Streptococcus_pneumoniae",
"Staphylococcus_aureus",
"Escherichia_fergusonii",
"Bacteroides_fragilis",
"Klebsiella_pneumoniae",
]
colors = [
"red",
"green",
"brown",
"blue",
"yellow",
"pink",
"red",
"purple",
"orange",
"black",
]
for target, color in zip(targets, colors):
indicesToKeep = finalDf["target"] == target
ax.scatter(
finalDf.loc[indicesToKeep, "principal component 1"],
finalDf.loc[indicesToKeep, "principal component 2"],
c=color,
s=50,
alpha=0.5,
)
ax.legend(targets)
ax.grid()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/197/129197833.ipynb
| null | null |
[{"Id": 129197833, "ScriptId": 38409400, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 8017871, "CreationDate": "05/11/2023 18:28:37", "VersionNumber": 1.0, "Title": "Genetic Analysis using PCA", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 83.0, "LinesInsertedFromPrevious": 83.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
df = pd.read_csv("/kaggle/input/tabular-playground-series-feb-2022/train.csv")
df.head()
df.info()
df.shape
df.isnull().sum()
df.columns
x = df.drop(["target"], axis=1)
y = df["target"]
from sklearn.preprocessing import StandardScaler
x = StandardScaler().fit_transform(x)
from sklearn.decomposition import PCA
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(
data=principalComponents, columns=["principal component 1", "principal component 2"]
)
principalDf.head()
finalDf = pd.concat([principalDf, y], axis=1)
finalDf.head()
pca.explained_variance_ratio_
finalDf.target.unique()
fig = plt.figure(figsize=(8, 8))
ax = fig.add_subplot(1, 1, 1)
ax.set_xlabel("Principal Component 1", fontsize=15)
ax.set_ylabel("Principal Component 2", fontsize=15)
ax.set_title("2 component PCA", fontsize=20)
targets = [
"Streptococcus_pyogenes",
"Salmonella_enterica",
"Enterococcus_hirae",
"Escherichia_coli",
"Campylobacter_jejuni",
"Streptococcus_pneumoniae",
"Staphylococcus_aureus",
"Escherichia_fergusonii",
"Bacteroides_fragilis",
"Klebsiella_pneumoniae",
]
colors = [
"red",
"green",
"brown",
"blue",
"yellow",
"pink",
"red",
"purple",
"orange",
"black",
]
for target, color in zip(targets, colors):
indicesToKeep = finalDf["target"] == target
ax.scatter(
finalDf.loc[indicesToKeep, "principal component 1"],
finalDf.loc[indicesToKeep, "principal component 2"],
c=color,
s=50,
alpha=0.5,
)
ax.legend(targets)
ax.grid()
| false | 0 | 757 | 1 | 757 | 757 |
||
129197408
|
<jupyter_start><jupyter_text>Pokemon with stats
This data set includes 721 Pokemon, including their number, name, first and second type, and basic stats: HP, Attack, Defense, Special Attack, Special Defense, and Speed. It has been of great use when teaching statistics to kids. With certain types you can also give a geeky introduction to machine learning.
This are the raw attributes that are used for calculating how much damage an attack will do in the games. This dataset is about the pokemon games (*NOT* pokemon cards or Pokemon Go).
The data as described by [Myles O'Neill](https://www.kaggle.com/mylesoneill) is:
- **#**: ID for each pokemon
- **Name**: Name of each pokemon
- **Type 1**: Each pokemon has a type, this determines weakness/resistance to attacks
- **Type 2**: Some pokemon are dual type and have 2
- **Total**: sum of all stats that come after this, a general guide to how strong a pokemon is
- **HP**: hit points, or health, defines how much damage a pokemon can withstand before fainting
- **Attack**: the base modifier for normal attacks (eg. Scratch, Punch)
- **Defense**: the base damage resistance against normal attacks
- **SP Atk**: special attack, the base modifier for special attacks (e.g. fire blast, bubble beam)
- **SP Def**: the base damage resistance against special attacks
- **Speed**: determines which pokemon attacks first each round
The data for this table has been acquired from several different sites, including:
- [pokemon.com](http://www.pokemon.com/us/pokedex/)
- [pokemondb](http://pokemondb.net/pokedex)
- [bulbapedia](http://bulbapedia.bulbagarden.net/wiki/List_of_Pok%C3%A9mon_by_National_Pok%C3%A9dex_number)
One question has been answered with this database: The type of a pokemon cannot be inferred only by it's Attack and Deffence. It would be worthy to find which two variables can define the type of a pokemon, if any. Two variables can be plotted in a 2D space, and used as an example for machine learning. This could mean the creation of a visual example any geeky Machine Learning class would love.
Kaggle dataset identifier: pokemon
<jupyter_code>import pandas as pd
df = pd.read_csv('pokemon/Pokemon.csv')
df.info()
<jupyter_output><class 'pandas.core.frame.DataFrame'>
RangeIndex: 800 entries, 0 to 799
Data columns (total 13 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 # 800 non-null int64
1 Name 800 non-null object
2 Type 1 800 non-null object
3 Type 2 414 non-null object
4 Total 800 non-null int64
5 HP 800 non-null int64
6 Attack 800 non-null int64
7 Defense 800 non-null int64
8 Sp. Atk 800 non-null int64
9 Sp. Def 800 non-null int64
10 Speed 800 non-null int64
11 Generation 800 non-null int64
12 Legendary 800 non-null bool
dtypes: bool(1), int64(9), object(3)
memory usage: 75.9+ KB
<jupyter_text>Examples:
{
"#": 1,
"Name": "Bulbasaur",
"Type 1": "Grass",
"Type 2": "Poison",
"Total": 318,
"HP": 45,
"Attack": 49,
"Defense": 49,
"Sp. Atk": 65,
"Sp. Def": 65,
"Speed": 45,
"Generation": 1,
"Legendary": false
}
{
"#": 2,
"Name": "Ivysaur",
"Type 1": "Grass",
"Type 2": "Poison",
"Total": 405,
"HP": 60,
"Attack": 62,
"Defense": 63,
"Sp. Atk": 80,
"Sp. Def": 80,
"Speed": 60,
"Generation": 1,
"Legendary": false
}
{
"#": 3,
"Name": "Venusaur",
"Type 1": "Grass",
"Type 2": "Poison",
"Total": 525,
"HP": 80,
"Attack": 82,
"Defense": 83,
"Sp. Atk": 100,
"Sp. Def": 100,
"Speed": 80,
"Generation": 1,
"Legendary": false
}
{
"#": 3,
"Name": "VenusaurMega Venusaur",
"Type 1": "Grass",
"Type 2": "Poison",
"Total": 625,
"HP": 80,
"Attack": 100,
"Defense": 123,
"Sp. Atk": 122,
"Sp. Def": 120,
"Speed": 80,
"Generation": 1,
"Legendary": false
}
<jupyter_script>import pandas as pd
# the pandas Series and pandas DataFrame data types are used
# pandas Series
import pandas as pd
a = [1, 2, 3, 4]
my_series = pd.Series(a)
print(my_series)
my_series[0]
a = [1, 2, 3, 4]
my_series = pd.Series(a, index=["x", "z", "y", "t"])
my_series
my_series["x"]
my_dataset = {"cars": ["BMV", "Mercedes", "Audi"], "price": [123, 456, 789]}
my_df = pd.DataFrame(my_dataset)
my_df
my_df.loc[0]
my_df.loc[1]
my_df.loc[2]
data = {"calories": [420, 380, 390], "duration": [50, 40, 45]}
df = pd.DataFrame(data)
df
df.loc[0:2]
data = {"calories": [420, 380, 390], "duration": [50, 40, 45]}
df = pd.DataFrame(data, index=("gün1", "gün2", "gün3"))
print(df)
data = {"calories": [420, 380, 390], "duration": [50, 40, 45]}
df = pd.DataFrame(data)
df["calories"]  # we can also access a single value as df['calories'][0]
df.calories
df.iloc[:, 0:2]
import pandas as pd
df = pd.read_csv("../input/userdata/user.csv")
df
# import os
# dir_path = os.path.dirname(os.path.realpath("__file__"))
# df = pd.read_csv(dir_path + "/user.csv")
# we would also need to write this when running the notebook somewhere else
import pandas as pd
df = pd.read_csv("../input/yenidata/yenidata.csv")
df
import pandas as pd
df = pd.read_csv("../input/dataset3/data.csv")
df
import pandas as pd
df = pd.read_csv("../input/eitim-veri-seti/data.csv")
df
import pandas as pd
df = pd.read_csv("../input/eitim-veri-seti/data.csv")
df.dropna(inplace=True)
df.head()
df["Duration"][0] = None
df
df.fillna(0, inplace=True)
df
df.columns
df["Duration"].head(5)
df["Duration"].std()
df["Duration"].mean()
import pandas as pd
df = pd.read_csv("../input/eitim-veri-seti/data.csv")
df["Maxpulse"][4] = None
x = df["Maxpulse"].mean()
df["Maxpulse"].fillna(x, inplace=True)
df
import pandas as pd
df = pd.read_csv("../input/eitim-veri-seti/data.csv")
df["Duration"].head(4)
df["Duration"] = pd.to_numeric(
df["Duration"], downcast="float"
)  # without the downcast parameter, pd.to_numeric keeps the default numeric dtype
df["Duration"].head(4)
df["Duration"] = pd.to_numeric(df["Duration"], downcast=None)
df["Duration"].head(4)  # None is the default value of the downcast parameter
df.loc[7, "Duration"] = 1234
df.head(8)
df["Duration"].value_counts()
df.index
import numpy as np
for i in df.index:
if df.loc[i, "Duration"] < 120:
df.loc[i, "Duration"] = np.random.randint(120, 200)
df.head(10)
import numpy as np
for j in df.columns:
for i in df.index:
if df.loc[i, j] < 120:
df.loc[i, j] = np.random.randint(120, 200)
df.head(20)
for i in df.index:
    if df.loc[i, "Duration"] < 10:
        df.drop(i, inplace=True)
df.head()
df.drop("Duration", axis=1, inplace=True)
df.head()
df = pd.read_csv("../input/eitim-veri-seti/data.csv")
x = df.drop("Maxpulse", axis=1)
y = df["Maxpulse"]
y
import pandas as pd
df = pd.read_csv("../input/eitim-veri-seti/data.csv")
df
df["Calories"] > 400
x = df["Calories"] > 1000
print(len(df[x]))
print(df[x])
kosul = (df["Calories"] > 400) & (df["Maxpulse"] > 125)
df[kosul]
np.logical_and(df["Calories"] > 400, df["Maxpulse"] > 400)
import pandas as pd
df = pd.read_csv("../input/eitim-veri-seti/data.csv")
data1 = df.head()
data2 = df.tail()
concat_data_row = pd.concat([data1, data2], axis=0)
concat_data_row
df = pd.read_csv("../input/eitim-veri-seti/data.csv")
df["kalori_seviyesi"] = ["high" if i > 500 else "low" for i in df["Calories"]]
df
df["ort_pulse"] = (df["Pulse"] + df["Maxpulse"]) / 2
print(df)
df = pd.read_csv("../input/pokemon/Pokemon.csv")
df.head()
df = pd.read_csv("../input/pokemon/Pokemon.csv")
df["Type 1"].value_counts()
df = pd.read_csv("../input/pokemon/Pokemon.csv")
df["Type 1"].value_counts()
df[df["Type 1"] == "Electric"]["HP"].mean()
df["Type 1"].unique()
df["Type 1"].nunique()
df = pd.read_csv("../input/pokemon/Pokemon.csv")
a = df["Type 1"].value_counts()
a.head(3)
df = pd.read_csv("../input/pokemon/Pokemon.csv")
df["Type 1"].value_counts().head(3)
new_df = df[df["Type 1"] == "Electric"]
print(new_df)
print(df)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/197/129197408.ipynb
|
pokemon
|
abcsds
|
[{"Id": 129197408, "ScriptId": 38401626, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11068299, "CreationDate": "05/11/2023 18:23:48", "VersionNumber": 3.0, "Title": "Pandas", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 227.0, "LinesInsertedFromPrevious": 146.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 81.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185029190, "KernelVersionId": 129197408, "SourceDatasetVersionId": 280}]
|
[{"Id": 280, "DatasetId": 121, "DatasourceVersionId": 280, "CreatorUserId": 693375, "LicenseName": "CC0: Public Domain", "CreationDate": "08/29/2016 06:01:43", "VersionNumber": 2.0, "Title": "Pokemon with stats", "Slug": "pokemon", "Subtitle": "721 Pokemon with stats and types", "Description": "This data set includes 721 Pokemon, including their number, name, first and second type, and basic stats: HP, Attack, Defense, Special Attack, Special Defense, and Speed. It has been of great use when teaching statistics to kids. With certain types you can also give a geeky introduction to machine learning.\n\nThis are the raw attributes that are used for calculating how much damage an attack will do in the games. This dataset is about the pokemon games (*NOT* pokemon cards or Pokemon Go).\n\nThe data as described by [Myles O'Neill](https://www.kaggle.com/mylesoneill) is:\n\n- **#**: ID for each pokemon\n- **Name**: Name of each pokemon\n- **Type 1**: Each pokemon has a type, this determines weakness/resistance to attacks\n- **Type 2**: Some pokemon are dual type and have 2\n- **Total**: sum of all stats that come after this, a general guide to how strong a pokemon is\n- **HP**: hit points, or health, defines how much damage a pokemon can withstand before fainting\n- **Attack**: the base modifier for normal attacks (eg. Scratch, Punch)\n- **Defense**: the base damage resistance against normal attacks\n- **SP Atk**: special attack, the base modifier for special attacks (e.g. fire blast, bubble beam)\n- **SP Def**: the base damage resistance against special attacks\n- **Speed**: determines which pokemon attacks first each round\n\nThe data for this table has been acquired from several different sites, including: \n\n - [pokemon.com](http://www.pokemon.com/us/pokedex/)\n - [pokemondb](http://pokemondb.net/pokedex)\n - [bulbapedia](http://bulbapedia.bulbagarden.net/wiki/List_of_Pok%C3%A9mon_by_National_Pok%C3%A9dex_number)\n\nOne question has been answered with this database: The type of a pokemon cannot be inferred only by it's Attack and Deffence. It would be worthy to find which two variables can define the type of a pokemon, if any. Two variables can be plotted in a 2D space, and used as an example for machine learning. This could mean the creation of a visual example any geeky Machine Learning class would love.", "VersionNotes": "Thanks to [ChiragJhamb](https://www.kaggle.com/aspredicted) this new version contains the number of the generation (as an integer) each pokemon belongs to ( \"**Generation**\" ) and whether the pokemon is legendary or not ( \"**Legendary**\" ) as a boolean value.", "TotalCompressedBytes": 44028.0, "TotalUncompressedBytes": 44028.0}]
|
[{"Id": 121, "CreatorUserId": 693375, "OwnerUserId": 693375.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 280.0, "CurrentDatasourceVersionId": 280.0, "ForumId": 1440, "Type": 2, "CreationDate": "08/22/2016 22:44:53", "LastActivityDate": "02/06/2018", "TotalViews": 679736, "TotalDownloads": 106556, "TotalVotes": 2409, "TotalKernels": 1275}]
|
[{"Id": 693375, "UserName": "abcsds", "DisplayName": "Alberto Barradas", "RegisterDate": "08/22/2016", "PerformanceTier": 0}]
|
import pandas as pd
# the pandas Series and pandas DataFrame data types are used
# pandas Series
import pandas as pd
a = [1, 2, 3, 4]
my_series = pd.Series(a)
print(my_series)
my_series[0]
a = [1, 2, 3, 4]
my_series = pd.Series(a, index=["x", "z", "y", "t"])
my_series
my_series["x"]
my_dataset = {"cars": ["BMV", "Mercedes", "Audi"], "price": [123, 456, 789]}
my_df = pd.DataFrame(my_dataset)
my_df
my_df.loc[0]
my_df.loc[1]
my_df.loc[2]
data = {"calories": [420, 380, 390], "duration": [50, 40, 45]}
df = pd.DataFrame(data)
df
df.loc[0:2]
data = {"calories": [420, 380, 390], "duration": [50, 40, 45]}
df = pd.DataFrame(data, index=("gün1", "gün2", "gün3"))
print(df)
data = {"calories": [420, 380, 390], "duration": [50, 40, 45]}
df = pd.DataFrame(data)
df["calories"]  # we can also access a single value as df['calories'][0]
df.calories
df.iloc[:, 0:2]
import pandas as pd
df = pd.read_csv("../input/userdata/user.csv")
df
# import os
# dir_path = os.path.dirname(os.path.realpath("__file__"))
# df = pd.read_csv(dir_path + "/user.csv")
# we would also need to write this when running the notebook somewhere else
import pandas as pd
df = pd.read_csv("../input/yenidata/yenidata.csv")
df
import pandas as pd
df = pd.read_csv("../input/dataset3/data.csv")
df
import pandas as pd
df = pd.read_csv("../input/eitim-veri-seti/data.csv")
df
import pandas as pd
df = pd.read_csv("../input/eitim-veri-seti/data.csv")
df.dropna(inplace=True)
df.head()
df["Duration"][0] = None
df
df.fillna(0, inplace=True)
df
df.columns
df["Duration"].head(5)
df["Duration"].std()
df["Duration"].mean()
import pandas as pd
df = pd.read_csv("../input/eitim-veri-seti/data.csv")
df["Maxpulse"][4] = None
x = df["Maxpulse"].mean()
df["Maxpulse"].fillna(x, inplace=True)
df
import pandas as pd
df = pd.read_csv("../input/eitim-veri-seti/data.csv")
df["Duration"].head(4)
df["Duration"] = pd.to_numeric(
df["Duration"], downcast="float"
)  # without the downcast parameter, pd.to_numeric keeps the default numeric dtype
df["Duration"].head(4)
df["Duration"] = pd.to_numeric(df["Duration"], downcast=None)
df["Duration"].head(4)  # None is the default value of the downcast parameter
df.loc[7, "Duration"] = 1234
df.head(8)
df["Duration"].value_counts()
df.index
import numpy as np
for i in df.index:
if df.loc[i, "Duration"] < 120:
df.loc[i, "Duration"] = np.random.randint(120, 200)
df.head(10)
import numpy as np
for j in df.columns:
for i in df.index:
if df.loc[i, j] < 120:
df.loc[i, j] = np.random.randint(120, 200)
df.head(20)
for i in df.index:
    if df.loc[i, "Duration"] < 10:
        df.drop(i, inplace=True)
df.head()
df.drop("Duration", axis=1, inplace=True)
df.head()
df = pd.read_csv("../input/eitim-veri-seti/data.csv")
x = df.drop("Maxpulse", axis=1)
y = df["Maxpulse"]
y
import pandas as pd
df = pd.read_csv("../input/eitim-veri-seti/data.csv")
df
df["Calories"] > 400
x = df["Calories"] > 1000
print(len(df[x]))
print(df[x])
kosul = (df["Calories"] > 400) & (df["Maxpulse"] > 125)
df[kosul]
np.logical_and(df["Calories"] > 400, df["Maxpulse"] > 400)
import pandas as pd
df = pd.read_csv("../input/eitim-veri-seti/data.csv")
data1 = df.head()
data2 = df.tail()
concat_data_row = pd.concat([data1, data2], axis=0)
concat_data_row
df = pd.read_csv("../input/eitim-veri-seti/data.csv")
df["kalori_seviyesi"] = ["high" if i > 500 else "low" for i in df["Calories"]]
df
df["ort_pulse"] = (df["Pulse"] + df["Maxpulse"]) / 2
print(df)
df = pd.read_csv("../input/pokemon/Pokemon.csv")
df.head()
df = pd.read_csv("../input/pokemon/Pokemon.csv")
df["Type 1"].value_counts()
df = pd.read_csv("../input/pokemon/Pokemon.csv")
df["Type 1"].value_counts()
df[df["Type 1"] == "Electric"]["HP"].mean()
df["Type 1"].unique()
df["Type 1"].nunique()
df = pd.read_csv("../input/pokemon/Pokemon.csv")
a = df["Type 1"].value_counts()
a.head(3)
df = pd.read_csv("../input/pokemon/Pokemon.csv")
df["Type 1"].value_counts().head(3)
new_df = df[df["Type 1"] == "Electric"]
print(new_df)
print(df)
|
[{"pokemon/Pokemon.csv": {"column_names": "[\"#\", \"Name\", \"Type 1\", \"Type 2\", \"Total\", \"HP\", \"Attack\", \"Defense\", \"Sp. Atk\", \"Sp. Def\", \"Speed\", \"Generation\", \"Legendary\"]", "column_data_types": "{\"#\": \"int64\", \"Name\": \"object\", \"Type 1\": \"object\", \"Type 2\": \"object\", \"Total\": \"int64\", \"HP\": \"int64\", \"Attack\": \"int64\", \"Defense\": \"int64\", \"Sp. Atk\": \"int64\", \"Sp. Def\": \"int64\", \"Speed\": \"int64\", \"Generation\": \"int64\", \"Legendary\": \"bool\"}", "info": "<class 'pandas.core.frame.DataFrame'>\nRangeIndex: 800 entries, 0 to 799\nData columns (total 13 columns):\n # Column Non-Null Count Dtype \n--- ------ -------------- ----- \n 0 # 800 non-null int64 \n 1 Name 800 non-null object\n 2 Type 1 800 non-null object\n 3 Type 2 414 non-null object\n 4 Total 800 non-null int64 \n 5 HP 800 non-null int64 \n 6 Attack 800 non-null int64 \n 7 Defense 800 non-null int64 \n 8 Sp. Atk 800 non-null int64 \n 9 Sp. Def 800 non-null int64 \n 10 Speed 800 non-null int64 \n 11 Generation 800 non-null int64 \n 12 Legendary 800 non-null bool \ndtypes: bool(1), int64(9), object(3)\nmemory usage: 75.9+ KB\n", "summary": "{\"#\": {\"count\": 800.0, \"mean\": 362.81375, \"std\": 208.34379756406662, \"min\": 1.0, \"25%\": 184.75, \"50%\": 364.5, \"75%\": 539.25, \"max\": 721.0}, \"Total\": {\"count\": 800.0, \"mean\": 435.1025, \"std\": 119.96303975551899, \"min\": 180.0, \"25%\": 330.0, \"50%\": 450.0, \"75%\": 515.0, \"max\": 780.0}, \"HP\": {\"count\": 800.0, \"mean\": 69.25875, \"std\": 25.53466903233207, \"min\": 1.0, \"25%\": 50.0, \"50%\": 65.0, \"75%\": 80.0, \"max\": 255.0}, \"Attack\": {\"count\": 800.0, \"mean\": 79.00125, \"std\": 32.45736586949845, \"min\": 5.0, \"25%\": 55.0, \"50%\": 75.0, \"75%\": 100.0, \"max\": 190.0}, \"Defense\": {\"count\": 800.0, \"mean\": 73.8425, \"std\": 31.183500559332934, \"min\": 5.0, \"25%\": 50.0, \"50%\": 70.0, \"75%\": 90.0, \"max\": 230.0}, \"Sp. Atk\": {\"count\": 800.0, \"mean\": 72.82, \"std\": 32.7222941688016, \"min\": 10.0, \"25%\": 49.75, \"50%\": 65.0, \"75%\": 95.0, \"max\": 194.0}, \"Sp. Def\": {\"count\": 800.0, \"mean\": 71.9025, \"std\": 27.82891579711746, \"min\": 20.0, \"25%\": 50.0, \"50%\": 70.0, \"75%\": 90.0, \"max\": 230.0}, \"Speed\": {\"count\": 800.0, \"mean\": 68.2775, \"std\": 29.060473717161464, \"min\": 5.0, \"25%\": 45.0, \"50%\": 65.0, \"75%\": 90.0, \"max\": 180.0}, \"Generation\": {\"count\": 800.0, \"mean\": 3.32375, \"std\": 1.6612904004849451, \"min\": 1.0, \"25%\": 2.0, \"50%\": 3.0, \"75%\": 5.0, \"max\": 6.0}}", "examples": "{\"#\":{\"0\":1,\"1\":2,\"2\":3,\"3\":3},\"Name\":{\"0\":\"Bulbasaur\",\"1\":\"Ivysaur\",\"2\":\"Venusaur\",\"3\":\"VenusaurMega Venusaur\"},\"Type 1\":{\"0\":\"Grass\",\"1\":\"Grass\",\"2\":\"Grass\",\"3\":\"Grass\"},\"Type 2\":{\"0\":\"Poison\",\"1\":\"Poison\",\"2\":\"Poison\",\"3\":\"Poison\"},\"Total\":{\"0\":318,\"1\":405,\"2\":525,\"3\":625},\"HP\":{\"0\":45,\"1\":60,\"2\":80,\"3\":80},\"Attack\":{\"0\":49,\"1\":62,\"2\":82,\"3\":100},\"Defense\":{\"0\":49,\"1\":63,\"2\":83,\"3\":123},\"Sp. Atk\":{\"0\":65,\"1\":80,\"2\":100,\"3\":122},\"Sp. Def\":{\"0\":65,\"1\":80,\"2\":100,\"3\":120},\"Speed\":{\"0\":45,\"1\":60,\"2\":80,\"3\":80},\"Generation\":{\"0\":1,\"1\":1,\"2\":1,\"3\":1},\"Legendary\":{\"0\":false,\"1\":false,\"2\":false,\"3\":false}}"}}]
| true | 5 |
<start_data_description><data_path>pokemon/Pokemon.csv:
<column_names>
['#', 'Name', 'Type 1', 'Type 2', 'Total', 'HP', 'Attack', 'Defense', 'Sp. Atk', 'Sp. Def', 'Speed', 'Generation', 'Legendary']
<column_types>
{'#': 'int64', 'Name': 'object', 'Type 1': 'object', 'Type 2': 'object', 'Total': 'int64', 'HP': 'int64', 'Attack': 'int64', 'Defense': 'int64', 'Sp. Atk': 'int64', 'Sp. Def': 'int64', 'Speed': 'int64', 'Generation': 'int64', 'Legendary': 'bool'}
<dataframe_Summary>
{'#': {'count': 800.0, 'mean': 362.81375, 'std': 208.34379756406662, 'min': 1.0, '25%': 184.75, '50%': 364.5, '75%': 539.25, 'max': 721.0}, 'Total': {'count': 800.0, 'mean': 435.1025, 'std': 119.96303975551899, 'min': 180.0, '25%': 330.0, '50%': 450.0, '75%': 515.0, 'max': 780.0}, 'HP': {'count': 800.0, 'mean': 69.25875, 'std': 25.53466903233207, 'min': 1.0, '25%': 50.0, '50%': 65.0, '75%': 80.0, 'max': 255.0}, 'Attack': {'count': 800.0, 'mean': 79.00125, 'std': 32.45736586949845, 'min': 5.0, '25%': 55.0, '50%': 75.0, '75%': 100.0, 'max': 190.0}, 'Defense': {'count': 800.0, 'mean': 73.8425, 'std': 31.183500559332934, 'min': 5.0, '25%': 50.0, '50%': 70.0, '75%': 90.0, 'max': 230.0}, 'Sp. Atk': {'count': 800.0, 'mean': 72.82, 'std': 32.7222941688016, 'min': 10.0, '25%': 49.75, '50%': 65.0, '75%': 95.0, 'max': 194.0}, 'Sp. Def': {'count': 800.0, 'mean': 71.9025, 'std': 27.82891579711746, 'min': 20.0, '25%': 50.0, '50%': 70.0, '75%': 90.0, 'max': 230.0}, 'Speed': {'count': 800.0, 'mean': 68.2775, 'std': 29.060473717161464, 'min': 5.0, '25%': 45.0, '50%': 65.0, '75%': 90.0, 'max': 180.0}, 'Generation': {'count': 800.0, 'mean': 3.32375, 'std': 1.6612904004849451, 'min': 1.0, '25%': 2.0, '50%': 3.0, '75%': 5.0, 'max': 6.0}}
<dataframe_info>
RangeIndex: 800 entries, 0 to 799
Data columns (total 13 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 # 800 non-null int64
1 Name 800 non-null object
2 Type 1 800 non-null object
3 Type 2 414 non-null object
4 Total 800 non-null int64
5 HP 800 non-null int64
6 Attack 800 non-null int64
7 Defense 800 non-null int64
8 Sp. Atk 800 non-null int64
9 Sp. Def 800 non-null int64
10 Speed 800 non-null int64
11 Generation 800 non-null int64
12 Legendary 800 non-null bool
dtypes: bool(1), int64(9), object(3)
memory usage: 75.9+ KB
<some_examples>
{'#': {'0': 1, '1': 2, '2': 3, '3': 3}, 'Name': {'0': 'Bulbasaur', '1': 'Ivysaur', '2': 'Venusaur', '3': 'VenusaurMega Venusaur'}, 'Type 1': {'0': 'Grass', '1': 'Grass', '2': 'Grass', '3': 'Grass'}, 'Type 2': {'0': 'Poison', '1': 'Poison', '2': 'Poison', '3': 'Poison'}, 'Total': {'0': 318, '1': 405, '2': 525, '3': 625}, 'HP': {'0': 45, '1': 60, '2': 80, '3': 80}, 'Attack': {'0': 49, '1': 62, '2': 82, '3': 100}, 'Defense': {'0': 49, '1': 63, '2': 83, '3': 123}, 'Sp. Atk': {'0': 65, '1': 80, '2': 100, '3': 122}, 'Sp. Def': {'0': 65, '1': 80, '2': 100, '3': 120}, 'Speed': {'0': 45, '1': 60, '2': 80, '3': 80}, 'Generation': {'0': 1, '1': 1, '2': 1, '3': 1}, 'Legendary': {'0': False, '1': False, '2': False, '3': False}}
<end_description>
| 1,670 | 0 | 3,013 | 1,670 |
129003546
|
<jupyter_start><jupyter_text>Netflix Movies and TV Shows
### Other Platform's Datasets (Click on the logos to view)
>
[![alt text][1]][2] [![alt text][3]][4] [![alt text][5]][6] [![alt text][7]][8]
[1]: https://i.imgur.com/As0PMcL.jpg =75x20
[2]: https://www.kaggle.com/shivamb/netflix-shows
[3]: https://i.imgur.com/r5t3MpQ.jpg =75x20
[4]: https://www.kaggle.com/shivamb/amazon-prime-movies-and-tv-shows
[5]: https://i.imgur.com/4a4ZMuy.png =75x30
[6]: https://www.kaggle.com/shivamb/disney-movies-and-tv-shows
[7]: https://i.imgur.com/nCL8Skc.png?1 =75x32
[8]: https://www.kaggle.com/shivamb/hulu-movies-and-tv-shows
- [Amazon Prime Video Movies and TV Shows](https://www.kaggle.com/shivamb/amazon-prime-movies-and-tv-shows)
- [Disney+ Movies and TV Shows](https://www.kaggle.com/shivamb/disney-movies-and-tv-shows)
- [Netflix Prime Video Movies and TV Shows](https://www.kaggle.com/shivamb/netflix-shows)
- [Hulu Movies and TV Shows](https://www.kaggle.com/shivamb/hulu-movies-and-tv-shows)
### Netflix Movies and TV Shows
> **About this Dataset:** *[Netflix](https://en.wikipedia.org/wiki/Netflix) is one of the most popular media and video streaming platforms. They have over 8000 movies or tv shows available on their platform, as of mid-2021, they have over 200M Subscribers globally. This tabular dataset consists of listings of all the movies and tv shows available on Netflix, along with details such as - cast, directors, ratings, release year, duration, etc.*
Featured Notebooks: [Click Here to View Featured Notebooks](https://www.kaggle.com/shivamb/netflix-shows/discussion/279376)
Milestone: Oct 18th, 2021: [Most Upvoted Dataset on Kaggle by an Individual Contributor](https://www.kaggle.com/shivamb/netflix-shows/discussion/279377)
### Interesting Task Ideas
> 1. Understanding what content is available in different countries
> 2. Identifying similar content by matching text-based features
> 3. Network analysis of Actors / Directors and find interesting insights
> 4. Does Netflix has more focus on TV Shows than movies in recent years.
[Check my Other Datasets](https://www.kaggle.com/shivamb/datasets)
Kaggle dataset identifier: netflix-shows
<jupyter_script># # Netflix Data set
# ## Importing Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
data = pd.read_csv("//kaggle//input//netflix-shows//netflix_titles.csv")
data
data.dtypes
# # 1. Finding the Null values
data.head(2)
data.isnull().sum()
data.dropna(inplace=True)
data.isnull().sum()
# # 2. How many TV show and Movies are there
data.head(2)
types = data["type"].value_counts()
types.to_frame()
# ## Plotting TV Shows and Movies
x_values, y_values = types.values, types.index
plt.figure(figsize=(7, 5))
sns.barplot(data=types, x=x_values, y=y_values)
plt.xlabel("TV Show Movies")
# # 3. Top 10 Director
data.head(2)
director = data["director"].value_counts().head(10)
director.to_frame()
# ## Plotting top 10 directors
x_values = director.values
y_values = director.index
plt.figure(figsize=(7, 5))
sns.barplot(x=x_values, y=y_values, palette="rainbow")
plt.ylabel("director")
# # 4. The release year
data.head(2)
data.dtypes
release_year = data["release_year"]
release_year.to_frame()
# ## Plotting
x_values = release_year.values
plt.figure(figsize=(10, 5))
sns.countplot(x=x_values)
plt.xlabel("year")
plt.xticks(rotation=90)
# # 5. How many rating are there
data.head(2)
rating = data["rating"]
rating.to_frame()
# ## Plotting
x_values = rating.values
y_values = rating.index
plt.figure(figsize=(10, 5))
sns.countplot(x=x_values)
plt.xlabel("rating")
plt.xticks(rotation=90)
# # 6. Top 10 Movie-producing Countries
data.head(2)
country = data["country"].value_counts().head(10)
country.to_frame()
# ## Plotting
x_values = country.values
y_values = country.index
plt.figure(figsize=(10, 5))
sns.barplot(x=x_values, y=y_values, palette="rainbow")
plt.ylabel("Country")
plt.xticks(rotation=90)
# # 7. Top 10 cast
data.head(2)
cast = data["cast"].value_counts().head(10)
cast.to_frame()
# ## Plotting
x_values = cast.values
y_values = cast.index
plt.figure(figsize=(10, 5))
sns.barplot(x=x_values, y=y_values, palette="rainbow")
plt.xlabel("Number of Cast")
plt.ylabel("Cast")
plt.xticks(rotation=90)
# # 8. How many Movies or TV Shows were added in each Month?
data.head(2)
data["date_added"] = pd.to_datetime(data["date_added"])
data.dtypes
data["date_added"] = data["date_added"].dt.month
month = data["date_added"].value_counts()
month.to_frame()
# ## Plotting
x_values = month.values
y_values = month.index
plt.figure(figsize=(10, 5))
sns.barplot(x=x_values, y=y_values, palette="rainbow")
plt.ylabel("Number of Months")
plt.xlabel("Number of Movies or TV Shows")
plt.xticks(rotation=90)
# # 9. Top 10 duration
data.head(2)
duration = data["duration"].value_counts().head(10)
duration.to_frame()
# # Plotting
x_values = duration.values
y_values = duration.index
plt.figure(figsize=(10, 5))
sns.barplot(x=x_values, y=y_values, palette="rainbow")
plt.ylabel("Top 10 duration")
plt.xlabel("Number of Titles")
plt.xticks(rotation=90)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/003/129003546.ipynb
|
netflix-shows
|
shivamb
|
[{"Id": 129003546, "ScriptId": 38267872, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14633433, "CreationDate": "05/10/2023 08:42:34", "VersionNumber": 1.0, "Title": "Netflix_Data_set", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 158.0, "LinesInsertedFromPrevious": 158.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 184678126, "KernelVersionId": 129003546, "SourceDatasetVersionId": 2654038}]
|
[{"Id": 2654038, "DatasetId": 434238, "DatasourceVersionId": 2698094, "CreatorUserId": 1571785, "LicenseName": "CC0: Public Domain", "CreationDate": "09/27/2021 04:44:36", "VersionNumber": 5.0, "Title": "Netflix Movies and TV Shows", "Slug": "netflix-shows", "Subtitle": "Listings of movies and tv shows on Netflix - Regularly Updated", "Description": "### Other Platform's Datasets (Click on the logos to view) \n\n> \n[![alt text][1]][2] [![alt text][3]][4] [![alt text][5]][6] [![alt text][7]][8]\n[1]: https://i.imgur.com/As0PMcL.jpg =75x20\n[2]: https://www.kaggle.com/shivamb/netflix-shows\n[3]: https://i.imgur.com/r5t3MpQ.jpg =75x20\n[4]: https://www.kaggle.com/shivamb/amazon-prime-movies-and-tv-shows\n[5]: https://i.imgur.com/4a4ZMuy.png =75x30\n[6]: https://www.kaggle.com/shivamb/disney-movies-and-tv-shows\n[7]: https://i.imgur.com/nCL8Skc.png?1 =75x32\n[8]: https://www.kaggle.com/shivamb/hulu-movies-and-tv-shows\n\n- [Amazon Prime Video Movies and TV Shows](https://www.kaggle.com/shivamb/amazon-prime-movies-and-tv-shows)\n- [Disney+ Movies and TV Shows](https://www.kaggle.com/shivamb/disney-movies-and-tv-shows)\n- [Netflix Prime Video Movies and TV Shows](https://www.kaggle.com/shivamb/netflix-shows)\n- [Hulu Movies and TV Shows](https://www.kaggle.com/shivamb/hulu-movies-and-tv-shows)\n\n### Netflix Movies and TV Shows \n\n> **About this Dataset:** *[Netflix](https://en.wikipedia.org/wiki/Netflix) is one of the most popular media and video streaming platforms. They have over 8000 movies or tv shows available on their platform, as of mid-2021, they have over 200M Subscribers globally. This tabular dataset consists of listings of all the movies and tv shows available on Netflix, along with details such as - cast, directors, ratings, release year, duration, etc.* \n\nFeatured Notebooks: [Click Here to View Featured Notebooks](https://www.kaggle.com/shivamb/netflix-shows/discussion/279376)\nMilestone: Oct 18th, 2021: [Most Upvoted Dataset on Kaggle by an Individual Contributor](https://www.kaggle.com/shivamb/netflix-shows/discussion/279377)\n\n### Interesting Task Ideas \n\n> 1. Understanding what content is available in different countries\n> 2. Identifying similar content by matching text-based features \n> 3. Network analysis of Actors / Directors and find interesting insights \n> 4. Does Netflix has more focus on TV Shows than movies in recent years.\n\n[Check my Other Datasets](https://www.kaggle.com/shivamb/datasets)", "VersionNotes": "Data Update 2021/09/27", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 434238, "CreatorUserId": 1571785, "OwnerUserId": 1571785.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 2654038.0, "CurrentDatasourceVersionId": 2698094.0, "ForumId": 446914, "Type": 2, "CreationDate": "12/04/2019 05:57:54", "LastActivityDate": "12/04/2019", "TotalViews": 2438410, "TotalDownloads": 358670, "TotalVotes": 7671, "TotalKernels": 1385}]
|
[{"Id": 1571785, "UserName": "shivamb", "DisplayName": "Shivam Bansal", "RegisterDate": "01/22/2018", "PerformanceTier": 4}]
|
# # Netflix Data set
# ## Importing Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
data = pd.read_csv("//kaggle//input//netflix-shows//netflix_titles.csv")
data
data.dtypes
# # 1. Finding the Null values
data.head(2)
data.isnull().sum()
data.dropna(inplace=True)
data.isnull().sum()
# # 2. How many TV show and Movies are there
data.head(2)
types = data["type"].value_counts()
types.to_frame()
# ## Plotting TV Shows and Movies
x_values, y_values = types.values, types.index
plt.figure(figsize=(7, 5))
sns.barplot(data=types, x=x_values, y=y_values)
plt.xlabel("TV Show Movies")
# # 3. Top 10 Director
data.head(2)
director = data["director"].value_counts().head(10)
director.to_frame()
# ## Plotting top 10 directors
x_values = director.values
y_values = director.index
plt.figure(figsize=(7, 5))
sns.barplot(x=x_values, y=y_values, palette="rainbow")
plt.ylabel("director")
# # 4. The release year
data.head(2)
data.dtypes
release_year = data["release_year"]
release_year.to_frame()
# ## Plotting
x_values = release_year.values
plt.figure(figsize=(10, 5))
sns.countplot(x=x_values)
plt.xlabel("year")
plt.xticks(rotation=90)
# # 5. How many rating are there
data.head(2)
rating = data["rating"]
rating.to_frame()
# ## Plotting
x_values = rating.values
y_values = rating.index
plt.figure(figsize=(10, 5))
sns.countplot(x=x_values)
plt.xlabel("rating")
plt.xticks(rotation=90)
# # 6. Top 10 Movie-producing Countries
data.head(2)
country = data["country"].value_counts().head(10)
country.to_frame()
# ## Plotting
x_values = country.values
y_values = country.index
plt.figure(figsize=(10, 5))
sns.barplot(x=x_values, y=y_values, palette="rainbow")
plt.ylabel("Country")
plt.xticks(rotation=90)
# # 7. Top 10 cast
data.head(2)
cast = data["cast"].value_counts().head(10)
cast.to_frame()
# ## Plotting
x_values = cast.values
y_values = cast.index
plt.figure(figsize=(10, 5))
sns.barplot(x=x_values, y=y_values, palette="rainbow")
plt.xlabel("Number of Cast")
plt.ylabel("Cast")
plt.xticks(rotation=90)
# # 8. How many Movies or TV Shows were added in each Month?
data.head(2)
data["date_added"] = pd.to_datetime(data["date_added"])
data.dtypes
data["date_added"] = data["date_added"].dt.month
month = data["date_added"].value_counts()
month.to_frame()
# ## Plotting
x_values = month.values
y_values = month.index
plt.figure(figsize=(10, 5))
sns.barplot(x=x_values, y=y_values, palette="rainbow")
plt.ylabel("Number of Months")
plt.xlabel("Number of Movies or TV Shows")
plt.xticks(rotation=90)
# # 9. Top 10 duration
data.head(2)
duration = data["duration"].value_counts().head(10)
duration.to_frame()
# # Plotting
x_values = duration.values
y_values = duration.index
plt.figure(figsize=(10, 5))
sns.barplot(x=x_values, y=y_values, palette="rainbow")
plt.ylabel("Top 10 duration")
plt.xlabel("Number of Titles")
plt.xticks(rotation=90)
| false | 0 | 1,099 | 0 | 1,845 | 1,099 |
||
129179823
|
from kaggle_secrets import UserSecretsClient
user_secrets = UserSecretsClient()
openai_api_key = user_secrets.get_secret("OPENAI_API_KEY")
# not required, comment this out if you don't need it
openai_organization = user_secrets.get_secret("OPENAI_ORG_ID")
from langchain import Wikipedia
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain.agents import initialize_agent, Tool
from langchain.agents import AgentType
from langchain.agents.react.base import DocstoreExplorer
docstore = DocstoreExplorer(Wikipedia())
tools = [
Tool(
name="Search",
func=docstore.search,
description="useful for when you need to ask with search",
),
Tool(
name="Lookup",
func=docstore.lookup,
description="useful for when you need to ask with lookup",
),
]
llm = ChatOpenAI(
temperature=0,
model_name="gpt-3.5-turbo",
openai_api_key=openai_api_key,
openai_organization=openai_organization, # can comment this out if you don't need it
)
react = initialize_agent(tools, llm, agent=AgentType.REACT_DOCSTORE, verbose=True)
# sometimes you'll get an OutputParserException, but it should give the right answer
question = "Who directed the movie about Alexander Supertramp?"
react.run(question)
from wandb.integration.langchain import WandbTracer
# again, you may get an error, but the tracing will still work!
wandb_config = {"project": "wandb_prompts_react_demo"}
react.run(question, callbacks=[WandbTracer(wandb_config)])
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/179/129179823.ipynb
| null | null |
[{"Id": 129179823, "ScriptId": 38377515, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3046263, "CreationDate": "05/11/2023 15:21:15", "VersionNumber": 1.0, "Title": "W&B + Langchain", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 55.0, "LinesInsertedFromPrevious": 55.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 4}]
| null | null | null | null |
from kaggle_secrets import UserSecretsClient
user_secrets = UserSecretsClient()
openai_api_key = user_secrets.get_secret("OPENAI_API_KEY")
# not required, comment this out if you don't need it
openai_organization = user_secrets.get_secret("OPENAI_ORG_ID")
from langchain import Wikipedia
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain.agents import initialize_agent, Tool
from langchain.agents import AgentType
from langchain.agents.react.base import DocstoreExplorer
docstore = DocstoreExplorer(Wikipedia())
tools = [
Tool(
name="Search",
func=docstore.search,
description="useful for when you need to ask with search",
),
Tool(
name="Lookup",
func=docstore.lookup,
description="useful for when you need to ask with lookup",
),
]
llm = ChatOpenAI(
temperature=0,
model_name="gpt-3.5-turbo",
openai_api_key=openai_api_key,
openai_organization=openai_organization, # can comment this out if you don't need it
)
react = initialize_agent(tools, llm, agent=AgentType.REACT_DOCSTORE, verbose=True)
# sometimes you'll get an OutputParserException, but it should give the right answer
question = "Who directed the movie about Alexander Supertramp?"
react.run(question)
from wandb.integration.langchain import WandbTracer
# again, you may get an error, but the tracing will still work!
wandb_config = {"project": "wandb_prompts_react_demo"}
react.run(question, callbacks=[WandbTracer(wandb_config)])
| false | 0 | 435 | 4 | 435 | 435 |
||
129179663
|
<jupyter_start><jupyter_text>UNR-IDD Intrusion Detection Dataset
**This manuscript has been published in the 2023 IEEE Consumer Communications and Networking Conference.**
**License**
You may redistribute, republish, and mirror the UNR-IDD dataset in any form. However, any use or redistribution of the data must include a citation to the UNR-IDD dataset using the information provided:
@inproceedings{das2023unr,
title={UNR-IDD: Intrusion Detection Dataset using Network Port Statistics},
author={Das, Tapadhir and Hamdan, Osama Abu and Shukla, Raj Mani and Sengupta, Shamik and Arslan, Engin},
booktitle={2023 IEEE 20th Consumer Communications \& Networking Conference (CCNC)},
pages={497--500},
year={2023},
organization={IEEE}
**MORE COMPREHENSIVE INFORMATION CAN BE LOCATED AT THE DATASET WEBPAGE @ **
https://www.tapadhirdas.com/unr-idd-dataset
Kaggle dataset identifier: unridd-intrusion-detection-dataset
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
import warnings
warnings.filterwarnings("ignore")
# # Exploratory Data Analysis (EDA)
df = pd.read_csv("/kaggle/input/unridd-intrusion-detection-dataset/UNR-IDD.csv")
df.info()
df.head()
df.tail()
df.describe()
df.apply(pd.Series.value_counts)
sns.countplot(data=df, x="Binary Label")
sns.countplot(x=df["Label"])
df["Binary Label"].unique()
df["Label"].unique()
df["Port Number"].unique()
df["Switch ID"].unique()
df["Label"].value_counts()
df.info()
df.hist(bins=50, figsize=(15, 15))
# # Data Pre-processing
# ['Attack', 'Normal']=[1,0]
df_a = df[df["Binary Label"] == "Attack"]
df_n = df[df["Binary Label"] == "Normal"]
df_a
print(df_n["Label"].nunique())
# no need to use df_n as it only contains normal traffic
df_n.info()
df_a = df_a.drop("Binary Label", axis=1)
df_a["Port Number"] = df_a["Port Number"].replace(
["Port#:1", "Port#:2", "Port#:3", "Port#:4"], [1, 2, 3, 4]
)
df_a["Switch ID"] = df_a["Switch ID"].replace(
[
"of:000000000000000c",
"of:000000000000000a",
"of:000000000000000b",
"of:0000000000000003",
"of:0000000000000004",
"of:0000000000000001",
"of:0000000000000002",
"of:0000000000000007",
"of:0000000000000008",
"of:0000000000000005",
"of:0000000000000006",
"of:0000000000000009",
],
[12, 10, 11, 3, 4, 1, 2, 7, 8, 5, 6, 9],
)
df_a["Label"] = df_a["Label"].replace(
["TCP-SYN", "Blackhole", "Diversion", "Overflow", "PortScan"], [0, 1, 2, 3, 4]
)
df_a.info()
df["Label"].value_counts().plot(kind="pie", autopct="%1.2f%%")
plt.title("Hacking Count")
plt.show()
plt.figure(figsize=(15, 15))
sns.heatmap(df_a.corr(), annot=True)
plt.show()
# pairplot for particular features
# plt_df = df_a[['Switch ID', 'Port Number', 'Received Packets', 'Received Bytes',
# 'Sent Bytes', 'Sent Packets', 'Port alive Duration (S)',
# 'Packets Rx Dropped', 'Packets Tx Dropped', 'Packets Rx Errors',
# 'Packets Tx Errors', 'Delta Received Packets', 'Delta Received Bytes',
# 'Delta Sent Bytes', 'Delta Sent Packets',
# 'Delta Port alive Duration (S)', 'Delta Packets Rx Dropped',
# ' Delta Packets Tx Dropped', 'Delta Packets Rx Errors',
# 'Delta Packets Tx Errors', 'Connection Point', 'Total Load/Rate',
# 'Total Load/Latest', 'Unknown Load/Rate', 'Unknown Load/Latest',
# 'Latest bytes counter', 'is_valid', 'Table ID', 'Active Flow Entries',
# 'Packets Looked Up', 'Packets Matched', 'Max Size', 'Label']]
# fig =sns.pairplot(data = plt_df,hue="Label",corner=True);
# fig.savefig("out.png")
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_squared_error, r2_score
import joblib
import warnings
warnings.filterwarnings("ignore")
df_a.info()
# Splitting the dataset into dependent and independent features
X = df_a.drop(
[
"Label",
"Packets Rx Dropped",
"Packets Tx Dropped",
"Packets Rx Errors",
"Packets Tx Errors",
"Delta Packets Rx Dropped",
" Delta Packets Tx Dropped",
"Delta Packets Rx Errors",
"Delta Packets Tx Errors",
"is_valid",
"Table ID",
"Max Size",
],
axis=1,
)
y = df_a["Label"]
y.unique()
X.shape, y.shape
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
X_train.shape, y_train.shape, X_test.shape, y_test.shape
# ## Model Building & Training:
# Supervised machine learning is one of the most commonly used and successful types of machine learning. Supervised learning is used whenever we want to predict a certain outcome/label from a given set of features, and we have examples of features-label pairs. We build a machine learning model from these features-label pairs, which comprise our training set. Our goal is to make accurate predictions for new, never-before-seen data.
# There are two major types of supervised machine learning problems, called classification and regression. Our data set comes under the classification problem, as we are predicting a discrete label (the type of network attack). The supervised machine learning models (classifiers) considered to train the dataset in this notebook are:
# 1. Logistic Regression
# 2. k-Nearest Neighbors
# 3. Support Vector Classifier
# 4. Naive Bayes
# 5. Decision Tree
# 6. Random Forest
# 7. Gradient Boosting
# 8. Catboost
# 9. Xgboost
# 10. Multilayer Perceptrons
#
# The metrics considered to evaluate the model performance are Accuracy, F1 score, Recall, and Precision; a short illustration of the micro-averaged scores is given below.
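# Note (illustrative, not part of the original analysis): because this is a
# single-label multi-class problem, the micro-averaged precision, recall and
# F1 score computed below are all equal to plain accuracy. A tiny toy example:
from sklearn.metrics import accuracy_score, f1_score as sk_f1_score

_toy_true = [0, 1, 2, 2, 4]
_toy_pred = [0, 1, 1, 2, 4]
print(accuracy_score(_toy_true, _toy_pred))  # 0.8
print(sk_f1_score(_toy_true, _toy_pred, average="micro"))  # also 0.8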
# Creating holders to store the model performance results
from sklearn import metrics
ML_Model = []
accuracy = []
f1_score = []
recall = []
precision = []
# function to call for storing the results
def storeResults(model, a, b, c, d):
ML_Model.append(model)
accuracy.append(round(a, 3))
f1_score.append(round(b, 3))
recall.append(round(c, 3))
precision.append(round(d, 3))
def model_report(modelname, y_train, y_test, p_train, p_test):
# computing the accuracy, f1_score, Recall, precision of the model performance
# computing the classification report of the model
# storing the results. The below mentioned order of parameter passing is important
print("Model:{}\n".format(modelname))
acc_train = metrics.accuracy_score(y_train, p_train)
acc_test = metrics.accuracy_score(y_test, p_test)
print("Accuracy on training Data: {:.3f}".format(acc_train))
print("Accuracy on test Data: {:.3f}\n".format(acc_test))
f1_score_train = metrics.f1_score(y_train, p_train, average="micro")
f1_score_test = metrics.f1_score(y_test, p_test, average="micro")
print("f1_score on training Data: {:.3f}".format(f1_score_train))
print("f1_score on test Data: {:.3f}\n".format(f1_score_test))
recall_score_train = metrics.recall_score(y_train, p_train, average="micro")
recall_score_test = metrics.recall_score(y_test, p_test, average="micro")
print("Recall on training Data: {:.3f}".format(recall_score_train))
print("Recall on test Data: {:.3f}\n".format(recall_score_test))
precision_score_train = metrics.precision_score(y_train, p_train, average="micro")
precision_score_test = metrics.precision_score(y_test, p_test, average="micro")
print("Precision on training Data: {:.3f}".format(precision_score_train))
print("Precision on test Data: {:.3f}\n".format(precision_score_test))
# computing the classification report of the model
print("Classification Report")
print(metrics.classification_report(y_test, p_test))
# storing the results
storeResults(
modelname, acc_test, f1_score_test, recall_score_test, precision_score_test
)
# ## Logistic Regression
# Logistic regression predicts the output of a categorical dependent variable, so the outcome must be a categorical or discrete value. Logistic Regression is similar to Linear Regression except in how it is used: Linear Regression is used for solving regression problems, whereas Logistic Regression is used for solving classification problems.
# Logistic regression model
from sklearn.linear_model import LogisticRegression
# from sklearn.pipeline import Pipeline
# instantiate the model
log = LogisticRegression()
# fit the model
log.fit(X_train, y_train)
# predicting the target value from the model for the samples
p_train_log = log.predict(X_train)
p_test_log = log.predict(X_test)
model_report(str(log), y_train, y_test, p_train_log, p_test_log)
# ## K-Nearest Neighbors : Classifier
# K-Nearest Neighbour is one of the simplest Machine Learning algorithms based on Supervised Learning technique. K-NN algorithm assumes the similarity between the new case/data and available cases and put the new case into the category that is most similar to the available categories.
# K-Nearest Neighbors Classifier model
from sklearn.neighbors import KNeighborsClassifier
# instantiate the model
knn = KNeighborsClassifier(n_neighbors=5)
# fit the model
knn.fit(X_train, y_train)
training_accuracy = []
test_accuracy = []
# try n_neighbors from 1 to 20
depth = range(1, 20)
for n in depth:
knn1 = KNeighborsClassifier(n_neighbors=n)
knn1.fit(X_train, y_train)
# record training set accuracy
training_accuracy.append(knn1.score(X_train, y_train))
# record generalization accuracy
test_accuracy.append(knn1.score(X_test, y_test))
# plotting the training & testing accuracy for n_neighbors from 1 to 19
plt.plot(depth, training_accuracy, label="training accuracy")
plt.plot(depth, test_accuracy, label="test accuracy")
plt.ylabel("Accuracy")
plt.xlabel("n_neighbors")
plt.legend()
# predicting the target value from the model for the samples
p_train_knn = knn.predict(X_train)
p_test_knn = knn.predict(X_test)
model_report(str(knn), y_train, y_test, p_train_knn, p_test_knn)
# ## Naive Bayes : Classifier
# Naïve Bayes is a supervised learning algorithm based on Bayes' theorem and used for solving classification problems. It is mainly used for text and image classification with high-dimensional training data. The Naïve Bayes classifier is one of the simplest and most effective classification algorithms and helps build fast machine learning models that can make quick predictions.
# Naive Bayes Classifier Model
from sklearn.naive_bayes import GaussianNB
from sklearn.pipeline import Pipeline
# instantiate the model
nb = GaussianNB()
# fit the model
nb.fit(X_train, y_train)
# predicting the target value from the model for the samples
p_train_nb = nb.predict(X_train)
p_test_nb = nb.predict(X_test)
model_report(str(nb), y_train, y_test, p_train_nb, p_test_nb)
# ## Decision Trees : Classifier
# Decision Tree is a Supervised learning technique that can be used for both classification and Regression problems, but mostly it is preferred for solving Classification problems. It is a tree-structured classifier, where internal nodes represent the features of a dataset, branches represent the decision rules and each leaf node represents the outcome.
# Decision Tree Classifier model
from sklearn.tree import DecisionTreeClassifier
# instantiate the model
tree = DecisionTreeClassifier(max_depth=30)
# fit the model
tree.fit(X_train, y_train)
# predicting the target value from the model for the samples
p_train_tree = tree.predict(X_train)
p_test_tree = tree.predict(X_test)
model_report(str(tree), y_train, y_test, p_train_tree, p_test_tree)
training_accuracy = []
test_accuracy = []
# try max_depth from 1 to 30
depth = range(1, 30)
for n in depth:
tree_test = DecisionTreeClassifier(max_depth=n)
tree_test.fit(X_train, y_train)
# record training set accuracy
training_accuracy.append(tree_test.score(X_train, y_train))
# record generalization accuracy
test_accuracy.append(tree_test.score(X_test, y_test))
# plotting the training & testing accuracy for max_depth from 1 to 30
plt.plot(depth, training_accuracy, label="training accuracy")
plt.plot(depth, test_accuracy, label="test accuracy")
plt.ylabel("Accuracy")
plt.xlabel("max_depth")
plt.legend()
# ## Random Forest : Classifier
# Random Forest is a popular machine learning algorithm that belongs to the supervised learning technique. It can be used for both Classification and Regression problems in ML. It is based on the concept of ensemble learning, which is a process of combining multiple classifiers to solve a complex problem and to improve the performance of the model.
# Random Forest Classifier Model
from sklearn.ensemble import RandomForestClassifier
# instantiate the model
forest = RandomForestClassifier(n_estimators=10)
# fit the model
forest.fit(X_train, y_train)
# predicting the target value from the model for the samples
p_train_forest = forest.predict(X_train)
p_test_forest = forest.predict(X_test)
model_report(str(forest), y_train, y_test, p_train_forest, p_test_forest)
# ## Gradient Boosting Classifier
# Gradient boosting classifiers are a group of machine learning algorithms that combine many weak learning models together to create a strong predictive model. Decision trees are usually used when doing gradient boosting. Boosting algorithms play a crucial role in dealing with bias variance trade-off. Unlike bagging algorithms, which only controls for high variance in a model, boosting controls both the aspects (bias & variance), and is considered to be more effective.
# Gradient Boosting Classifier Model
from sklearn.ensemble import GradientBoostingClassifier
# instantiate the model
gbc = GradientBoostingClassifier(max_depth=4, learning_rate=0.7)
# fit the model
gbc.fit(X_train, y_train)
# predicting the target value from the model for the samples
p_train_gbc = gbc.predict(X_train)
p_test_gbc = gbc.predict(X_test)
model_report(str(gbc), y_train, y_test, p_train_gbc, p_test_gbc)
# ## CatBoost Classifier
# CatBoost is a recently open-sourced machine learning algorithm from Yandex. It can easily integrate with deep learning frameworks like Google’s TensorFlow and Apple’s Core ML. It can work with diverse data types to help solve a wide range of problems that businesses face today.
# catboost Classifier Model
from catboost import CatBoostClassifier
# instantiate the model
cat = CatBoostClassifier(learning_rate=0.1)
# fit the model
cat.fit(X_train, y_train)
# predicting the target value from the model for the samples
p_train_cat = cat.predict(X_train)
p_test_cat = cat.predict(X_test)
model_report(str(cat), y_train, y_test, p_train_cat, p_test_cat)
# ## XGBoost Classifier
# XGBoost is an implementation of gradient-boosted decision trees designed for speed and performance, and it is dominant in competitive machine learning. Below we create a baseline XGBoost model in Python.
# XGBoost Classifier Model
from xgboost import XGBClassifier
# instantiate the model
xgb = XGBClassifier()
# fit the model
xgb.fit(X_train, y_train)
# predicting the target value from the model for the samples
p_train_xgb = xgb.predict(X_train)
p_test_xgb = xgb.predict(X_test)
model_report(str(xgb), y_train, y_test, p_train_xgb, p_test_xgb)
# ## Multi-layer Perceptron classifier
# MLPClassifier stands for Multi-layer Perceptron classifier, which, as the name suggests, is backed by a neural network. Unlike classification algorithms such as Support Vector Machines or the Naive Bayes classifier, MLPClassifier relies on an underlying neural network to perform the task of classification.
#
# Multi-layer Perceptron Classifier Model
from sklearn.neural_network import MLPClassifier
# instantiate the model
mlp = MLPClassifier()
# mlp = GridSearchCV(mlpc, parameter_space)
# fit the model
mlp.fit(X_train, y_train)
# predicting the target value from the model for the samples
p_train_mlp = mlp.predict(X_train)
p_test_mlp = mlp.predict(X_test)
model_report(str(mlp), y_train, y_test, p_train_mlp, p_test_mlp)
# ## Comparison of Models
# To compare the models' performance, a dataframe is created; its columns are the lists used above to store the results of each model.
# creating dataframe
result = pd.DataFrame(
{
"ML Model": ML_Model,
"Accuracy": accuracy,
"f1_score": f1_score,
"Recall": recall,
"Precision": precision,
}
)
# Sorting the dataframe by accuracy
sorted_result = result.sort_values(
by=["Accuracy", "f1_score"], ascending=False
).reset_index(drop=True)
sorted_result
# ## Storing High Score Model
## high_score_model ---> XGBoost Classifier Model
import pickle
high_score_model = XGBClassifier()
high_score_model.fit(X_train, y_train)
# dump information to that file
# pickle.dump(high_score_model, open('pickle/model.pkl', 'wb'))
pickle.dump(high_score_model, open("model.pkl", "wb"))
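# A minimal usage sketch (not in the original notebook): reload the pickled model and score a
# few held-out rows to confirm the saved file works as expected.
loaded_model = pickle.load(open("model.pkl", "rb"))
print("Reloaded model predictions on the first 5 test rows:", loaded_model.predict(X_test[:5]))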
# checking the feature importances of the gradient boosting model
plt.figure(figsize=(9, 7))
n_features = X_train.shape[1]
plt.barh(range(n_features), gbc.feature_importances_, align="center")
plt.yticks(np.arange(n_features), X_train.columns)
plt.title("Feature importances using permutation on full model")
plt.xlabel("Feature importance")
plt.ylabel("Feature")
plt.show()
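# The bar chart above uses the impurity-based feature_importances_ of the gradient boosting model.
# A sketch (not in the original notebook) of permutation importance on the held-out test set,
# which is usually a less biased view of which features the model actually relies on:
from sklearn.inspection import permutation_importance
perm = permutation_importance(gbc, X_test, y_test, n_repeats=5, random_state=42)
order = perm.importances_mean.argsort()
plt.figure(figsize=(9, 7))
plt.barh(X_test.columns[order], perm.importances_mean[order], align="center")
plt.xlabel("Mean drop in accuracy when the feature is permuted")
plt.title("Permutation feature importances (gradient boosting model)")
plt.show()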
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/179/129179663.ipynb
|
unridd-intrusion-detection-dataset
|
tapadhirdas
|
[{"Id": 129179663, "ScriptId": 38404310, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2871082, "CreationDate": "05/11/2023 15:19:56", "VersionNumber": 1.0, "Title": "Intrusion Detection || Model Comparison", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 451.0, "LinesInsertedFromPrevious": 451.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 6}]
|
[{"Id": 184997883, "KernelVersionId": 129179663, "SourceDatasetVersionId": 3727401}]
|
[{"Id": 3727401, "DatasetId": 2228769, "DatasourceVersionId": 3781766, "CreatorUserId": 5609278, "LicenseName": "CC BY-SA 4.0", "CreationDate": "05/31/2022 22:38:43", "VersionNumber": 1.0, "Title": "UNR-IDD Intrusion Detection Dataset", "Slug": "unridd-intrusion-detection-dataset", "Subtitle": "Network Intrusion Detection Dataset using Network Port Statistics", "Description": "**This manuscript has been published in the 2023 IEEE Consumer Communications and Networking Conference.**\n\n**License**\nYou may redistribute, republish, and mirror the UNR-IDD dataset in any form. However, any use or redistribution of the data must include a citation to the UNR-IDD dataset using the information provided: \n\n@inproceedings{das2023unr,\n title={UNR-IDD: Intrusion Detection Dataset using Network Port Statistics},\n author={Das, Tapadhir and Hamdan, Osama Abu and Shukla, Raj Mani and Sengupta, Shamik and Arslan, Engin},\n booktitle={2023 IEEE 20th Consumer Communications \\& Networking Conference (CCNC)},\n pages={497--500},\n year={2023},\n organization={IEEE}\n\n**MORE COMPREHENSIVE INFORMATION CAN BE LOCATED AT THE DATASET WEBPAGE @ ** \nhttps://www.tapadhirdas.com/unr-idd-dataset", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 2228769, "CreatorUserId": 5609278, "OwnerUserId": 5609278.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5795572.0, "CurrentDatasourceVersionId": 5872258.0, "ForumId": 2255020, "Type": 2, "CreationDate": "05/31/2022 22:38:43", "LastActivityDate": "05/31/2022", "TotalViews": 9567, "TotalDownloads": 1084, "TotalVotes": 22, "TotalKernels": 8}]
|
[{"Id": 5609278, "UserName": "tapadhirdas", "DisplayName": "Tapadhir Das", "RegisterDate": "08/11/2020", "PerformanceTier": 0}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
import warnings
warnings.filterwarnings("ignore")
# # Exploratory Data Analysis (EDA)
df = pd.read_csv("/kaggle/input/unridd-intrusion-detection-dataset/UNR-IDD.csv")
df.info()
df.head()
df.tail()
df.describe()
df.apply(pd.Series.value_counts)
sns.countplot(data=df, x="Binary Label")
sns.countplot(x=df["Label"])
df["Binary Label"].unique()
df["Label"].unique()
df["Port Number"].unique()
df["Switch ID"].unique()
df["Label"].value_counts()
df.info()
df.hist(bins=50, figsize=(15, 15))
# # Data Pre-processing
# split the records by Binary Label: 'Attack' vs 'Normal'
df_a = df[df["Binary Label"] == "Attack"]
df_n = df[df["Binary Label"] == "Normal"]
df_a
print(df_n["Label"].nunique())
# no need to use df_n any further, since it contains only the Normal class
df_n.info()
df_a = df_a.drop("Binary Label", axis=1)
df_a["Port Number"] = df_a["Port Number"].replace(
["Port#:1", "Port#:2", "Port#:3", "Port#:4"], [1, 2, 3, 4]
)
df_a["Switch ID"] = df_a["Switch ID"].replace(
[
"of:000000000000000c",
"of:000000000000000a",
"of:000000000000000b",
"of:0000000000000003",
"of:0000000000000004",
"of:0000000000000001",
"of:0000000000000002",
"of:0000000000000007",
"of:0000000000000008",
"of:0000000000000005",
"of:0000000000000006",
"of:0000000000000009",
],
[12, 10, 11, 3, 4, 1, 2, 7, 8, 5, 6, 9],
)
df_a["Label"] = df_a["Label"].replace(
["TCP-SYN", "Blackhole", "Diversion", "Overflow", "PortScan"], [0, 1, 2, 3, 4]
)
df_a.info()
df["Label"].value_counts().plot(kind="pie", autopct="%1.2f%%")
plt.title("Hacking Count")
plt.show()
plt.figure(figsize=(15, 15))
sns.heatmap(df_a.corr(), annot=True)
plt.show()
# pairplot for particular features
# plt_df = df_a[['Switch ID', 'Port Number', 'Received Packets', 'Received Bytes',
# 'Sent Bytes', 'Sent Packets', 'Port alive Duration (S)',
# 'Packets Rx Dropped', 'Packets Tx Dropped', 'Packets Rx Errors',
# 'Packets Tx Errors', 'Delta Received Packets', 'Delta Received Bytes',
# 'Delta Sent Bytes', 'Delta Sent Packets',
# 'Delta Port alive Duration (S)', 'Delta Packets Rx Dropped',
# ' Delta Packets Tx Dropped', 'Delta Packets Rx Errors',
# 'Delta Packets Tx Errors', 'Connection Point', 'Total Load/Rate',
# 'Total Load/Latest', 'Unknown Load/Rate', 'Unknown Load/Latest',
# 'Latest bytes counter', 'is_valid', 'Table ID', 'Active Flow Entries',
# 'Packets Looked Up', 'Packets Matched', 'Max Size', 'Label']]
# fig =sns.pairplot(data = plt_df,hue="Label",corner=True);
# fig.savefig("out.png")
from sklearn.model_selection import train_test_split
from sklearn import preprocessing
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import make_pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_squared_error, r2_score
import joblib
import warnings
warnings.filterwarnings("ignore")
df_a.info()
# Splitting the dataset into independent features (X) and the dependent target (y)
X = df_a.drop(
[
"Label",
"Packets Rx Dropped",
"Packets Tx Dropped",
"Packets Rx Errors",
"Packets Tx Errors",
"Delta Packets Rx Dropped",
" Delta Packets Tx Dropped",
"Delta Packets Rx Errors",
"Delta Packets Tx Errors",
"is_valid",
"Table ID",
"Max Size",
],
axis=1,
)
y = df_a["Label"]
y.unique()
X.shape, y.shape
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=42
)
X_train.shape, y_train.shape, X_test.shape, y_test.shape
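# (Optional sketch, not used by the models below) distance- and gradient-based learners such as
# KNN and the MLP usually benefit from feature scaling; a minimal StandardScaler version:
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)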
# ## Model Building & Training:
# Supervised machine learning is one of the most commonly used and successful types of machine learning. Supervised learning is used whenever we want to predict a certain outcome/label from a given set of features, and we have examples of features-label pairs. We build a machine learning model from these features-label pairs, which comprise our training set. Our goal is to make accurate predictions for new, never-before-seen data.
# There are two major types of supervised machine learning problems, called classification and regression. Our dataset is a classification problem, since the target is the discrete attack label rather than a continuous number. The supervised machine learning models (classifiers) considered to train the dataset in this notebook are:
# 1. Logistic Regression
# 2. k-Nearest Neighbors
# 3. Support Vector Classifier
# 4. Naive Bayes
# 5. Decision Tree
# 6. Random Forest
# 7. Gradient Boosting
# 8. Catboost
# 9. Xgboost
# 10. Multilayer Perceptrons
#
# The metrics considered to evaluate the model performance are accuracy, F1 score, recall, and precision.
# Creating holders to store the model performance results
from sklearn import metrics
ML_Model = []
accuracy = []
f1_score = []
recall = []
precision = []
# function to call for storing the results
def storeResults(model, a, b, c, d):
ML_Model.append(model)
accuracy.append(round(a, 3))
f1_score.append(round(b, 3))
recall.append(round(c, 3))
precision.append(round(d, 3))
def model_report(modelname, y_train, y_test, p_train, p_test):
# computing the accuracy, f1_score, Recall, precision of the model performance
# computing the classification report of the model
# storing the results. The below mentioned order of parameter passing is important
print("Model:{}\n".format(modelname))
acc_train = metrics.accuracy_score(y_train, p_train)
acc_test = metrics.accuracy_score(y_test, p_test)
print("Accuracy on training Data: {:.3f}".format(acc_train))
print("Accuracy on test Data: {:.3f}\n".format(acc_test))
f1_score_train = metrics.f1_score(y_train, p_train, average="micro")
f1_score_test = metrics.f1_score(y_test, p_test, average="micro")
print("f1_score on training Data: {:.3f}".format(f1_score_train))
print("f1_score on test Data: {:.3f}\n".format(f1_score_test))
recall_score_train = metrics.recall_score(y_train, p_train, average="micro")
recall_score_test = metrics.recall_score(y_test, p_test, average="micro")
print("Recall on training Data: {:.3f}".format(recall_score_train))
print("Recall on test Data: {:.3f}\n".format(recall_score_test))
precision_score_train = metrics.precision_score(y_train, p_train, average="micro")
precision_score_test = metrics.precision_score(y_test, p_test, average="micro")
print("Precision on training Data: {:.3f}".format(precision_score_train))
print("Precision on test Data: {:.3f}\n".format(precision_score_test))
# computing the classification report of the model
print("Classification Report")
print(metrics.classification_report(y_test, p_test))
# storing the results
storeResults(
modelname, acc_test, f1_score_test, recall_score_test, precision_score_test
)
# ## Logistic Regression
# Logistic regression predicts the value of a categorical dependent variable, so the outcome must be a categorical or discrete value. It is closely related to linear regression but differs in how it is used: linear regression is used for solving regression problems, whereas logistic regression is used for solving classification problems.
# Logistic regression model
from sklearn.linear_model import LogisticRegression
# from sklearn.pipeline import Pipeline
# instantiate the model
log = LogisticRegression()
# fit the model
log.fit(X_train, y_train)
# predicting the target value from the model for the samples
p_train_log = log.predict(X_train)
p_test_log = log.predict(X_test)
model_report(str(log), y_train, y_test, p_train_log, p_test_log)
# ## K-Nearest Neighbors : Classifier
# K-Nearest Neighbours is one of the simplest machine learning algorithms, based on the supervised learning technique. The K-NN algorithm assumes similarity between the new case and the available cases and puts the new case into the category that is most similar to the available categories.
# K-Nearest Neighbors Classifier model
from sklearn.neighbors import KNeighborsClassifier
# instantiate the model
knn = KNeighborsClassifier(n_neighbors=5)
# fit the model
knn.fit(X_train, y_train)
training_accuracy = []
test_accuracy = []
# try n_neighbors from 1 to 19
depth = range(1, 20)
for n in depth:
knn1 = KNeighborsClassifier(n_neighbors=n)
knn1.fit(X_train, y_train)
# record training set accuracy
training_accuracy.append(knn1.score(X_train, y_train))
# record generalization accuracy
test_accuracy.append(knn1.score(X_test, y_test))
# plotting the training & testing accuracy for n_neighbors from 1 to 19
plt.plot(depth, training_accuracy, label="training accuracy")
plt.plot(depth, test_accuracy, label="test accuracy")
plt.ylabel("Accuracy")
plt.xlabel("n_neighbors")
plt.legend()
# predicting the target value from the model for the samples
p_train_knn = knn.predict(X_train)
p_test_knn = knn.predict(X_test)
model_report(str(knn), y_train, y_test, p_train_knn, p_test_knn)
# ## Naive Bayes : Classifier
# Naïve Bayes is a supervised learning algorithm based on Bayes' theorem and used for solving classification problems. It is mainly used for text and image classification with high-dimensional training data. The Naïve Bayes classifier is one of the simplest and most effective classification algorithms and helps build fast machine learning models that can make quick predictions.
# Naive Bayes Classifier Model
from sklearn.naive_bayes import GaussianNB
from sklearn.pipeline import Pipeline
# instantiate the model
nb = GaussianNB()
# fit the model
nb.fit(X_train, y_train)
# predicting the target value from the model for the samples
p_train_nb = nb.predict(X_train)
p_test_nb = nb.predict(X_test)
model_report(str(nb), y_train, y_test, p_train_nb, p_test_nb)
# ## Decision Trees : Classifier
# Decision Tree is a Supervised learning technique that can be used for both classification and Regression problems, but mostly it is preferred for solving Classification problems. It is a tree-structured classifier, where internal nodes represent the features of a dataset, branches represent the decision rules and each leaf node represents the outcome.
# Decision Tree Classifier model
from sklearn.tree import DecisionTreeClassifier
# instantiate the model
tree = DecisionTreeClassifier(max_depth=30)
# fit the model
tree.fit(X_train, y_train)
# predicting the target value from the model for the samples
p_train_tree = tree.predict(X_train)
p_test_tree = tree.predict(X_test)
model_report(str(tree), y_train, y_test, p_train_tree, p_test_tree)
training_accuracy = []
test_accuracy = []
# try max_depth from 1 to 30
depth = range(1, 30)
for n in depth:
tree_test = DecisionTreeClassifier(max_depth=n)
tree_test.fit(X_train, y_train)
# record training set accuracy
training_accuracy.append(tree_test.score(X_train, y_train))
# record generalization accuracy
test_accuracy.append(tree_test.score(X_test, y_test))
# plotting the training & testing accuracy for max_depth from 1 to 30
plt.plot(depth, training_accuracy, label="training accuracy")
plt.plot(depth, test_accuracy, label="test accuracy")
plt.ylabel("Accuracy")
plt.xlabel("max_depth")
plt.legend()
# ## Random Forest : Classifier
# Random Forest is a popular machine learning algorithm that belongs to the supervised learning technique. It can be used for both Classification and Regression problems in ML. It is based on the concept of ensemble learning, which is a process of combining multiple classifiers to solve a complex problem and to improve the performance of the model.
# Random Forest Classifier Model
from sklearn.ensemble import RandomForestClassifier
# instantiate the model
forest = RandomForestClassifier(n_estimators=10)
# fit the model
forest.fit(X_train, y_train)
# predicting the target value from the model for the samples
p_train_forest = forest.predict(X_train)
p_test_forest = forest.predict(X_test)
model_report(str(forest), y_train, y_test, p_train_forest, p_test_forest)
# ## Gradient Boosting Classifier
# Gradient boosting classifiers are a group of machine learning algorithms that combine many weak learning models together to create a strong predictive model. Decision trees are usually used when doing gradient boosting. Boosting algorithms play a crucial role in dealing with bias variance trade-off. Unlike bagging algorithms, which only controls for high variance in a model, boosting controls both the aspects (bias & variance), and is considered to be more effective.
# Gradient Boosting Classifier Model
from sklearn.ensemble import GradientBoostingClassifier
# instantiate the model
gbc = GradientBoostingClassifier(max_depth=4, learning_rate=0.7)
# fit the model
gbc.fit(X_train, y_train)
# predicting the target value from the model for the samples
p_train_gbc = gbc.predict(X_train)
p_test_gbc = gbc.predict(X_test)
model_report(str(gbc), y_train, y_test, p_train_gbc, p_test_gbc)
# ## CatBoost Classifier
# CatBoost is a recently open-sourced machine learning algorithm from Yandex. It can easily integrate with deep learning frameworks like Google’s TensorFlow and Apple’s Core ML. It can work with diverse data types to help solve a wide range of problems that businesses face today.
# catboost Classifier Model
from catboost import CatBoostClassifier
# instantiate the model
cat = CatBoostClassifier(learning_rate=0.1)
# fit the model
cat.fit(X_train, y_train)
# predicting the target value from the model for the samples
p_train_cat = cat.predict(X_train)
p_test_cat = cat.predict(X_test)
model_report(str(cat), y_train, y_test, p_train_cat, p_test_cat)
# ## XGBoost Classifier
# XGBoost is an implementation of gradient-boosted decision trees designed for speed and performance, and it is dominant in competitive machine learning. Below we create a baseline XGBoost model in Python.
# XGBoost Classifier Model
from xgboost import XGBClassifier
# instantiate the model
xgb = XGBClassifier()
# fit the model
xgb.fit(X_train, y_train)
# predicting the target value from the model for the samples
p_train_xgb = xgb.predict(X_train)
p_test_xgb = xgb.predict(X_test)
model_report(str(xgb), y_train, y_test, p_train_xgb, p_test_xgb)
# ## Multi-layer Perceptron classifier
# MLPClassifier stands for Multi-layer Perceptron classifier, which, as the name suggests, is backed by a neural network. Unlike classification algorithms such as Support Vector Machines or the Naive Bayes classifier, MLPClassifier relies on an underlying neural network to perform the task of classification.
#
# Multi-layer Perceptron Classifier Model
from sklearn.neural_network import MLPClassifier
# instantiate the model
mlp = MLPClassifier()
# mlp = GridSearchCV(mlpc, parameter_space)
# fit the model
mlp.fit(X_train, y_train)
# predicting the target value from the model for the samples
p_train_mlp = mlp.predict(X_train)
p_test_mlp = mlp.predict(X_test)
model_report(str(mlp), y_train, y_test, p_train_mlp, p_test_mlp)
# ## Comparison of Models
# To compare the models' performance, a dataframe is created; its columns are the lists used above to store the results of each model.
# creating dataframe
result = pd.DataFrame(
{
"ML Model": ML_Model,
"Accuracy": accuracy,
"f1_score": f1_score,
"Recall": recall,
"Precision": precision,
}
)
# Sorting the dataframe by accuracy
sorted_result = result.sort_values(
by=["Accuracy", "f1_score"], ascending=False
).reset_index(drop=True)
sorted_result
# ## Storing High Score Model
## high_score_model ---> XGBoost Classifier Model
import pickle
high_score_model = XGBClassifier()
high_score_model.fit(X_train, y_train)
# dump information to that file
# pickle.dump(high_score_model, open('pickle/model.pkl', 'wb'))
pickle.dump(high_score_model, open("model.pkl", "wb"))
# checking the feature importances of the gradient boosting model
plt.figure(figsize=(9, 7))
n_features = X_train.shape[1]
plt.barh(range(n_features), gbc.feature_importances_, align="center")
plt.yticks(np.arange(n_features), X_train.columns)
plt.title("Feature importances using permutation on full model")
plt.xlabel("Feature importance")
plt.ylabel("Feature")
plt.show()
| false | 1 | 4,997 | 6 | 5,274 | 4,997 |
||
129009262
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # 🚀Spaceship Titanic -📊EDA + 27 different models📈
# installing libraries
from IPython.display import clear_output
clear_output()
import numpy as np
import pandas as pd
import seaborn as sns
import plotly.express as px
import matplotlib.pyplot as plt
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from sklearn.impute import SimpleImputer
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedKFold, train_test_split
# from lightgbm import LGBMClassifier
# import lazypredict
# from lazypredict.Supervised import LazyClassifier
import time
import warnings
warnings.filterwarnings("ignore")
# ## data loading
train = pd.read_csv("../input/spaceship-titanic/train.csv")
test = pd.read_csv("../input/spaceship-titanic/test.csv")
submission = pd.read_csv("../input/spaceship-titanic/sample_submission.csv")
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = "median"
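# (Assumption about later use, not shown in this notebook: STRATEGY is typically passed to
# SimpleImputer(strategy=STRATEGY) for numeric gaps and FOLDS to
# StratifiedKFold(n_splits=FOLDS, shuffle=True, random_state=RANDOM_STATE) for cross-validation;
# a small imputation sketch is added at the end of this notebook.)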
# # EDA
train.head()
# data info and missing-value check
print(f"\033[94mNumber of rows in train data: {train.shape[0]}")
print(f"\033[94mNumber of columns in train data: {train.shape[1]}")
print(f"\033[94mNumber of values in train data: {train.count().sum()}")
print(f"\033[94mNumber missing values in train data: {sum(train.isna().sum())}")
# 결측치 확인
print(f"\033[94m")
print(train.isna().sum().sort_values(ascending=False)) # 내림차순
train.describe()
# ### check the test set in the same way
test.head()
print(f"\033[94mNumber of rows in test data: {test.shape[0]}")
print(f"\033[94mNumber of columns in test data: {test.shape[1]}")
print(f"\033[94mNumber of values in train data: {test.count().sum()}")
print(f"\033[94mNo of rows with missing values in test data: {sum(test.isna().sum())}")
print(f"\033[94m")
print((test.isna().sum().sort_values(ascending=False)))
test.describe()
# ### also check the submission file
submission.head()
# ## data visualization
train.drop(["PassengerId"], axis=1, inplace=True)
test.drop(["PassengerId"], axis=1, inplace=True)
TARGET = "Transported"
FEATURES = [col for col in train.columns if col != TARGET]
RANDOM_STATE = 12
train.iloc[:, :-1].describe().T.sort_values(
by="std", ascending=False
).style.background_gradient(cmap="GnBu").bar(subset=["max"], color="#BB0000").bar(
subset=[
"mean",
],
color="green",
)
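# (Sketch of a possible next step, not in the original notebook) fill numeric missing values with
# the STRATEGY chosen above; categorical columns would need separate handling, e.g. a
# most-frequent imputer or an explicit "Missing" category.
num_cols = train.select_dtypes(include="number").columns
imputer = SimpleImputer(strategy=STRATEGY)
train[num_cols] = imputer.fit_transform(train[num_cols])
test[num_cols] = imputer.transform(test[num_cols])
print("Remaining numeric NaNs (train, test):", train[num_cols].isna().sum().sum(), test[num_cols].isna().sum().sum())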
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/009/129009262.ipynb
| null | null |
[{"Id": 129009262, "ScriptId": 38349115, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11686298, "CreationDate": "05/10/2023 09:31:06", "VersionNumber": 2.0, "Title": "Spaceship Titanic", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 110.0, "LinesInsertedFromPrevious": 48.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 62.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # 🚀Spaceship Titanic -📊EDA + 27 different models📈
# installing libraries
from IPython.display import clear_output
clear_output()
import numpy as np
import pandas as pd
import seaborn as sns
import plotly.express as px
import matplotlib.pyplot as plt
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from sklearn.impute import SimpleImputer
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedKFold, train_test_split
# from lightgbm import LGBMClassifier
# import lazypredict
# from lazypredict.Supervised import LazyClassifier
import time
import warnings
warnings.filterwarnings("ignore")
# ## data loading
train = pd.read_csv("../input/spaceship-titanic/train.csv")
test = pd.read_csv("../input/spaceship-titanic/test.csv")
submission = pd.read_csv("../input/spaceship-titanic/sample_submission.csv")
RANDOM_STATE = 12
FOLDS = 5
STRATEGY = "median"
# # EDA
train.head()
# data info and missing-value check
print(f"\033[94mNumber of rows in train data: {train.shape[0]}")
print(f"\033[94mNumber of columns in train data: {train.shape[1]}")
print(f"\033[94mNumber of values in train data: {train.count().sum()}")
print(f"\033[94mNumber missing values in train data: {sum(train.isna().sum())}")
# 결측치 확인
print(f"\033[94m")
print(train.isna().sum().sort_values(ascending=False)) # 내림차순
train.describe()
# ### check the test set in the same way
test.head()
print(f"\033[94mNumber of rows in test data: {test.shape[0]}")
print(f"\033[94mNumber of columns in test data: {test.shape[1]}")
print(f"\033[94mNumber of values in train data: {test.count().sum()}")
print(f"\033[94mNo of rows with missing values in test data: {sum(test.isna().sum())}")
print(f"\033[94m")
print((test.isna().sum().sort_values(ascending=False)))
test.describe()
# ### also check the submission file
submission.head()
# ## data visualization
train.drop(["PassengerId"], axis=1, inplace=True)
test.drop(["PassengerId"], axis=1, inplace=True)
TARGET = "Transported"
FEATURES = [col for col in train.columns if col != TARGET]
RANDOM_STATE = 12
train.iloc[:, :-1].describe().T.sort_values(
by="std", ascending=False
).style.background_gradient(cmap="GnBu").bar(subset=["max"], color="#BB0000").bar(
subset=[
"mean",
],
color="green",
)
| false | 0 | 968 | 0 | 968 | 968 |
||
129009155
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
print("success")
# # Mount google drive
from google.colab import drive
drive.mount("/content/drive")
# # Load train and validation data from Facebook HMD dataset
folder_path_train = "/content/drive/MyDrive/HMD_project/train.jsonl"
folder_path_dev = "/content/drive/MyDrive/HMD_project/dev.jsonl"
df_train = pd.read_json(folder_path_train, lines=True)
df_dev = pd.read_json(folder_path_dev, lines=True)
print(df_dev.tail())
# # check distribution of data in train and validation dataset
# distribution of the class labels in the training data
df_train["label"].value_counts().plot(
kind="bar", figsize=(6, 6), width=0.2, title="Training data"
)
print("Distribution of training dataset\n", df_train.label.value_counts(), "\n")
print("Distribution of validation dataset\n", df_dev.label.value_counts())
# # Check null values
print(df_train.isna().sum())
print("\n\n", df_dev.isna().sum())
# # from the text file, load the text corresponding to the augmented images
with open("/content/drive/MyDrive/HMD_project/new/text_aug_norm.txt", "r") as f:
lines = f.readlines()
file_names = []
for i in lines:
file_names.append(i[: i.find("\n")])
print(type(file_names[0]))
# # add augmented text in the training dataframe
for i in range(len(file_names)):
df_train.loc[len(df_train.index)] = [1, "img/1", 1, file_names[i]]
data = df_train
print(
"Distribution of training dataset after augmentation\n", data.label.value_counts()
)
# # load pretrained word embedding model from gensim library
import gensim.downloader
print(list(gensim.downloader.info()["models"].keys()))
# import gensim.downloader as api
# wv=api.load('glove-twitter-200')
# wv.save('/content/drive/MyDrive/HMD_project/glove-twitter-200')
from gensim.models import KeyedVectors
wv = KeyedVectors.load("/content/drive/MyDrive/HMD_project/glove-twitter-200")
# # word to vector conversion using gensim 'glove-twitter-200' model
w = wv["hate"]
print(w)
print("\n\nlength of word vector", len(w))
print("\n\n type of word vector model ", type(wv))
print("\n\n word vector type", type(w))
# # Import spacy library for text preprocessing
import spacy
# # use the spaCy 'en_core_web_sm' model for preprocessing
import spacy.cli
spacy.cli.download("en_core_web_sm")
nlp = spacy.load("en_core_web_sm")
# # tokenize each sentence, lemmatize each token, and drop punctuation, whitespace, brackets and stop words
def preprocess(text):
doc = nlp(text)
filtered_token = []
for token in doc:
if token.is_punct or token.is_space or token.is_bracket or token.is_stop:
continue
else:
token = token.lemma_
filtered_token.append(token)
return filtered_token
# # example showing text preprocessing
tokens = preprocess(
"My best friend Anu, (who is three months older than me) is coming to my house tonight!!!."
)
tokens
# # Apply preprocessing on the text column of training dataset and validation dataset
df_dev["processed_text_val"] = df_dev["text"].apply(lambda x: preprocess(x))
data["processed_text"] = data["text"].apply(lambda x: preprocess(x))
data.head()
# # use gensim pretrained model to vectorize each token in the preprocessed text and take the average of vectors to keep the dimension same
import numpy as np
def gensim_vector(token):
vec_size = wv.vector_size
wv_final = np.zeros(vec_size) # take a vector consisting '0s' having size of wv
count = 1
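    # starting count at 1 avoids division by zero for texts with no in-vocabulary tokens,
    # at the cost of slightly dampening the averaged vector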
for t in token:
if t in wv:
count += 1
wv_final += wv[t] # vectorize word and add to previous value
return wv_final / count # take the average
# # Apply the vectorization process on processed text column of the validation and training dataset
data["text_vector"] = data["processed_text"].apply(gensim_vector)
df_dev["text_vector_val"] = df_dev["processed_text_val"].apply(gensim_vector)
print(data.head(), "\n\n")
print(df_dev.head())
len(data.text_vector.iloc[0])
# Save text vector in a numpy file
text_vector = data["text_vector"]
text_vector = np.stack(text_vector)
print(text_vector.shape)
print(text_vector[0].shape)
np.save(
"/content/drive/MyDrive/HMD_project/new/twitter_embedding_train_text.npy",
text_vector,
)
text_vector_val = df_dev["text_vector_val"]
text_vector_val = np.stack(text_vector_val)
print(text_vector_val.shape)
print(text_vector_val[0].shape)
np.save(
"/content/drive/MyDrive/HMD_project/new/twitter_embedding_val_text.npy",
text_vector_val,
)
# # Use Latent Semantic analysis for text embedding
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
# # TFIDF vectorizer
# # topic encoded text vector using SVD (LSA) on TFIDF vectorizer
vectorizer = TfidfVectorizer(stop_words="english")
svd = TruncatedSVD(n_components=1000)
# for training dataset
processed_text = data["processed_text"].apply(lambda x: " ".join(x))
text_vec_tfidf = vectorizer.fit_transform(processed_text)
print("CountVectorizer shape_training dataset", text_vec_tfidf.shape)
# for valiadation dataset
processed_text_val = df_dev["processed_text_val"].apply(lambda x: " ".join(x))
text_vec_tfidf_val = vectorizer.transform(processed_text_val)
print("CountVectorizer shape_val dataset", text_vec_tfidf_val.shape)
# training dataset
lsa_text = svd.fit_transform(text_vec_tfidf)
print("\nvariance_captured_by 1000 components", svd.explained_variance_ratio_.sum())
# validation dataset
lsa_text_val = svd.transform(text_vec_tfidf_val)
print("\n", lsa_text.shape)
print(lsa_text_val.shape)
# # save embedded tfidf-lsa vector as npy file
np.save("/content/drive/MyDrive/HMD_project/new/lsa_tfidf_train_text.npy", lsa_text)
np.save("/content/drive/MyDrive/HMD_project/new/lsa_tfidf_val_text.npy", lsa_text_val)
# # topic encoded text vector using SVD (LSA) on BOW vectorizer
vectorizer = CountVectorizer(stop_words="english")
svd2 = TruncatedSVD(n_components=1000)
# for training dataset
processed_text = data["processed_text"].apply(lambda x: " ".join(x))
text_vec_bow = vectorizer.fit_transform(processed_text)
print("CountVectorizer shape_training dataset", text_vec_bow.shape)
# for validation dataset
processed_text_val = df_dev["processed_text_val"].apply(lambda x: " ".join(x))
text_vec_bow_val = vectorizer.transform(processed_text_val)
print("CountVectorizer shape_val dataset", text_vec_bow_val.shape)
lsa_bow_text = svd2.fit_transform(text_vec_bow)
## variance captured by 1000 components
print("\nvariance_captured_by 1000 components", svd2.explained_variance_ratio_.sum())
lsa_bow_text_val = svd2.transform(text_vec_bow_val)
print("\n", lsa_bow_text.shape)
print(lsa_bow_text_val.shape)
np.save("/content/drive/MyDrive/HMD_project/new/lsa_bow_train_text.npy", lsa_bow_text)
np.save("/content/drive/MyDrive/HMD_project/new/lsa_bow_val_text.npy", lsa_bow_text_val)
# # TFIDF vectorizer
# # topic encoded text vector using NMF on TFIDF vectorizer
from sklearn.decomposition import NMF
vectorizer = TfidfVectorizer()
nmf = NMF(n_components=100)
# for training dataset
processed_text = data["processed_text"].apply(lambda x: " ".join(x))
text_vec_tfidf = vectorizer.fit_transform(processed_text)
print("CountVectorizer shape_training dataset", text_vec_tfidf.shape)
# for valiadation dataset
processed_text_val = df_dev["processed_text_val"].apply(lambda x: " ".join(x))
text_vec_tfidf_val = vectorizer.transform(processed_text_val)
print("CountVectorizer shape_val dataset", text_vec_tfidf_val.shape)
# training dataset
nmf_text = nmf.fit_transform(text_vec_tfidf)
# validation dataset
nmf_text_val = nmf.transform(text_vec_tfidf_val)
print(nmf_text.shape)
print(nmf_text_val.shape)
np.save("/content/drive/MyDrive/HMD_project/new/nmf_tfidf_train_text.npy", nmf_text)
np.save("/content/drive/MyDrive/HMD_project/new/nmf_tfidf_val_text.npy", nmf_text_val)
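# (Optional sketch, not in the original notebook) inspect the top words of the first few NMF topics
# as a sanity check; assumes the fitted `nmf` and TF-IDF `vectorizer` above and
# scikit-learn >= 1.0 for get_feature_names_out().
feature_names = vectorizer.get_feature_names_out()
for topic_idx in range(3):
    top_idx = nmf.components_[topic_idx].argsort()[::-1][:10]
    print(f"Topic {topic_idx}:", [feature_names[i] for i in top_idx])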
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/009/129009155.ipynb
| null | null |
[{"Id": 129009155, "ScriptId": 38349456, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13939396, "CreationDate": "05/10/2023 09:30:14", "VersionNumber": 1.0, "Title": "Topic_modeling_NMF", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 279.0, "LinesInsertedFromPrevious": 279.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
print("success")
# # Mount google drive
from google.colab import drive
drive.mount("/content/drive")
# # Load train and validation data from Facebook HMD dataset
folder_path_train = "/content/drive/MyDrive/HMD_project/train.jsonl"
folder_path_dev = "/content/drive/MyDrive/HMD_project/dev.jsonl"
df_train = pd.read_json(folder_path_train, lines=True)
df_dev = pd.read_json(folder_path_dev, lines=True)
print(df_dev.tail())
# # check distribution of data in train and validation dataset
# distribution of the class labels in the training data
df_train["label"].value_counts().plot(
kind="bar", figsize=(6, 6), width=0.2, title="Training data"
)
print("Distribution of training dataset\n", df_train.label.value_counts(), "\n")
print("Distribution of validation dataset\n", df_dev.label.value_counts())
# # Check null values
print(df_train.isna().sum())
print("\n\n", df_dev.isna().sum())
# # from the text file, load the text corresponding to the augmented images
with open("/content/drive/MyDrive/HMD_project/new/text_aug_norm.txt", "r") as f:
lines = f.readlines()
file_names = []
for i in lines:
file_names.append(i[: i.find("\n")])
print(type(file_names[0]))
# # add augmented text in the training dataframe
for i in range(len(file_names)):
df_train.loc[len(df_train.index)] = [1, "img/1", 1, file_names[i]]
data = df_train
print(
"Distribution of training dataset after augmentation\n", data.label.value_counts()
)
# # load pretrained word embedding model from gensim library
import gensim.downloader
print(list(gensim.downloader.info()["models"].keys()))
# import gensim.downloader as api
# wv=api.load('glove-twitter-200')
# wv.save('/content/drive/MyDrive/HMD_project/glove-twitter-200')
from gensim.models import KeyedVectors
wv = KeyedVectors.load("/content/drive/MyDrive/HMD_project/glove-twitter-200")
# # word to vector conversion using gensim 'glove-twitter-200' model
w = wv["hate"]
print(w)
print("\n\nlength of word vector", len(w))
print("\n\n type of word vector model ", type(wv))
print("\n\n word vector type", type(w))
# # Import spacy library for text preprocessing
import spacy
# # use the spaCy 'en_core_web_sm' model for preprocessing
import spacy.cli
spacy.cli.download("en_core_web_sm")
nlp = spacy.load("en_core_web_sm")
# # tokenize each sentence, lemmatize each token, and drop punctuation, whitespace, brackets and stop words
def preprocess(text):
doc = nlp(text)
filtered_token = []
for token in doc:
if token.is_punct or token.is_space or token.is_bracket or token.is_stop:
continue
else:
token = token.lemma_
filtered_token.append(token)
return filtered_token
# # example showing text preprocessing
tokens = preprocess(
"My best friend Anu, (who is three months older than me) is coming to my house tonight!!!."
)
tokens
# # Apply preprocessing on the text column of training dataset and validation dataset
df_dev["processed_text_val"] = df_dev["text"].apply(lambda x: preprocess(x))
data["processed_text"] = data["text"].apply(lambda x: preprocess(x))
data.head()
# # use gensim pretrained model to vectorize each token in the preprocessed text and take the average of vectors to keep the dimension same
import numpy as np
def gensim_vector(token):
vec_size = wv.vector_size
wv_final = np.zeros(vec_size) # take a vector consisting '0s' having size of wv
count = 1
for t in token:
if t in wv:
count += 1
wv_final += wv[t] # vectorize word and add to previous value
return wv_final / count # take the average
# # Apply the vectorization process on processed text column of the validation and training dataset
data["text_vector"] = data["processed_text"].apply(gensim_vector)
df_dev["text_vector_val"] = df_dev["processed_text_val"].apply(gensim_vector)
print(data.head(), "\n\n")
print(df_dev.head())
len(data.text_vector.iloc[0])
# Save text vector in a numpy file
text_vector = data["text_vector"]
text_vector = np.stack(text_vector)
print(text_vector.shape)
print(text_vector[0].shape)
np.save(
"/content/drive/MyDrive/HMD_project/new/twitter_embedding_train_text.npy",
text_vector,
)
text_vector_val = df_dev["text_vector_val"]
text_vector_val = np.stack(text_vector_val)
print(text_vector_val.shape)
print(text_vector_val[0].shape)
np.save(
"/content/drive/MyDrive/HMD_project/new/twitter_embedding_val_text.npy",
text_vector_val,
)
# # Use Latent Semantic analysis for text embedding
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD
# # TFIDF vectorizer
# # topic encoded text vector using SVD (LSA) on TFIDF vectorizer
vectorizer = TfidfVectorizer(stop_words="english")
svd = TruncatedSVD(n_components=1000)
# for training dataset
processed_text = data["processed_text"].apply(lambda x: " ".join(x))
text_vec_tfidf = vectorizer.fit_transform(processed_text)
print("CountVectorizer shape_training dataset", text_vec_tfidf.shape)
# for valiadation dataset
processed_text_val = df_dev["processed_text_val"].apply(lambda x: " ".join(x))
text_vec_tfidf_val = vectorizer.transform(processed_text_val)
print("CountVectorizer shape_val dataset", text_vec_tfidf_val.shape)
# training dataset
lsa_text = svd.fit_transform(text_vec_tfidf)
print("\nvariance_captured_by 1000 components", svd.explained_variance_ratio_.sum())
# validation dataset
lsa_text_val = svd.transform(text_vec_tfidf_val)
print("\n", lsa_text.shape)
print(lsa_text_val.shape)
# # save embedded tfidf-lsa vector as npy file
np.save("/content/drive/MyDrive/HMD_project/new/lsa_tfidf_train_text.npy", lsa_text)
np.save("/content/drive/MyDrive/HMD_project/new/lsa_tfidf_val_text.npy", lsa_text_val)
# # topic encoded text vector using SVD (LSA) on BOW vectorizer
vectorizer = CountVectorizer(stop_words="english")
svd2 = TruncatedSVD(n_components=1000)
# for training dataset
processed_text = data["processed_text"].apply(lambda x: " ".join(x))
text_vec_bow = vectorizer.fit_transform(processed_text)
print("CountVectorizer shape_training dataset", text_vec_bow.shape)
# for validation dataset
processed_text_val = df_dev["processed_text_val"].apply(lambda x: " ".join(x))
text_vec_bow_val = vectorizer.transform(processed_text_val)
print("CountVectorizer shape_val dataset", text_vec_bow_val.shape)
lsa_bow_text = svd2.fit_transform(text_vec_bow)
## variance captured by 1000 components
print("\nvariance_captured_by 1000 components", svd2.explained_variance_ratio_.sum())
lsa_bow_text_val = svd2.transform(text_vec_bow_val)
print("\n", lsa_bow_text.shape)
print(lsa_bow_text_val.shape)
np.save("/content/drive/MyDrive/HMD_project/new/lsa_bow_train_text.npy", lsa_bow_text)
np.save("/content/drive/MyDrive/HMD_project/new/lsa_bow_val_text.npy", lsa_bow_text_val)
# # TFIDF vectorizer
# # topic encoded text vector using NMF on TFIDF vectorizer
from sklearn.decomposition import NMF
vectorizer = TfidfVectorizer()
nmf = NMF(n_components=100)
# for training dataset
processed_text = data["processed_text"].apply(lambda x: " ".join(x))
text_vec_tfidf = vectorizer.fit_transform(processed_text)
print("CountVectorizer shape_training dataset", text_vec_tfidf.shape)
# for valiadation dataset
processed_text_val = df_dev["processed_text_val"].apply(lambda x: " ".join(x))
text_vec_tfidf_val = vectorizer.transform(processed_text_val)
print("CountVectorizer shape_val dataset", text_vec_tfidf_val.shape)
# training dataset
nmf_text = nmf.fit_transform(text_vec_tfidf)
# validation dataset
nmf_text_val = nmf.transform(text_vec_tfidf_val)
print(nmf_text.shape)
print(nmf_text_val.shape)
np.save("/content/drive/MyDrive/HMD_project/new/nmf_tfidf_train_text.npy", nmf_text)
np.save("/content/drive/MyDrive/HMD_project/new/nmf_tfidf_val_text.npy", nmf_text_val)
| false | 0 | 2,618 | 0 | 2,618 | 2,618 |
||
129173816
|
<jupyter_start><jupyter_text>German Credit Risk
# Context
The original dataset contains 1000 entries with 20 categorial/symbolic attributes prepared by Prof. Hofmann. In this dataset, each entry represents a person who takes a credit by a bank. Each person is classified as good or bad credit risks according to the set of attributes. The link to the original dataset can be found below.
# Content
It is almost impossible to understand the original dataset due to its complicated system of categories and symbols. Thus, I wrote a small Python script to convert it into a readable CSV file. Several columns are simply ignored, because in my opinion either they are not important or their descriptions are obscure. The selected attributes are:
1. Age (numeric)
2. Sex (text: male, female)
3. Job (numeric: 0 - unskilled and non-resident, 1 - unskilled and resident, 2 - skilled, 3 - highly skilled)
4. Housing (text: own, rent, or free)
5. Saving accounts (text - little, moderate, quite rich, rich)
6. Checking account (numeric, in DM - Deutsch Mark)
7. Credit amount (numeric, in DM)
8. Duration (numeric, in month)
9. Purpose (text: car, furniture/equipment, radio/TV, domestic appliances, repairs, education, business, vacation/others)
# Acknowledgements
Source: [UCI][1]
[1]: https://archive.ics.uci.edu/ml/datasets/Statlog+%28German+Credit+Data%29
Kaggle dataset identifier: german-credit
<jupyter_script>import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import plotly.express as px
import warnings
warnings.filterwarnings("ignore")
# ## Importing Data
data = pd.read_csv("/kaggle/input/german-credit/german_credit_data.csv")
data.head(-2)
data.info()
print(f"-----\n{data.dtypes.value_counts()}")
# Let's look at the NaN data.
data.isna().sum()
# fill them with 'Unknown'
data["Saving accounts"] = data["Saving accounts"].fillna("Unknown")
data["Checking account"] = data["Checking account"].fillna("Unknown")
# Let's check
data.isna().sum()
# the 'Unnamed: 0' column is unnecessary.
data.drop(["Unnamed: 0"], axis=1, inplace=True)
data.sample(5)
# ## EDA
values = data["Purpose"].value_counts()
values
names = data["Purpose"].unique()
names
# First of all we are starting with Purpose
values = data["Purpose"].value_counts()
names = names = data["Purpose"].unique()
fig = px.pie(data, values=values, names=names)
fig.show()
# For the Age column: the 10 most frequent values
ages = data["Age"].value_counts()[:10]
Numbers = ages.values
# it's horizontal! (y-axis: age, bar length: count)
plt.barh(ages.index.astype(str), Numbers, color="#FFB300")
plt.show()
jobs = data["Job"].value_counts()
Namesofjobs = jobs.index
Namesofjobs = Namesofjobs.astype(str)
jobs.values, Namesofjobs
# Target Column: Job
jobs = data["Job"].value_counts()
Namesofjobs = jobs.index
plt.figure(figsize=(5, 7))
plt.bar(Namesofjobs, jobs.values, label="Value of Jobs", width=0.5, color="#FFB300")
plt.xlabel("X-Axis")
plt.ylabel("Y-Axis")
plt.title("Bar Chart")
# annotate each bar: x is the job label (object), y is the count (int)
for x, y in zip(Namesofjobs, jobs.values):
plt.text(
x, y - 5, "%d" % y, ha="center", va="bottom"
    )  # place the count labels on the bars; the job names sit below the bars
plt.legend()
plt.show()
# Target Column: Credit amount
# For Outlier analysis.
plt.figure(figsize=(22, 14))
sns.displot(data["Credit amount"], kde=True)
# %5 %95 outlier red lines
plt.axvline(x=np.mean(data["Credit amount"]), c="green", ls="--", label="Mean")
plt.axvline(
x=np.percentile(data["Credit amount"], 5), c="red", ls="--", label="Percentile %5th"
)
plt.axvline(
x=np.percentile(data["Credit amount"], 95),
c="red",
ls="--",
label="Percentile %95th",
)
plt.legend()
plt.show()
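# (Small addition) the numeric cut-offs behind the red percentile lines above:
print("5th / 95th percentiles of Credit amount:", np.percentile(data["Credit amount"], [5, 95]))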
# #### Bivariate Analysis
# Don't forget this: you can read correlation relationships directly from this table.
# 
g = sns.PairGrid(data=data)
g.map(sns.regplot)
plt.figure(figsize=(10, 10))
sns.lineplot(
x=data.groupby(data.index).mean()["Duration"].index,
y=data.groupby(data.index).mean()["Credit amount"],
)
sns.boxplot(data=data, x="Sex", y="Credit amount")
# ## Statistical concepts
def mean(values):
    # running total
    total = 0
    # take each value in the list and add it to the total
    for a in values:
        total += a
    # total / number of elements in the list
return total / len(values)
def covariance(x_values, y_values):
"""
Calculate the covariance between two lists of values.
Parameters:
x_values (list): A list of numeric values.
y_values (list): A list of numeric values.
Returns:
float: The covariance value.
"""
    # In probability theory and statistics, covariance is a measure of how much two variables change together.
    # Covariance describes how two variables vary together, while the correlation coefficient expresses the strength and direction of the relationship in a standardized way.
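    # sample covariance: cov(X, Y) = (1 / (N - 1)) * sum_i (x_i - mean(X)) * (y_i - mean(Y))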
N = len(x_values)
xOrtalama = mean(x_values)
yOrtalama = mean(y_values)
    # compute the deviations from the mean
    sub_x = [i - xOrtalama for i in x_values]
    sub_y = [i - yOrtalama for i in y_values]
    # covariance formula
    cov = sum([sub_y[i] * sub_x[i] for i in range(N)])
    # divide by N - 1 to get the sample covariance
return cov / (N - 1)
covariance(data["Duration"], data["Credit amount"])
# But the easiest way:
np.cov(data["Job"], data["Credit amount"])
"""
You should check this ranking before you are sure of the result;
cov(x,x) cov(x,y)
cov(x,y) cov(y,y)
"""
# Corelation with sns.map
def Corr(data):
plt.figure(figsize=(12, 6))
heatmap = sns.heatmap(data.corr(), vmin=-1, vmax=1, annot=True, cmap="BrBG")
plt.show()
Corr(data)
# Done!
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/173/129173816.ipynb
|
german-credit
| null |
[{"Id": 129173816, "ScriptId": 38402411, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13008811, "CreationDate": "05/11/2023 14:33:04", "VersionNumber": 1.0, "Title": "EDA_german_credit", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 188.0, "LinesInsertedFromPrevious": 188.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
|
[{"Id": 184987553, "KernelVersionId": 129173816, "SourceDatasetVersionId": 1056}]
|
[{"Id": 1056, "DatasetId": 531, "DatasourceVersionId": 1056, "CreatorUserId": 815868, "LicenseName": "Database: Open Database, Contents: Database Contents", "CreationDate": "12/14/2016 21:25:02", "VersionNumber": 1.0, "Title": "German Credit Risk", "Slug": "german-credit", "Subtitle": "Credit classification", "Description": "# Context \nThe original dataset contains 1000 entries with 20 categorial/symbolic attributes prepared by Prof. Hofmann. In this dataset, each entry represents a person who takes a credit by a bank. Each person is classified as good or bad credit risks according to the set of attributes. The link to the original dataset can be found below.\n\n# Content\nIt is almost impossible to understand the original dataset due to its complicated system of categories and symbols. Thus, I wrote a small Python script to convert it into a readable CSV file. Several columns are simply ignored, because in my opinion either they are not important or their descriptions are obscure. The selected attributes are:\n\n 1. Age (numeric)\n 2. Sex (text: male, female)\n 3. Job (numeric: 0 - unskilled and non-resident, 1 - unskilled and resident, 2 - skilled, 3 - highly skilled)\n 4. Housing (text: own, rent, or free)\n 5. Saving accounts (text - little, moderate, quite rich, rich)\n 6. Checking account (numeric, in DM - Deutsch Mark)\n 7. Credit amount (numeric, in DM)\n 8. Duration (numeric, in month)\n 9. Purpose (text: car, furniture/equipment, radio/TV, domestic appliances, repairs, education, business, vacation/others)\n\n# Acknowledgements\n\nSource: [UCI][1]\n\n\n\n [1]: https://archive.ics.uci.edu/ml/datasets/Statlog+%28German+Credit+Data%29", "VersionNotes": "Initial release", "TotalCompressedBytes": 49689.0, "TotalUncompressedBytes": 49689.0}]
|
[{"Id": 531, "CreatorUserId": 815868, "OwnerUserId": NaN, "OwnerOrganizationId": 7.0, "CurrentDatasetVersionId": 1056.0, "CurrentDatasourceVersionId": 1056.0, "ForumId": 2198, "Type": 2, "CreationDate": "12/14/2016 21:25:02", "LastActivityDate": "02/06/2018", "TotalViews": 381048, "TotalDownloads": 44328, "TotalVotes": 393, "TotalKernels": 100}]
| null |
| false | 0 | 1,465 | 2 | 1,855 | 1,465 |
||
129173041
|
# ***Given the names and grades for each student, store them in a nested list and print the name(s) of any student(s) having the second lowest grade.***
def second_lowest(name_grade):
"""
    This function takes one argument, a nested list of [name, score] pairs, and prints the name(s) of the student(s) with the second lowest score.
    1. It creates an empty list to be filled with the students' scores.
    2. It traverses the given nested list and appends every score to that list.
    3. It determines the lowest score from the score list with the help of the "min()" function.
    4. It creates another empty list for all the scores that are not equal to the lowest score, i.e. it gets rid of every occurrence of the lowest score.
    5. It traverses the score list and appends to the new list every score that is not equal to the lowest score.
    6. It then determines the second lowest score, i.e. the lowest score in the list "second_score_list".
    7. It creates an empty list to be filled with the name(s) of the student(s) who have the second lowest score.
    8. It traverses the given nested list and appends to "student_names" every student whose score equals the second lowest score.
    9. If "student_names" contains more than one name, the names are sorted alphabetically.
    10. It then prints every student who shares the second lowest score, one per line.
    11. If "student_names" contains only one name, it simply prints that name.
"""
# empty score list, to be filled with the scores of the students
score_list = []
# traverse through the length of the given list to append the scores of the students
for index in range(len(name_grade)):
score_list.append(name_grade[index][1])
# determining the lowest score from the "score_list" list
lowest_score = min(score_list)
# another empty score list, to be filled with scores, which are not equal to the lowest score
second_score_list = []
# traverse through the "score_list" to append all the scores, which are not equal to the lowest score, i.e. get rid of all the elements, which are equal to the lowest score
for item in score_list:
if item != lowest_score:
second_score_list.append(item)
# determining the second lowest score from the list "second_score_list"
second_lowest_score = min(second_score_list)
# empty student name list, to be filled with the names of the students, whose score is equal to the second lowest score
student_names = []
# traverse through the length of the list "name_grade" to determine the name /s of the students, whose score is equal to the second lowest score, and append those names to the empty list "student_names"
for index in range(len(name_grade)):
if name_grade[index][1] == second_lowest_score:
student_names.append(name_grade[index][0])
    # if more than one student shares the second lowest score, print their names in alphabetical order
    if len(student_names) > 1:
        print(
            "The names of the students with the second lowest score are as follows - "
        )
        for name in sorted(student_names):
            print(name)
# if the list "student_names" contains only 1 element, just print the name
else:
print(
f"The name of the student with the second lowest score is - \n{student_names[0]}"
)
"""
1. The prompt asks the user to enter the number of students
2. It takes an empty list, to be filled with the student names and their respective scores, to make it a nested list
3. It traverses through the given range of the number of students and prompts the user to enter each student's name along with their score
4. It appends all the names and scores to the empty list making it a nested list
5. It calls the function "second_lowest()" and passes the nested list to the function
"""
# the number of students
number_of_students = int(input("Enter the number of students - \n"))
print("==================")
print("Enter the students' names and scores here - \n")
# empty list to be filled with the student names, and their scores
record = []
# traverse through the range of the given number of students, reading each name and score and appending them to the list so that it becomes a nested list
for index in range(number_of_students):
name = input("Enter the name of the student \n")
score = float(input("Enter the score - \n"))
record.append([name, score])
print(f"The student names with their scores are - \n{record}")
print("====================")
# calling the function "second_lowest()"
second_lowest(record)
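# A compact alternative sketch (added for illustration, not part of the original
# solution). Like the function above, it assumes there are at least two distinct
# scores; sorted() and a set handle any number of tied names.
def second_lowest_compact(name_grade):
    # unique scores in ascending order; the element at index 1 is the second lowest
    second_score = sorted(set(score for _, score in name_grade))[1]
    # alphabetically sorted names of everyone with that score
    return sorted(name for name, score in name_grade if score == second_score)
print(second_lowest_compact(record))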
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/173/129173041.ipynb
| null | null |
[{"Id": 129173041, "ScriptId": 38398716, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 2950066, "CreationDate": "05/11/2023 14:27:38", "VersionNumber": 1.0, "Title": "Second lowest score from a given nested list", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 116.0, "LinesInsertedFromPrevious": 116.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 1,367 | 0 | 1,367 | 1,367 |
||
129173256
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# * train.csv - Personal records for about two-thirds (~8700) of the passengers, to be used as training data.
# * PassengerId - A unique Id for each passenger. Each Id takes the form gggg_pp where gggg indicates a group the passenger is travelling with and pp is their number within the group. People in a group are often family members, but not always.
# * HomePlanet - The planet the passenger departed from, typically their planet of permanent residence.
# * CryoSleep - Indicates whether the passenger elected to be put into suspended animation for the duration of the voyage. Passengers in cryosleep are confined to their cabins.
# * Cabin - The cabin number where the passenger is staying. Takes the form deck/num/side, where side can be either P for Port or S for Starboard.
# * Destination - The planet the passenger will be debarking to.
# * Age - The age of the passenger.
# * VIP - Whether the passenger has paid for special VIP service during the voyage.
# * RoomService, FoodCourt, ShoppingMall, Spa, VRDeck - Amount the passenger has billed at each of the Spaceship Titanic's many luxury amenities.
# * Name - The first and last names of the passenger.
# * Transported - Whether the passenger was transported to another dimension. This is the target, the column you are trying to predict.
# * test.csv - Personal records for the remaining one-third (~4300) of the passengers, to be used as test data. Your task is to predict the value of Transported for the passengers in this set.
# * sample_submission.csv - A submission file in the correct format.
# * PassengerId - Id for each passenger in the test set.
# * Transported - The target. For each passenger, predict either True or False.
tr = pd.read_csv("/kaggle/input/spaceship-titanic/train.csv")
ts = pd.read_csv("/kaggle/input/spaceship-titanic/test.csv")
def prep(data):
data = data.drop(
["VIP", "Name", "RoomService", "FoodCourt", "ShoppingMall", "Spa", "VRDeck"],
axis=1,
)
data["CryoSleep"] = data["CryoSleep"].astype(bool)
data = data.join(pd.get_dummies(data.Destination)).drop(["Destination"], axis=1)
data = data.assign(
P=data["Cabin"].str.endswith("P"), S=data["Cabin"].str.endswith("S")
)
data["P"] = data["P"].astype(bool)
data["S"] = data["S"].astype(bool)
data = data.drop(["Cabin"], axis=1)
data = data.join(pd.get_dummies(data.HomePlanet)).drop(["HomePlanet"], axis=1)
data = data.drop(
["Mars", "Europa", "Earth", "TRAPPIST-1e", "PSO J318.5-22", "55 Cancri e"],
axis=1,
)
data["Age"] = data["Age"].interpolate()
return data
# Data analysis and visualization
sns.histplot(tr["Age"])
# Many of the transported passengers are aged 18 to 30
sns.histplot(tr["HomePlanet"])
sns.countplot(x="CryoSleep", hue="Transported", data=tr)
plt.show()
# Compare passengers in cryosleep with those who were transported
# Those who were in CryoSleep were more likely to be transported
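# A quick numeric check of the claim above (a small added sketch): the share of
# transported passengers within each CryoSleep group.
tr.groupby("CryoSleep")["Transported"].mean()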
sns.heatmap(tr.corr(), annot=True, cmap="coolwarm")
# Drop the columns VIP, Name, RoomService, FoodCourt, ShoppingMall, Spa and VRDeck because, judging by the heatmap, these columns do not look important
tr1 = tr.drop(
["VIP", "Name", "RoomService", "FoodCourt", "ShoppingMall", "Spa", "VRDeck"], axis=1
)
tr1.head()
tr1["CryoSleep"] = tr1["CryoSleep"].astype(bool)
tr1 = tr1.join(pd.get_dummies(tr1.Destination)).drop(["Destination"], axis=1)
tr1 = tr1.assign(P=tr1["Cabin"].str.endswith("P"), S=tr1["Cabin"].str.endswith("S"))
tr1["P"] = tr1["P"].astype(bool)
tr1["S"] = tr1["S"].astype(bool)
tr1 = tr1.drop(["Cabin"], axis=1)
tr1 = tr1.join(pd.get_dummies(tr1.HomePlanet)).drop(["HomePlanet"], axis=1)
plt.figure(figsize=(8, 6))
sns.heatmap(tr1.corr(), annot=True, cmap="coolwarm")
tr1 = tr1.drop(
["Mars", "Europa", "Earth", "TRAPPIST-1e", "PSO J318.5-22", "55 Cancri e"], axis=1
)
tr1["Age"] = tr1["Age"].interpolate()
# Separate dataset on train and validation
X = tr1.drop(["Transported"], axis=1)  # all features except the target column
y = tr1["Transported"]  # the target column only
x_train, x_val, y_train, y_val = train_test_split(X, y, test_size=0.2)
tr1
# Train model (use RandomForestClassifier)
from sklearn.ensemble import RandomForestClassifier
clf = RandomForestClassifier(n_estimators=20, max_features=2)
clf.fit(x_train, y_train)
clf.score(x_val, y_val)
ts = prep(ts)
res = pd.DataFrame({"PassengerId": ts["PassengerId"], "Transported": clf.predict(ts)})
print(res)
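# Optional final step (an added sketch, not in the original notebook): write the
# predictions to a CSV; the file name "submission.csv" is only an assumption about
# the expected competition format.
res.to_csv("submission.csv", index=False)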
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/173/129173256.ipynb
| null | null |
[{"Id": 129173256, "ScriptId": 37087035, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13312177, "CreationDate": "05/11/2023 14:29:10", "VersionNumber": 1.0, "Title": "Missing passengers", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 120.0, "LinesInsertedFromPrevious": 120.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 2}]
| null | null | null | null |
| false | 0 | 1,691 | 2 | 1,691 | 1,691 |
||
129104177
|
# Published on May 11, 2023 by Marília Prata, mpwolke
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import dask.dataframe as dd
import dask.array as da
from dask.diagnostics import ProgressBar
import matplotlib.pyplot as plt
import seaborn as sns
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# #Competition Citation
# @misc{asl-fingerspelling,
# author = {Ashley Chow, Glenn Cameron, HCL-Jevster, Manfred Georg, Mark
#
# Sherwood, Phil Culliton, Sam Sepah, Sohier Dane, Thad Starner},
#
# title = {Google - American Sign Language Fingerspelling Recognition},
#
# publisher = {Kaggle},
#
# year = {2023},
#
# url = {https://kaggle.com/competitions/asl-fingerspelling}
# }
# https://www.signlanguageforum.com/asl/fingerspelling/alphabet/
# "Finger spelling is a building block in which you or your child uses hands and fingers to spell out words. Hand shapes represent the letters in the alphabet. Finger spelling is used with many other building blocks; it is almost never used by itself. It is most often used with American Sign Language (ASL), Conceptually Accurate Signed English (CASE), and Manually Coded English (MCE) to spell out words that don’t have a sign — such as the names of places or people."
# https://www.cdc.gov/ncbddd/hearingloss/parentsguide/building/finger-spelling.html#:~:text=Finger%20spelling%20is%20a%20building,almost%20never%20used%20by%20itself.
# By Rob Mulla https://www.kaggle.com/code/robikscube/sign-language-recognition-eda-twitch-stream
BASE_DIR = "../input/asl-fingerspelling/"
train = pd.read_csv(f"{BASE_DIR}/train.csv")
# By Rob Mulla https://www.kaggle.com/code/robikscube/sign-language-recognition-eda-twitch-stream
# Train.csv has the path to each parquet file, the participant id, sequence_id and phrase.
train.head()
# #How to Spell a URL with one Finger? Probably with more than one :D
# By Rob Mulla https://www.kaggle.com/code/robikscube/sign-language-recognition-eda-twitch-stream
fig, ax = plt.subplots(figsize=(8, 8))
train["phrase"].value_counts().head(10).sort_values(ascending=True).plot(
kind="barh", ax=ax, title="Top 10 Signs in Training Dataset"
)
ax.set_xlabel("Number of Training Examples")
plt.show()
# #Let's work with Supplemental Metadata
# By the way, that is my style of having Fun and work. Check phrase on line 6:
# "All work and no play."
df = pd.read_csv(
"/kaggle/input/asl-fingerspelling/supplemental_metadata.csv",
delimiter=",",
encoding="UTF-8",
)
pd.set_option("display.max_columns", None)
df.head(7)
# #And the winner is: "Why do you ask silly questions?" Fingerspelt 117 Times
# Finding a nearby parking spot requires a car. No fun.
df["phrase"].value_counts()
silly = df[(df["phrase"] == "why do you ask silly questions")].reset_index(drop=True)
silly.head()
# By Rob Mulla https://www.kaggle.com/code/robikscube/sign-language-recognition-eda-twitch-stream
fig, ax = plt.subplots(figsize=(4, 4))
df["phrase"].value_counts().head(3).sort_values(ascending=True).plot(
kind="barh",
color="g",
ax=ax,
title="Top 3 Fingerspelling: Why do you ask silly questions?",
)
ax.set_xlabel("Number of Training Examples")
plt.show()
# #Pull an example parquet file data
# Rob Mulla pulled an example landmark file for the phrase "hentaihubs.com"
example_fn = train.query('phrase == "hentaihubs.com"')["path"].values[0]
example_landmark = pd.read_parquet(f"{BASE_DIR}/{example_fn}")
example_landmark.head()
unique_frames = example_landmark["frame"].nunique()
# unique_types = example_landmark["type"].nunique()
# types_in_video = example_landmark["type"].unique()
print(f"The file has {unique_frames} unique frames")
# #Really? 672 unique frames. I've no clue how to deal with that
# #Read only one file. At least.
ddf = dd.read_parquet(
"/kaggle/input/asl-fingerspelling/train_landmarks/1019715464.parquet"
)
len(ddf)
ddf.head()
ddf.x_face_0.describe().compute()
# https://www.kaggle.com/code/luxcem/dask-on-large-parquet-dataset/notebook
ddf.where(ddf.x_face_0 > 50).visualize()
# https://www.kaggle.com/code/luxcem/dask-on-large-parquet-dataset/notebook
h, bins = da.histogram(
ddf[["x_face_0"]].to_dask_array(), bins=(0, 1, 2, 5, 10, 20, 50, 100, 1000)
)
with ProgressBar():
hd = h.compute()
bins = bins.compute()
# https://www.kaggle.com/code/luxcem/dask-on-large-parquet-dataset/notebook
plt.style.use("fivethirtyeight")
sns.barplot(x=bins[1:], y=hd)
# #Another way of reading parquet
# https://www.kaggle.com/code/sohier/reading-the-data-with-python
# By Sohier Dane https://www.kaggle.com/code/sohier/reading-the-data-with-python
import pyarrow.parquet as pq
train = pq.read_pandas(
"../input/asl-fingerspelling/train_landmarks/1019715464.parquet"
).to_pandas()
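# A possible refinement (an added sketch): pyarrow can read just a subset of columns,
# which keeps memory usage down for these wide landmark files. The column names below
# are only examples of the naming pattern seen above.
subset = pq.read_pandas(
    "../input/asl-fingerspelling/train_landmarks/1019715464.parquet",
    columns=["frame", "x_face_0", "y_face_0"],
).to_pandas()
subset.head()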
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/104/129104177.ipynb
| null | null |
[{"Id": 129104177, "ScriptId": 38377538, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 3012786, "CreationDate": "05/11/2023 03:33:18", "VersionNumber": 1.0, "Title": "Fingerspelling : Why do You ask Silly Questions?", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 170.0, "LinesInsertedFromPrevious": 170.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 25}]
| null | null | null | null |
| false | 0 | 1,815 | 25 | 1,815 | 1,815 |
||
129104448
|
<jupyter_start><jupyter_text>New Zealand Police District Boundaries 2021
This is a collection of files that map the boundaries of the 12 police districts in New Zealand.
This data was obtained from Koordinates.com in 2021. The original dataset is no longer available on Koordinates. These files are unaltered from the original dataset.
Kaggle dataset identifier: new-zealand-police-district-boundaries-2021
<jupyter_script># # Using the New Zealand Police District Data
# Use [GeoPandas](https://geopandas.org/en/stable/) to open and interact with the New Zealand police district mapping dataset.
# ### Import GeoPandas
import geopandas as gpd
# **The dataset has several files that are required for Geopandas to map New Zealand.** The shapefile `nz-police-district-boundaries.shx` is the one that we'll open with Geopandas specifically.
# The code below lists all the files that were originally provided by Koordinates.com and that are necessary to get our mapping code to work.
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# ### Open the `.shx` shapefile into a GeoPandas dataframe.
new_zealand_map = gpd.read_file(
"/kaggle/input/new-zealand-police-district-boundaries-2021/nz-police-district-boundaries.shx"
)
# ### Making a Basic Map of New Zealand
new_zealand_map.plot()
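# ### Styling the Map (an optional added sketch)
# The same plot with district borders highlighted and the axes hidden. Only generic
# GeoPandas/Matplotlib options are used, so no assumptions are made about the
# attribute columns in the shapefile.
import matplotlib.pyplot as plt
ax = new_zealand_map.plot(figsize=(8, 10), color="lightsteelblue", edgecolor="black")
ax.set_title("New Zealand Police Districts (2021)")
ax.set_axis_off()
plt.show()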
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/104/129104448.ipynb
|
new-zealand-police-district-boundaries-2021
|
protobioengineering
|
[{"Id": 129104448, "ScriptId": 38375280, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14517655, "CreationDate": "05/11/2023 03:36:03", "VersionNumber": 1.0, "Title": "NZ Police Districts - Opening the Data", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 24.0, "LinesInsertedFromPrevious": 24.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
|
[{"Id": 184864458, "KernelVersionId": 129104448, "SourceDatasetVersionId": 5659139}]
|
[{"Id": 5659139, "DatasetId": 3252467, "DatasourceVersionId": 5734561, "CreatorUserId": 14517655, "LicenseName": "Unknown", "CreationDate": "05/10/2023 23:27:09", "VersionNumber": 1.0, "Title": "New Zealand Police District Boundaries 2021", "Slug": "new-zealand-police-district-boundaries-2021", "Subtitle": "Geospatial data for mapping the 12 police districts in New Zealand as of 2021", "Description": "This is a collection of files that map the boundaries of the 12 police districts in New Zealand.\n\nThis data was obtained from Koordinates.com in 2021. The original dataset is no longer available on Koordinates. These files are unaltered from the original dataset.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3252467, "CreatorUserId": 14517655, "OwnerUserId": 14517655.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5659139.0, "CurrentDatasourceVersionId": 5734561.0, "ForumId": 3317904, "Type": 2, "CreationDate": "05/10/2023 23:27:09", "LastActivityDate": "05/10/2023", "TotalViews": 144, "TotalDownloads": 15, "TotalVotes": 1, "TotalKernels": 2}]
|
[{"Id": 14517655, "UserName": "protobioengineering", "DisplayName": "Proto Bioengineering", "RegisterDate": "04/06/2023", "PerformanceTier": 0}]
|
| false | 0 | 287 | 3 | 400 | 287 |
||
129104429
|
<jupyter_start><jupyter_text>Suicide Attempts in Shandong, China
```
Data on serious suicide attempts in Shandong, China
A data frame with 2571 observations on the following 11 variables.
```
| Column | Description |
| --- | --- |
| Person_ID | ID number of victims |
| Hospitalised | Hospitalized? (no or yes) |
| Died | Died? (no or yes) |
| Urban | Urban area? (no, unknown, or yes) |
| Year | Year (2009, 2010, or 2011) |
| Month | Month (1=Jan through 12=December) |
| Sex | Sex (female or male) |
| Age | Age (years) |
| Education | Education level (illiterate, primary, Secondary, Tertiary, or unknown) |
| Occupation | One of ten occupation categories |
| method | One of nine possible methods |
### Details
Data from a study of serious suicide attempts over three years in a predominantly rural population in Shandong, China.
## Source
Sun J, Guo X, Zhang J, Wang M, Jia C, Xu A (2015) "Incidence and fatality of serious suicide attempts in a predominantly rural population in Shandong, China: a public health surveillance study," BMJ Open 5(2): e006762. https://doi.org/10.1136/bmjopen-2014-006762
Kaggle dataset identifier: suicide-attempts-in-shandong-china
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# #### **Import dataset and libraries**
# ****
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import statsmodels.api as sm
from statsmodels.formula.api import ols
import scipy.stats as stats
import warnings
warnings.filterwarnings("ignore")
rc = {
"axes.facecolor": "#FFF9ED",
"figure.facecolor": "#FFF9ED",
"axes.edgecolor": "#000000",
"grid.color": "#EBEBE7",
"font.family": "serif",
"axes.labelcolor": "#000000",
"xtick.color": "#000000",
"ytick.color": "#000000",
"grid.alpha": 0.4,
}
sns.set(rc=rc)
from colorama import Style, Fore
red = Style.BRIGHT + Fore.RED
blu = Style.BRIGHT + Fore.BLUE
mgt = Style.BRIGHT + Fore.MAGENTA
gld = Style.BRIGHT + Fore.YELLOW
res = Style.RESET_ALL
df = pd.read_csv("/kaggle/input/suicide-attempts-in-shandong-china/SuicideChina.csv")
df.head()
df = df.iloc[:, 2:]
df.head()
desc = pd.DataFrame(df.describe(include="all").transpose())
def summary_stats(df):
print(f"The shape of the data is: {df.shape}")
summary = pd.DataFrame(df.dtypes, columns=["data type"])
summary["Number of missing values"] = df.isnull().sum().values
summary["% of missing values"] = df.isnull().sum().values / len(df) * 100
summary["min value"] = desc["min"].values
summary["mean value"] = desc["mean"].values
summary["max value"] = desc["max"].values
summary["skewness"] = df.skew()
return summary
summary_stats(df)
#
# - There are 2,571 recorded suicide attempts in Shandong, China.
# - There are no missing values in the data.
# ## **Exploratory data analysis**
# ****
fig2 = px.pie(
df,
names="Sex",
height=400,
width=600,
hole=0.7,
title="Suicide attempts in Shandong according to gender",
color_discrete_sequence=["#4c78a8", "#72b7b2"],
)
fig2.update_traces(
hovertemplate=None, textposition="outside", textinfo="percent+label", rotation=0
)
fig2.update_layout(
margin=dict(t=100, b=30, l=0, r=0),
showlegend=False,
plot_bgcolor="#fafafa",
paper_bgcolor="#fafafa",
title_font=dict(size=20, color="#555", family="Lato, sans-serif"),
font=dict(size=17, color="#8a8d93"),
hoverlabel=dict(bgcolor="#444", font_size=13, font_family="Lato, sans-serif"),
)
fig2.show()
year_counts = df.groupby("Year").size().reset_index(name="Count")
sns.lineplot(x="Year", y="Count", data=year_counts)
plt.xticks(year_counts["Year"])
plt.show()
#
# The number of suicide attempts went up by around 20% from 2009 to 2010, then decreased back down to slightly above 850 in 2011.
#
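# A quick added check of the year-over-year change described above.
year_counts["pct_change"] = year_counts["Count"].pct_change() * 100
year_counts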
fig2 = px.pie(
df,
names="Education",
height=400,
width=600,
hole=0.7,
title="Suicide attempts in Shandong according to education level",
color_discrete_sequence=["#4c78a8", "#72b7b2"],
)
fig2.update_traces(
hovertemplate=None, textposition="outside", textinfo="percent+label", rotation=0
)
fig2.update_layout(
margin=dict(t=100, b=30, l=0, r=0),
showlegend=False,
plot_bgcolor="#fafafa",
paper_bgcolor="#fafafa",
title_font=dict(size=20, color="#555", family="Lato, sans-serif"),
font=dict(size=17, color="#8a8d93"),
hoverlabel=dict(bgcolor="#444", font_size=13, font_family="Lato, sans-serif"),
)
fig2.show()
#
# - The majority of suicide attempts (49.8%) are made by people with only a secondary education level, followed by people with only a primary education level (25.6%).
# - People with a tertiary education level account for a very small share of suicide attempts (less than 1%).
# - Since more than 80% of suicide attempts come from people with a secondary education level or lower, financial background (such as income) might be a contributing factor, because people with lower education levels tend to work in jobs with meagre salaries and may struggle to support their families.
#
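# An added cross-check of the education shares quoted above.
df["Education"].value_counts(normalize=True).mul(100).round(1)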
sns.countplot(y="Occupation", data=df)
plt.title("Suicide attempts in Shandong according to occupation")
#
# - The majority of suicide attempts (49.8%) are made by people with only a secondary education level, so many of them can only work in jobs that require little educational background, such as farming.
# - Farming pays workers a meagre salary, so financial stress might be a reason behind many of the suicide attempts.
#
sns.countplot(x="Urban", data=df)
plt.xlabel("Are they staying in urban areas")
plt.title("Suicide attempts in Shandong according to whether they stay in urban areas")
#
# - Residential properties in urban areas tend to cost much more. Since the majority of suicide attempts were made by people with a secondary education level or lower, who can only work in jobs that pay a meagre salary, they are most likely unable to afford houses in urban areas.
# - Hence, most suicide attempts occur among people living in rural areas, where property prices are much lower.
#
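# An added sketch: outcome by area type, to complement the count plot above.
pd.crosstab(df["Urban"], df["Died"], normalize="index").round(3)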
fig2 = px.pie(
df,
names="method",
height=400,
width=600,
hole=0.7,
title="Method used in suicide attempts",
color_discrete_sequence=["#4c78a8", "#72b7b2"],
)
fig2.update_traces(
hovertemplate=None, textposition="outside", textinfo="percent+label", rotation=0
)
fig2.update_layout(
margin=dict(t=100, b=30, l=0, r=0),
showlegend=False,
plot_bgcolor="#fafafa",
paper_bgcolor="#fafafa",
title_font=dict(size=20, color="#555", family="Lato, sans-serif"),
font=dict(size=17, color="#8a8d93"),
hoverlabel=dict(bgcolor="#444", font_size=13, font_family="Lato, sans-serif"),
)
fig2.show()
#
# Since most suicide attempts are made by people who work in farming, they have easy access to pesticides, which explains why pesticides are the most common method.
#
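# An added sketch: case-fatality proportion for each method, sorted from most to least lethal.
fatality_by_method = (
    df.assign(died=df["Died"].eq("yes"))
    .groupby("method")["died"]
    .mean()
    .sort_values(ascending=False)
    .round(3)
)
fatality_by_method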
fig2 = px.pie(
df,
names="Died",
height=400,
width=600,
hole=0.7,
title="% of suicide attempts that led to death",
color_discrete_sequence=["#4c78a8", "#72b7b2"],
)
fig2.update_traces(
hovertemplate=None, textposition="outside", textinfo="percent+label", rotation=0
)
fig2.update_layout(
margin=dict(t=100, b=30, l=0, r=0),
showlegend=False,
plot_bgcolor="#fafafa",
paper_bgcolor="#fafafa",
title_font=dict(size=20, color="#555", family="Lato, sans-serif"),
font=dict(size=17, color="#8a8d93"),
hoverlabel=dict(bgcolor="#444", font_size=13, font_family="Lato, sans-serif"),
)
fig2.show()
df_pesticide = df[df["method"] == "Pesticide"]
fig2 = px.pie(
df_pesticide,
names="Died",
height=400,
width=600,
hole=0.7,
title="% of suicide attempts that led to death",
color_discrete_sequence=["#4c78a8", "#72b7b2"],
)
fig2.update_traces(
hovertemplate=None, textposition="outside", textinfo="percent+label", rotation=0
)
fig2.update_layout(
margin=dict(t=100, b=30, l=0, r=0),
showlegend=False,
plot_bgcolor="#fafafa",
paper_bgcolor="#fafafa",
title_font=dict(size=20, color="#555", family="Lato, sans-serif"),
font=dict(size=17, color="#8a8d93"),
hoverlabel=dict(bgcolor="#444", font_size=13, font_family="Lato, sans-serif"),
)
fig2.show()
#
# Slightly less than half of the suicide attempts done using pesticides (43.6%) resulted in death.
#
fig2 = px.pie(
df_pesticide,
names="Hospitalised",
height=400,
width=600,
hole=0.7,
title="% of suicide attempts by pesticide that were hospitalized",
color_discrete_sequence=["#4c78a8", "#72b7b2"],
)
fig2.update_traces(
hovertemplate=None, textposition="outside", textinfo="percent+label", rotation=0
)
fig2.update_layout(
margin=dict(t=100, b=30, l=0, r=0),
showlegend=False,
plot_bgcolor="#fafafa",
paper_bgcolor="#fafafa",
title_font=dict(size=20, color="#555", family="Lato, sans-serif"),
font=dict(size=17, color="#8a8d93"),
hoverlabel=dict(bgcolor="#444", font_size=13, font_family="Lato, sans-serif"),
)
fig2.show()
#
# Most of the people who used pesticide as a suicide method (67.8%) had a chance at survival, as they were found in time to be taken to hospital.
#
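# An added sketch: among pesticide cases, the death rate for hospitalised versus
# non-hospitalised attempts.
pd.crosstab(df_pesticide["Hospitalised"], df_pesticide["Died"], normalize="index").round(3)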
df_hospitalised = df_pesticide[df_pesticide["Hospitalised"] == "yes"]
fig2 = px.pie(
df_hospitalised,
names="Died",
height=400,
width=600,
hole=0.7,
title="% of suicide attempts by pesticide that died after hospitalization",
color_discrete_sequence=["#4c78a8", "#72b7b2"],
)
fig2.update_traces(
hovertemplate=None, textposition="outside", textinfo="percent+label", rotation=0
)
fig2.update_layout(
margin=dict(t=100, b=30, l=0, r=0),
showlegend=False,
plot_bgcolor="#fafafa",
paper_bgcolor="#fafafa",
title_font=dict(size=20, color="#555", family="Lato, sans-serif"),
font=dict(size=17, color="#8a8d93"),
hoverlabel=dict(bgcolor="#444", font_size=13, font_family="Lato, sans-serif"),
)
fig2.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/104/129104429.ipynb
|
suicide-attempts-in-shandong-china
|
utkarshx27
|
[{"Id": 129104429, "ScriptId": 38378234, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 7408242, "CreationDate": "05/11/2023 03:35:53", "VersionNumber": 2.0, "Title": "Suicide attempts in Shandong, China - EDA", "EvaluationDate": "05/11/2023", "IsChange": true, "TotalLines": 234.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 233.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 3}]
|
[{"Id": 184864443, "KernelVersionId": 129104429, "SourceDatasetVersionId": 5617993}]
|
[{"Id": 5617993, "DatasetId": 3230370, "DatasourceVersionId": 5693173, "CreatorUserId": 13364933, "LicenseName": "CC0: Public Domain", "CreationDate": "05/06/2023 11:54:22", "VersionNumber": 1.0, "Title": "Suicide Attempts in Shandong, China", "Slug": "suicide-attempts-in-shandong-china", "Subtitle": "Serious Suicide Attempts in Shandong, China: Three-Year Study", "Description": "```\nData on serious suicide attempts in Shandong, China\nA data frame with 2571 observations on the following 11 variables.\n```\n\n| Column | Description |\n| --- | --- |\n| Person_ID | ID number of victims |\n| Hospitalised | Hospitalized? (no or yes) |\n| Died | Died? (no or yes) |\n| Urban | Urban area? (no, unknown, or yes) |\n| Year | Year (2009, 2010, or 2011) |\n| Month | Month (1=Jan through 12=December) |\n| Sex | Sex (female or male) |\n| Age | Age (years) |\n| Education | Education level (illiterate, primary, Secondary, Tertiary, or unknown) |\n| Occupation | One of ten occupation categories |\n| method | One of nine possible methods |\n\n### Details \nData from a study of serious suicide attempts over three years in a predominantly rural population in Shandong, China.\n\n## Source\nSun J, Guo X, Zhang J, Wang M, Jia C, Xu A (2015) \"Incidence and fatality of serious suicide attempts in a predominantly rural population in Shandong, China: a public health surveillance study,\" BMJ Open 5(2): e006762. https://doi.org/10.1136/bmjopen-2014-006762", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3230370, "CreatorUserId": 13364933, "OwnerUserId": 13364933.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5617993.0, "CurrentDatasourceVersionId": 5693173.0, "ForumId": 3295509, "Type": 2, "CreationDate": "05/06/2023 11:54:22", "LastActivityDate": "05/06/2023", "TotalViews": 8885, "TotalDownloads": 1402, "TotalVotes": 42, "TotalKernels": 12}]
|
[{"Id": 13364933, "UserName": "utkarshx27", "DisplayName": "Utkarsh Singh", "RegisterDate": "01/21/2023", "PerformanceTier": 2}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# #### **Import dataset and libraries**
# ****
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import plotly.express as px
import statsmodels.api as sm
from statsmodels.formula.api import ols
import scipy.stats as stats
import warnings
warnings.filterwarnings("ignore")
rc = {
"axes.facecolor": "#FFF9ED",
"figure.facecolor": "#FFF9ED",
"axes.edgecolor": "#000000",
"grid.color": "#EBEBE7",
"font.family": "serif",
"axes.labelcolor": "#000000",
"xtick.color": "#000000",
"ytick.color": "#000000",
"grid.alpha": 0.4,
}
sns.set(rc=rc)
from colorama import Style, Fore
red = Style.BRIGHT + Fore.RED
blu = Style.BRIGHT + Fore.BLUE
mgt = Style.BRIGHT + Fore.MAGENTA
gld = Style.BRIGHT + Fore.YELLOW
res = Style.RESET_ALL
df = pd.read_csv("/kaggle/input/suicide-attempts-in-shandong-china/SuicideChina.csv")
df.head()
df = df.iloc[:, 2:]
df.head()
desc = pd.DataFrame(df.describe(include="all").transpose())
def summary_stats(df):
    print(f"The shape of the data is: {df.shape}")
    # Per-column overview; the min/mean/max values come from the `desc` DataFrame computed above
    summary = pd.DataFrame(df.dtypes, columns=["data type"])
    summary["Number of missing values"] = df.isnull().sum().values
    summary["% of missing values"] = df.isnull().sum().values / len(df) * 100
    summary["min value"] = desc["min"].values
    summary["mean value"] = desc["mean"].values
    summary["max value"] = desc["max"].values
    # numeric_only avoids errors on the categorical columns
    summary["skewness"] = df.skew(numeric_only=True)
    return summary
summary_stats(df)
#
# - There are 2,571 recorded suicide attempts in the Shandong, China data.
# - There are no missing values in the data.
# ## **Exploratory data analysis**
# ****
fig2 = px.pie(
df,
names="Sex",
height=400,
width=600,
hole=0.7,
title="Suicide attempts in Shandong according to gender",
color_discrete_sequence=["#4c78a8", "#72b7b2"],
)
fig2.update_traces(
hovertemplate=None, textposition="outside", textinfo="percent+label", rotation=0
)
fig2.update_layout(
margin=dict(t=100, b=30, l=0, r=0),
showlegend=False,
plot_bgcolor="#fafafa",
paper_bgcolor="#fafafa",
title_font=dict(size=20, color="#555", family="Lato, sans-serif"),
font=dict(size=17, color="#8a8d93"),
hoverlabel=dict(bgcolor="#444", font_size=13, font_family="Lato, sans-serif"),
)
fig2.show()
year_counts = df.groupby("Year").size().reset_index(name="Count")
sns.lineplot(x="Year", y="Count", data=year_counts)
plt.xticks(year_counts["Year"])
plt.show()
#
# The number of suicide attempts rose by roughly 20% from 2009 to 2010, then fell back to slightly above 850 in 2011.
#
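# To make the year-over-year change explicit, the `year_counts` table from the previous
# cell can be extended with a percentage-change column (a quick numeric check of the
# statement above):
year_counts["pct_change"] = year_counts["Count"].pct_change() * 100
print(year_counts)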
fig2 = px.pie(
df,
names="Education",
height=400,
width=600,
hole=0.7,
title="Suicide attempts in Shandong according to education level",
color_discrete_sequence=["#4c78a8", "#72b7b2"],
)
fig2.update_traces(
hovertemplate=None, textposition="outside", textinfo="percent+label", rotation=0
)
fig2.update_layout(
margin=dict(t=100, b=30, l=0, r=0),
showlegend=False,
plot_bgcolor="#fafafa",
paper_bgcolor="#fafafa",
title_font=dict(size=20, color="#555", family="Lato, sans-serif"),
font=dict(size=17, color="#8a8d93"),
hoverlabel=dict(bgcolor="#444", font_size=13, font_family="Lato, sans-serif"),
)
fig2.show()
#
# - The majority of attempts (49.8%) were made by people with at most a secondary education, followed by people with only a primary education (25.6%).
# - People with a tertiary education account for very few attempts (less than 1%).
# - Since more than 80% of attempts were made by people with a secondary education or lower, financial pressure may be a contributing factor: people with lower education levels tend to work in low-paying jobs and may struggle to support their families.
#
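# The education percentages quoted above can be reproduced directly from the data
# (a minimal check; the column name follows the dataset description):
education_share = df["Education"].value_counts(normalize=True) * 100
print(education_share.round(1))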
sns.countplot(y="Occupation", data=df)
plt.title("Suicide attempts in Shandong according to occupation")
#
# - As noted above, the majority of attempts (49.8%) were made by people with at most a secondary education, who are more likely to work in jobs that require little formal education, such as farming.
# - Farming pays a meagre salary, so financial stress may be a factor in many of these attempts.
#
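# To relate education level to occupation directly, a cross-tabulation can be used as a
# rough check of the claim above (rows are normalized to percentages):
edu_occupation = pd.crosstab(df["Education"], df["Occupation"], normalize="index") * 100
print(edu_occupation.round(1))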
sns.countplot(x="Urban", data=df)
plt.xlabel("Are they staying in urban areas")
plt.title("Suicide attempts in Shandong according to whether they stay in urban areas")
#
# - Residential property in urban areas tends to cost much more. Since most attempts were made by people with a secondary education or lower, who typically work in low-paying jobs, they are less likely to be able to afford housing in urban areas.
# - Accordingly, most recorded attempts come from people living in rural areas, where property prices are much lower.
#
fig2 = px.pie(
df,
names="method",
height=400,
width=600,
hole=0.7,
title="Method used in suicide attempts",
color_discrete_sequence=["#4c78a8", "#72b7b2"],
)
fig2.update_traces(
hovertemplate=None, textposition="outside", textinfo="percent+label", rotation=0
)
fig2.update_layout(
margin=dict(t=100, b=30, l=0, r=0),
showlegend=False,
plot_bgcolor="#fafafa",
paper_bgcolor="#fafafa",
title_font=dict(size=20, color="#555", family="Lato, sans-serif"),
font=dict(size=17, color="#8a8d93"),
hoverlabel=dict(bgcolor="#444", font_size=13, font_family="Lato, sans-serif"),
)
fig2.show()
#
# Since most attempts were made by people who work in farming, who have easy access to pesticide, this likely explains why pesticide is by far the most common method.
#
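# The link between occupation and method can be checked with a cross-tabulation
# (rows normalized to percentages; a rough sanity check of the claim above):
occupation_method = pd.crosstab(df["Occupation"], df["method"], normalize="index") * 100
print(occupation_method.round(1))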
fig2 = px.pie(
df,
names="Died",
height=400,
width=600,
hole=0.7,
title="% of suicide attempts that led to death",
color_discrete_sequence=["#4c78a8", "#72b7b2"],
)
fig2.update_traces(
hovertemplate=None, textposition="outside", textinfo="percent+label", rotation=0
)
fig2.update_layout(
margin=dict(t=100, b=30, l=0, r=0),
showlegend=False,
plot_bgcolor="#fafafa",
paper_bgcolor="#fafafa",
title_font=dict(size=20, color="#555", family="Lato, sans-serif"),
font=dict(size=17, color="#8a8d93"),
hoverlabel=dict(bgcolor="#444", font_size=13, font_family="Lato, sans-serif"),
)
fig2.show()
df_pesticide = df[df["method"] == "Pesticide"]
fig2 = px.pie(
df_pesticide,
names="Died",
height=400,
width=600,
hole=0.7,
title="% of suicide attempts that led to death",
color_discrete_sequence=["#4c78a8", "#72b7b2"],
)
fig2.update_traces(
hovertemplate=None, textposition="outside", textinfo="percent+label", rotation=0
)
fig2.update_layout(
margin=dict(t=100, b=30, l=0, r=0),
showlegend=False,
plot_bgcolor="#fafafa",
paper_bgcolor="#fafafa",
title_font=dict(size=20, color="#555", family="Lato, sans-serif"),
font=dict(size=17, color="#8a8d93"),
hoverlabel=dict(bgcolor="#444", font_size=13, font_family="Lato, sans-serif"),
)
fig2.show()
#
# Slightly less than half of the suicide attempts made using pesticide (43.6%) resulted in death.
#
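# Fatality rates can be compared across all methods in one step. A small sketch, assuming
# `Died` is coded as "yes"/"no" as in the dataset description:
fatality_by_method = df.groupby("method")["Died"].apply(lambda s: (s == "yes").mean() * 100)
print(fatality_by_method.sort_values(ascending=False).round(1))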
fig2 = px.pie(
df_pesticide,
names="Hospitalised",
height=400,
width=600,
hole=0.7,
title="% of suicide attempts by pesticide that were hospitalized",
color_discrete_sequence=["#4c78a8", "#72b7b2"],
)
fig2.update_traces(
hovertemplate=None, textposition="outside", textinfo="percent+label", rotation=0
)
fig2.update_layout(
margin=dict(t=100, b=30, l=0, r=0),
showlegend=False,
plot_bgcolor="#fafafa",
paper_bgcolor="#fafafa",
title_font=dict(size=20, color="#555", family="Lato, sans-serif"),
font=dict(size=17, color="#8a8d93"),
hoverlabel=dict(bgcolor="#444", font_size=13, font_family="Lato, sans-serif"),
)
fig2.show()
#
# Most of the people who used pesticide (67.8%) were hospitalised, meaning they were found in time and had a chance of being saved.
#
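# Comparing fatality for hospitalised versus non-hospitalised pesticide cases summarises the
# two previous charts in one table (again assuming "yes"/"no" coding):
pesticide_fatality = df_pesticide.groupby("Hospitalised")["Died"].apply(
    lambda s: (s == "yes").mean() * 100
)
print(pesticide_fatality.round(1))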
df_hospitalised = df_pesticide[df_pesticide["Hospitalised"] == "yes"]
fig2 = px.pie(
df_hospitalised,
names="Died",
height=400,
width=600,
hole=0.7,
title="% of suicide attempts by pesticide that died after hospitalization",
color_discrete_sequence=["#4c78a8", "#72b7b2"],
)
fig2.update_traces(
hovertemplate=None, textposition="outside", textinfo="percent+label", rotation=0
)
fig2.update_layout(
margin=dict(t=100, b=30, l=0, r=0),
showlegend=False,
plot_bgcolor="#fafafa",
paper_bgcolor="#fafafa",
title_font=dict(size=20, color="#555", family="Lato, sans-serif"),
font=dict(size=17, color="#8a8d93"),
hoverlabel=dict(bgcolor="#444", font_size=13, font_family="Lato, sans-serif"),
)
fig2.show()
| false | 1 | 3,191 | 3 | 3,604 | 3,191 |
||
129090391
|
# # Google - 🤟American Sign Language Fingerspelling Recognition🖖
# Train fast and accurate American Sign Language fingerspelling recognition models
# ***
# # (ಠಿ_ಠ) Overview
# ⚪ The goal of the competition is to detect and translate American Sign Language (ASL) fingerspelling into text.
# ⚪ The competition is based on a large dataset of over three million fingerspelled characters produced by over 100 Deaf signers captured via the selfie camera of a smartphone with a variety of backgrounds and lighting conditions.
# ⚪ The competition aims to improve sign language recognition technology, making it more accessible for the Deaf and Hard of Hearing community.
# ⚪ Fingerspelling is an important part of ASL and is often used for communicating names, addresses, phone numbers, and other information commonly entered on a mobile phone.
# ⚪ ASL fingerspelling can be substantially faster than typing on a smartphone’s virtual keyboard.
# ⚪ Sign language recognition AI for text entry lags far behind voice-to-text or even gesture-based typing, as robust datasets didn't previously exist.
# ⚪ Participating in this competition could help provide Deaf and Hard of Hearing users with the option to fingerspell words instead of using a keyboard, enabling them to communicate with hearing non-signers more quickly and smoothly.
# ####
# # Table of contents
# * [0. Import all dependencies](#0)
# * [1. Data overview](#1)
# # 0. Import all dependencies
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
from IPython.display import HTML
#
# # 1. Data overview
# ⚪ The goal of this competition is to detect and translate American Sign Language (ASL) fingerspelling into text.
# ⚪ [train/supplemental_metadata].csv
# * path - The path to the landmark file.
# * file_id - A unique identifier for the data file.
# * sequence_id - A unique identifier for the landmark sequence. Each data file may contain many sequences.
# * phrase - The labels for the landmark sequence. The train and test datasets contain randomly generated addresses, phone numbers, and urls derived from components of real addresses/phone numbers/urls. Any overlap with real addresses, phone numbers, or urls is purely accidental. The supplemental dataset consists of fingerspelled sentences. Note that some of the urls include adult content. The intent of this competition is to support the Deaf and Hard of Hearing community in engaging with technology on an equal footing with other adults.
# ⚪ [train/supplemental]_landmarks/ - the landmark data. The landmarks were extracted from raw videos with the MediaPipe holistic model. Not all of the frames necessarily had visible hands or hands that could be detected by the model.
# ⚪ The landmark files contain the same data as in the ASL Signs competition (minus the row ID column) but reshaped into a wide format. This allows you to take advantage of the Parquet format to entirely skip loading landmarks that you aren't using (see the loading sketch after this list).
# * sequence_id - A unique identifier for the landmark sequence; each data file may contain many sequences (most landmark files contain 1,000). The sequence ID is used as the dataframe index.
# * frame - The frame number within a landmark sequence.
# * [x/y/z]_[type]_[landmark_index] - There are now 1,629 spatial coordinate columns for the x, y and z coordinates for each of the 543 landmarks. The type of landmark is one of ['face', 'left_hand', 'pose', 'right_hand'].
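# A minimal sketch of column-selective loading: read only the x/y coordinates of the
# right-hand landmarks (plus the frame number) from one training file, skipping every
# other column on disk. The exact column names and the 21 hand landmarks are assumed
# from the description above.
import pandas as pd

landmark_path = "/kaggle/input/asl-fingerspelling/train_landmarks/1019715464.parquet"
right_hand_cols = ["frame"] + [
    f"{axis}_right_hand_{i}" for axis in ("x", "y") for i in range(21)
]
right_hand = pd.read_parquet(landmark_path, columns=right_hand_cols)
print(right_hand.shape)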
# ## 1.1 American sign language (ASL)
# ⚪ ASL Basics: American Sign Language (ASL) is a complete, natural language that has the same linguistic properties as spoken languages, with grammar that differs from English.
# ⚪ Sign Language in Different Countries: There is no universal sign language. Different sign languages are used in different countries or regions. Some countries adopt features of ASL in their sign languages.
# ⚪ Origins of ASL: No person or committee invented ASL. The exact beginnings of ASL are not clear, but some suggest that it arose more than 200 years ago from the intermixing of local sign languages and French Sign Language (LSF, or Langue des Signes Française).
# ⚪ ASL Compared to Spoken Language: ASL is a language completely separate and distinct from English. It contains all the fundamental features of language, with its own rules for pronunciation, word formation, and word order. Fingerspelling is part of ASL and is used to spell out English words.
# ⚪ Neurobiology of Language Development: Study of sign language can also help scientists understand the neurobiology of language development. Better understanding of the neurobiology of language could provide a translational foundation for treating injury to the language system, for employing signs or gestures in therapy for children or adults, and for diagnosing language impairment in individuals who are deaf.
# ⚪ Sign Languages Created Among Small Communities: The NIDCD is also funding research on sign languages created among small communities of people with little to no outside influence.
# ASL finger alphabet
# ## 1.2 MediaPipe holistic model
# ⚪ MediaPipe Holistic Solution is a powerful, easy-to-use software tool that can detect and track multiple human body parts and gestures in real-time video streams. It is open-source and can run on a variety of platforms, including mobile devices, making it an ideal solution for our competition.
# ⚪ Real-time Perception: Real-time, simultaneous perception of human pose, face landmarks, and hand tracking can enable impactful applications like fitness analysis, gesture control, and sign language recognition.
# ⚪ Open-Source Framework: MediaPipe is an open-source framework designed for complex perception pipelines.
# ⚪ State-of-the-Art Solution: MediaPipe Holistic is a solution that provides a state-of-the-art human pose topology, consisting of optimized pose, face, and hand components that each run in real-time.
# ⚪ Unified Topology: MediaPipe Holistic provides a unified topology for 540+ keypoints and is available on-device for mobile and desktop.
# ⚪ Separate ML Models For Separate Tasks: The pipeline integrates separate models for pose, face, and hand components, treating the different regions using a region-appropriate image resolution.
# ⚪ Significant Model Coordination: MediaPipe Holistic requires coordination between up to 8 models per frame and optimized machine learning models and pre- and post-processing algorithms for performance benefits.
# ⚪ Performance Benefits: The multi-stage nature of the pipeline provides performance benefits, as models are mostly independent and can be replaced with lighter or heavier versions.
# Example of MediaPipe Holistic
# MediaPipe Landmarks for Hands
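# The competition data already ships with extracted landmarks, so MediaPipe is not needed
# here, but a minimal sketch of how the holistic model described above produces such
# landmarks from a single image may help. The input file name is hypothetical, and the
# `mediapipe`/`opencv-python` packages are assumed to be installed:
import cv2
import mediapipe as mp

mp_holistic = mp.solutions.holistic
with mp_holistic.Holistic(static_image_mode=True) as holistic:
    frame = cv2.imread("example_frame.jpg")  # hypothetical input image
    results = holistic.process(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    if results.right_hand_landmarks:
        # Each detected hand has 21 landmarks with normalized x, y, z coordinates
        for lm in results.right_hand_landmarks.landmark:
            print(lm.x, lm.y, lm.z)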
# # 2. Supplemental overview
supplemental_metadata_df = pd.read_csv(
"/kaggle/input/asl-fingerspelling/supplemental_metadata.csv"
)
supplemental_metadata_df.head()
supplemental_metadata_df.describe()
path_to_sign = (
"/kaggle/input/asl-fingerspelling/supplemental_landmarks/1032110484.parquet"
)
sign = pd.read_parquet(path_to_sign)
sign
sign[sign.index == 1617884228]
# TODO: Add animated visualisation and map it to phrase
# TODO: Plot NaN distributions for some features
# TODO: Text length distribution
# TODO: Create participant_id distribution
# TODO: Frames distribution
#
# # 3. Train overview
train_df = pd.read_csv("/kaggle/input/asl-fingerspelling/train.csv")
train_df.head()
train_df.describe()
path_to_sign = "/kaggle/input/asl-fingerspelling/train_landmarks/1019715464.parquet"
sign = pd.read_parquet(path_to_sign)
sign
sign[sign.index == 1975433633]
# TODO: Add animated visualisation and map it to phrase
# ( ꈍᴗꈍ) WORK STILL IN PROGRESS
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/090/129090391.ipynb
| null | null |
[{"Id": 129090391, "ScriptId": 38374023, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 4158783, "CreationDate": "05/10/2023 23:58:35", "VersionNumber": 2.0, "Title": "[EDA] \ud83e\udd1fASLFR\ud83d\udd96 - Basic breakdown", "EvaluationDate": "05/10/2023", "IsChange": true, "TotalLines": 149.0, "LinesInsertedFromPrevious": 1.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 148.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 27}]
| null | null | null | null |
| false | 0 | 2,024 | 27 | 2,024 | 2,024 |
||
129814933
|
<jupyter_start><jupyter_text>CIFAKE: Real and AI-Generated Synthetic Images
# CIFAKE: Real and AI-Generated Synthetic Images
The quality of AI-generated images has rapidly increased, leading to concerns of authenticity and trustworthiness.
CIFAKE is a dataset that contains 60,000 synthetically-generated images and 60,000 real images (collected from CIFAR-10). Can computer vision techniques be used to detect when an image is real or has been generated by AI?
Further information on this dataset can be found here: [Bird, J.J., Lotfi, A. (2023). CIFAKE: Image Classification and Explainable Identification of AI-Generated Synthetic Images. arXiv preprint arXiv:2303.14126.](https://arxiv.org/abs/2303.14126)

## Dataset details
The dataset contains two classes - REAL and FAKE.
For REAL, we collected the images from Krizhevsky & Hinton's [CIFAR-10 dataset](https://www.cs.toronto.edu/~kriz/cifar.html)
For the FAKE images, we generated the equivalent of CIFAR-10 with Stable Diffusion version 1.4
There are 100,000 images for training (50k per class) and 20,000 for testing (10k per class)
## Papers with Code
The dataset and all studies using it are linked using [Papers with Code](https://paperswithcode.com/dataset/cifake-real-and-ai-generated-synthetic-images)
[https://paperswithcode.com/dataset/cifake-real-and-ai-generated-synthetic-images](https://paperswithcode.com/dataset/cifake-real-and-ai-generated-synthetic-images)
## References
If you use this dataset, you **must** cite the following sources
[Krizhevsky, A., & Hinton, G. (2009). Learning multiple layers of features from tiny images.](https://www.cs.toronto.edu/~kriz/learning-features-2009-TR.pdfl)
[Bird, J.J., Lotfi, A. (2023). CIFAKE: Image Classification and Explainable Identification of AI-Generated Synthetic Images. arXiv preprint arXiv:2303.14126.](https://arxiv.org/abs/2303.14126)
Real images are from Krizhevsky & Hinton (2009), fake images are from Bird & Lotfi (2023). The Bird & Lotfi study is a preprint currently available on [ArXiv](https://arxiv.org/abs/2303.14126) and this description will be updated when the paper is published.
## Notes
The updates to the dataset on the 28th of March 2023 did not change anything; the file formats ".jpeg" were renamed ".jpg" and the root folder was uploaded to meet Kaggle's usability requirements.
## License
This dataset is published under the [same MIT license as CIFAR-10](https://github.com/wichtounet/cifar-10/blob/master/LICENSE):
*Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:*
*The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.*
*THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.*
Kaggle dataset identifier: cifake-real-and-ai-generated-synthetic-images
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
# for dirname, _, filenames in os.walk('/kaggle/input'):
# for filename in filenames:
# print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# **Summary**
# The project aims to classify images into "Fake" and "Real" categories using the VGG16 model and transfer learning on the CIFAKE Dataset.
# The steps followed in the project can be summarized as follows:
# Dataset Preparation: The project used a dataset containing both real and fake images, split into training and testing sets. For convenience, I trained on 4,000 images and tested on 400 images.
# Model Architecture: The VGG16 model, pre-trained on the ImageNet dataset, was used as a frozen convolutional base. A new classification head was added on top of it: a flatten layer, a fully connected ReLU layer with dropout, and a single sigmoid output unit for the binary task.
# Model Training: The model was compiled with appropriate loss and optimization functions. Data augmentation techniques such as rescaling, shearing, zooming, and horizontal flipping were applied to the training data to improve model generalization. The model was trained on the augmented training data.
# Model Evaluation: The trained model was evaluated on the testing data. Predictions were made on the test set, and performance metrics such as accuracy, confusion matrix, classification report, and mean average precision (mAP) were calculated to assess the model's performance.
# Visualization: Various visualizations were created to understand the model's predictions better. These included a confusion matrix, loss plot, precision-recall curve, and F1 curve. Additionally, a sample of test images was selected randomly to display the predicted labels and class probabilities.
# The project aimed to develop a model that can accurately classify images as either "Fake" or "Real" based on transfer learning from the VGG16 model. The evaluation metrics and visualizations provided insights into the model's performance and its ability to distinguish between real and fake images.
# **References**:
# *Krizhevsky, A., & Hinton, G. (2009). Learning multiple layers of features from tiny images.*
# *Bird, J.J., Lotfi, A. (2023). CIFAKE: Image Classification and Explainable Identification of AI-Generated Synthetic Images. arXiv preprint arXiv:2303.14126.*
# *Real images are from Krizhevsky & Hinton (2009), fake images are from Bird & Lotfi (2023). The Bird & Lotfi study is a preprint currently available on ArXiv and this description will be updated when the paper is published.*
# Creating a train and test dataset from the CIFAKE dataset.
import os
import random
import shutil
# Set the paths to your dataset folders
dataset_dir = "/kaggle/input/cifake-real-and-ai-generated-synthetic-images/train"
real_dir = os.path.join(dataset_dir, "REAL")
fake_dir = os.path.join(dataset_dir, "FAKE")
# Set the paths to the new directories that will contain the selected images
train_dir = "/kaggle/working/train"
real_train_dir = os.path.join(train_dir, "REAL")
fake_train_dir = os.path.join(train_dir, "FAKE")
# Create the new directories if they don't exist
if not os.path.exists(real_train_dir):
os.makedirs(real_train_dir)
if not os.path.exists(fake_train_dir):
os.makedirs(fake_train_dir)
# Set the number of images to select from each folder
num_images = 2000
# Randomly select the required number of images from the REAL folder and copy them to the new directory
real_images = os.listdir(real_dir)
selected_real_images = random.sample(real_images, num_images)
for image_name in selected_real_images:
source_path = os.path.join(real_dir, image_name)
dest_path = os.path.join(real_train_dir, image_name)
shutil.copyfile(source_path, dest_path)
# Randomly select the required number of images from the FAKE folder and copy them to the new directory
fake_images = os.listdir(fake_dir)
selected_fake_images = random.sample(fake_images, num_images)
for image_name in selected_fake_images:
source_path = os.path.join(fake_dir, image_name)
dest_path = os.path.join(fake_train_dir, image_name)
shutil.copyfile(source_path, dest_path)
# Set the paths to your dataset folders
dataset_dir_test = "/kaggle/input/cifake-real-and-ai-generated-synthetic-images/test"
real_dir = os.path.join(dataset_dir_test, "REAL")
fake_dir = os.path.join(dataset_dir_test, "FAKE")
# Set the paths to the new directories that will contain the selected images
test_dir = "/kaggle/working/test"
real_test_dir = os.path.join(test_dir, "REAL")
fake_test_dir = os.path.join(test_dir, "FAKE")
# Create the new directories if they don't exist
if not os.path.exists(real_test_dir):
os.makedirs(real_test_dir)
if not os.path.exists(fake_test_dir):
os.makedirs(fake_test_dir)
# Set the number of images to select from each folder
num_images = 200
# Randomly select the required number of images from the REAL folder and copy them to the new directory
real_images = os.listdir(real_dir)
selected_real_images = random.sample(real_images, num_images)
for image_name in selected_real_images:
source_path = os.path.join(real_dir, image_name)
dest_path = os.path.join(real_test_dir, image_name)
shutil.copyfile(source_path, dest_path)
# Randomly select the required number of images from the FAKE folder and copy them to the new directory
fake_images = os.listdir(fake_dir)
selected_fake_images = random.sample(fake_images, num_images)
for image_name in selected_fake_images:
source_path = os.path.join(fake_dir, image_name)
dest_path = os.path.join(fake_test_dir, image_name)
shutil.copyfile(source_path, dest_path)
# Model creation and Evaluation
import tensorflow as tf
from tensorflow.keras.applications import VGG16
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Flatten, Dense, Dropout
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import numpy as np
import os
import cv2
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.metrics import average_precision_score
import matplotlib.pyplot as plt
# Set the paths to the train and test directories
train_dir = "/kaggle/working/train"
test_dir = "/kaggle/working/test"
# Set up the model
base_model = VGG16(weights="imagenet", include_top=False, input_shape=(32, 32, 3))
for layer in base_model.layers:
layer.trainable = False
model = Sequential()
model.add(base_model)
model.add(Flatten())
model.add(Dense(256, activation="relu"))
model.add(Dropout(0.5))
model.add(Dense(1, activation="sigmoid"))
batch_size = 32
# Compile the model
model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
# Perform data augmentation
train_datagen = ImageDataGenerator(
rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
)
# Load the training data
train_generator = train_datagen.flow_from_directory(
train_dir, target_size=(32, 32), batch_size=batch_size, class_mode="binary"
)
# Train the model
history = model.fit(
train_generator, steps_per_epoch=train_generator.n // batch_size, epochs=50
)
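# The model above is trained without a validation set. A hedged sketch of how one could be
# carved out of the same training folder using ImageDataGenerator's validation_split
# (illustrative only; not part of the original pipeline):
split_datagen = ImageDataGenerator(rescale=1.0 / 255, validation_split=0.2)
train_subset = split_datagen.flow_from_directory(
    train_dir, target_size=(32, 32), batch_size=batch_size,
    class_mode="binary", subset="training"
)
val_subset = split_datagen.flow_from_directory(
    train_dir, target_size=(32, 32), batch_size=batch_size,
    class_mode="binary", subset="validation"
)
# model.fit(train_subset, validation_data=val_subset, epochs=50)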
# Load the test data
test_datagen = ImageDataGenerator(rescale=1.0 / 255)
test_generator = test_datagen.flow_from_directory(
test_dir,
target_size=(32, 32),
batch_size=batch_size,
class_mode="binary",
shuffle=False,
)
# Make predictions on the test data
predictions = model.predict(test_generator)
labels = [0 if pred < 0.5 else 1 for pred in predictions]
# Calculate accuracy
accuracy = np.sum(np.array(test_generator.labels) == np.array(labels)) / len(labels)
# Print the accuracy
print("\nAccuracy:", accuracy)
cm = confusion_matrix(test_generator.labels, labels)
print("\nConfusion Matrix:")
print(cm)
# Compute the classification report
class_names = test_generator.class_indices.keys()
classification_rep = classification_report(
test_generator.labels, labels, target_names=class_names
)
print("\nClassification Report:")
print(classification_rep)
# Calculate the average precision (mAP)
mAP = average_precision_score(test_generator.labels, predictions)
print("\nMean Average Precision (mAP):", mAP)
# **Result Visualisation**
import seaborn as sns
# Confusion matrix
cm = confusion_matrix(test_generator.labels, labels)
plt.figure(figsize=(8, 6))
sns.heatmap(
cm,
annot=True,
cmap="Blues",
fmt="d",
xticklabels=class_names,
yticklabels=class_names,
)
plt.xlabel("Predicted Labels")
plt.ylabel("True Labels")
plt.title("Confusion Matrix")
plt.show()
# Loss plot
plt.figure(figsize=(8, 6))
plt.plot(history.history["loss"], label="Training Loss")
plt.xlabel("Epochs")
plt.ylabel("Loss")
plt.title("Training Loss")
plt.legend()
plt.show()
from sklearn.metrics import precision_recall_curve
# Calculate precision and recall
precision, recall, _ = precision_recall_curve(test_generator.labels, predictions)
# Plot precision-recall curve
plt.plot(recall, precision)
plt.xlabel("Recall")
plt.ylabel("Precision")
plt.title("Precision-Recall Curve")
plt.grid(True)
plt.show()
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_curve, auc
# Calculate precision, recall, and thresholds
precision, recall, thresholds = precision_recall_curve(
    test_generator.labels, predictions
)
# Calculate the F1-score at each threshold; precision and recall have one more entry
# than thresholds, so the last value is dropped, and a small epsilon avoids division by zero
f1_scores = 2 * (precision * recall) / (precision + recall + 1e-8)
# Calculate the area under the precision-recall curve (AUC)
auc_score = auc(recall, precision)
# Plot the F1-score as a function of the decision threshold
plt.plot(
    thresholds, f1_scores[:-1], label="F1 vs. threshold (PR AUC = {:.2f})".format(auc_score)
)
plt.xlabel("Threshold")
plt.ylabel("F1 score")
plt.title("F1 Curve")
plt.legend()
plt.show()
# Confusion matrix
cm = confusion_matrix(test_generator.labels, labels)
cm_percent = cm / cm.sum(axis=1).reshape(-1, 1) * 100
plt.figure(figsize=(8, 6))
sns.heatmap(
cm_percent,
annot=True,
cmap="Blues",
fmt=".1f",
xticklabels=class_names,
yticklabels=class_names,
)
plt.xlabel("Predicted Labels")
plt.ylabel("True Labels")
plt.title("Confusion Matrix")
plt.show()
# Select random samples from the test data (indices refer to individual images)
sample_indices = np.random.choice(len(predictions), size=10, replace=False)
sample_images = []
sample_actual_labels = []
sample_predicted_labels = []
sample_probabilities = []
for i in sample_indices:
    # Locate the image inside its batch (the generator yields batches of batch_size images)
    batch_idx, offset = divmod(int(i), batch_size)
    images, actual_labels = test_generator[batch_idx]
    sample_images.append(images[offset])  # the sampled image
    sample_actual_labels.append(actual_labels[offset])  # its true label
    # predictions and labels follow the generator order because shuffle=False
    sample_predicted_labels.append(labels[i])
    sample_probabilities.append(predictions[i][0])
# Calculate the subplot layout based on the number of sample images
num_images = len(sample_images)
num_rows = int(np.ceil(num_images / 2))
num_cols = min(num_images, 2)
# Plot the sample images with labels and probabilities
plt.figure(figsize=(12, 6))
for i in range(len(sample_images)):
plt.subplot(num_rows, num_cols, i + 1)
plt.imshow(sample_images[i])
actual_label = "FAKE" if sample_actual_labels[i] == 0 else "REAL"
predicted_label = "FAKE" if sample_predicted_labels[i] == 0 else "REAL"
plt.title(
f"Actual: {actual_label}, Predicted: {predicted_label}\nProbability: {sample_probabilities[i]:.2f}"
)
plt.axis("off")
plt.tight_layout()
plt.show()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/814/129814933.ipynb
|
cifake-real-and-ai-generated-synthetic-images
|
birdy654
|
[{"Id": 129814933, "ScriptId": 38450781, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 11988597, "CreationDate": "05/16/2023 16:53:35", "VersionNumber": 4.0, "Title": "Fake vs. Real Image Classification using VGG16", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 306.0, "LinesInsertedFromPrevious": 31.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 275.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
|
[{"Id": 186190337, "KernelVersionId": 129814933, "SourceDatasetVersionId": 5256696}]
|
[{"Id": 5256696, "DatasetId": 3041726, "DatasourceVersionId": 5329502, "CreatorUserId": 2039603, "LicenseName": "Other (specified in description)", "CreationDate": "03/28/2023 16:00:29", "VersionNumber": 3.0, "Title": "CIFAKE: Real and AI-Generated Synthetic Images", "Slug": "cifake-real-and-ai-generated-synthetic-images", "Subtitle": "Can Computer Vision detect when images have been generated by AI?", "Description": "# CIFAKE: Real and AI-Generated Synthetic Images\nThe quality of AI-generated images has rapidly increased, leading to concerns of authenticity and trustworthiness.\n\nCIFAKE is a dataset that contains 60,000 synthetically-generated images and 60,000 real images (collected from CIFAR-10). Can computer vision techniques be used to detect when an image is real or has been generated by AI?\n\nFurther information on this dataset can be found here: [Bird, J.J., Lotfi, A. (2023). CIFAKE: Image Classification and Explainable Identification of AI-Generated Synthetic Images. arXiv preprint arXiv:2303.14126.](https://arxiv.org/abs/2303.14126)\n\n\n\n## Dataset details\nThe dataset contains two classes - REAL and FAKE. \n\nFor REAL, we collected the images from Krizhevsky & Hinton's [CIFAR-10 dataset](https://www.cs.toronto.edu/~kriz/cifar.html)\n\nFor the FAKE images, we generated the equivalent of CIFAR-10 with Stable Diffusion version 1.4\n\nThere are 100,000 images for training (50k per class) and 20,000 for testing (10k per class)\n\n## Papers with Code\nThe dataset and all studies using it are linked using [Papers with Code](https://paperswithcode.com/dataset/cifake-real-and-ai-generated-synthetic-images)\n[https://paperswithcode.com/dataset/cifake-real-and-ai-generated-synthetic-images](https://paperswithcode.com/dataset/cifake-real-and-ai-generated-synthetic-images)\n\n\n## References\nIf you use this dataset, you **must** cite the following sources\n\n[Krizhevsky, A., & Hinton, G. (2009). Learning multiple layers of features from tiny images.](https://www.cs.toronto.edu/~kriz/learning-features-2009-TR.pdfl)\n\n[Bird, J.J., Lotfi, A. (2023). CIFAKE: Image Classification and Explainable Identification of AI-Generated Synthetic Images. arXiv preprint arXiv:2303.14126.](https://arxiv.org/abs/2303.14126)\n\nReal images are from Krizhevsky & Hinton (2009), fake images are from Bird & Lotfi (2023). 
The Bird & Lotfi study is a preprint currently available on [ArXiv](https://arxiv.org/abs/2303.14126) and this description will be updated when the paper is published.\n\n## Notes\n\nThe updates to the dataset on the 28th of March 2023 did not change anything; the file formats \".jpeg\" were renamed \".jpg\" and the root folder was uploaded to meet Kaggle's usability requirements.\n\n## License\nThis dataset is published under the [same MIT license as CIFAR-10](https://github.com/wichtounet/cifar-10/blob/master/LICENSE):\n\n*Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the \"Software\"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:*\n\n*The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.*\n\n*THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.*", "VersionNotes": "Kaggle compatibility fix (no actual changes)", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3041726, "CreatorUserId": 2039603, "OwnerUserId": 2039603.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5256696.0, "CurrentDatasourceVersionId": 5329502.0, "ForumId": 3081274, "Type": 2, "CreationDate": "03/24/2023 13:22:42", "LastActivityDate": "03/24/2023", "TotalViews": 13728, "TotalDownloads": 1803, "TotalVotes": 46, "TotalKernels": 15}]
|
[{"Id": 2039603, "UserName": "birdy654", "DisplayName": "Jordan J. Bird", "RegisterDate": "07/03/2018", "PerformanceTier": 2}]
|
| false | 0 | 3,446 | 1 | 4,489 | 3,446 |
||
129814734
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# # Holdout Cross Validation
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
iris = load_iris()
X = iris.data
Y = iris.target
print("Size of Dataset {}".format(len(X)))
logreg = LogisticRegression()
x_train, x_test, y_train, y_test = train_test_split(
X, Y, test_size=0.3, random_state=42
)
logreg.fit(x_train, y_train)
predict = logreg.predict(x_test)
print(
"Accuracy score on training set is {}".format(
accuracy_score(logreg.predict(x_train), y_train)
)
)
print("Accuracy score on test set is {}".format(accuracy_score(predict, y_test)))
# # K-Fold Cross Validation
from sklearn.datasets import load_iris
from sklearn.model_selection import cross_val_score, KFold
from sklearn.linear_model import LogisticRegression
iris = load_iris()
X = iris.data
Y = iris.target
logreg = LogisticRegression()
kf = KFold(n_splits=5)
score = cross_val_score(logreg, X, Y, cv=kf)
print("Cross Validation Scores are {}".format(score))
print("Average Cross Validation score :{}".format(score.mean()))
# # Stratified K-Fold Cross-Validation
from sklearn.datasets import load_iris
from sklearn.model_selection import cross_val_score, StratifiedKFold
from sklearn.linear_model import LogisticRegression
iris = load_iris()
X = iris.data
Y = iris.target
logreg = LogisticRegression()
skf = StratifiedKFold(n_splits=5)
score = cross_val_score(logreg, X, Y, cv=skf)
print("Cross Validation Scores are {}".format(score))
print("Average Cross Validation score :{}".format(score.mean()))
# # Leave P Out cross-validation
from sklearn.model_selection import LeavePOut, cross_val_score
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
iris = load_iris()
X = iris.data
Y = iris.target
lpo = LeavePOut(p=2)
lpo.get_n_splits(X)
tree = RandomForestClassifier(n_estimators=10, max_depth=5, n_jobs=-1)
score = cross_val_score(tree, X, Y, cv=lpo)
print("Cross Validation Scores are {}".format(score))
print("Average Cross Validation score :{}".format(score.mean()))
# # Time Series Cross Validation
import numpy as np
from sklearn.model_selection import TimeSeriesSplit
X = np.array([[1, 2], [3, 4], [1, 2], [3, 4], [1, 2], [3, 4]])
y = np.array([1, 2, 3, 4, 5, 6])
time_series = TimeSeriesSplit()
print(time_series)
for train_index, test_index in time_series.split(X):
print("TRAIN:", train_index, "TEST:", test_index)
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
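# A minimal sketch (hypothetical model choice) of fitting an estimator per time split:
# each split trains only on past observations and predicts the ones that follow.
from sklearn.linear_model import LinearRegression
for train_index, test_index in time_series.split(X):
    ts_model = LinearRegression().fit(X[train_index], y[train_index])
    print("Test indices {} -> predictions {}".format(test_index, ts_model.predict(X[test_index])))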
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/814/129814734.ipynb
| null | null |
[{"Id": 129814734, "ScriptId": 38606836, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 9768901, "CreationDate": "05/16/2023 16:51:40", "VersionNumber": 1.0, "Title": "crossValidationTechniques", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 92.0, "LinesInsertedFromPrevious": 92.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
| null | null | null | null |
| false | 0 | 1,031 | 1 | 1,031 | 1,031 |
||
129814140
|
<jupyter_start><jupyter_text>Dating App Fame & Behavior

```
➡️ There are total 3 datasets containing valuable information.
➡️ Understand people's fame and behavior's on a dating app platform.
```
| Column Name | Description |
|---------------------|------------------------------|
| Age | The age of the user. |
| Number of Users | The total number of users. |
| Percent Want Chats | Percentage of users who want chats. |
| Percent Want Friends| Percentage of users who want friendships. |
| Percent Want Dates | Percentage of users who want romantic dates. |
| Mean Kisses Received| Average number of kisses received by users. |
| Mean Visits Received| Average number of profile visits received by users. |
| Mean Followers | Average number of followers for each user. |
| Mean Languages Known| Average number of languages known by users. |
| Total Want Chats | Total count of users interested in chats. |
| Total Want Friends | Total count of users looking for friendships. |
| Total Want Dates | Total count of users seeking romantic dates. |
| Total Kisses Received| Overall count of kisses received by users. |
| Total Visits Received| Overall count of profile visits received by users. |
| Total Followers | Overall count of followers for all users. |
| Total Languages Spoken| Total count of languages spoken by all users. |
# SUMMARY
When Dating apps like Tinder were becoming viral, people wanted to have the best profile in order to get more matches and more potential encounters.
Unlike previous dating platforms, these new ones emphasized the mutuality of attraction before allowing any two people to get in touch and chat. This made it all the more important to create the best profile in order to make the best first impression.
In parallel, we humans have always been in awe of charismatic and inspiring people. More charismatic people tend to be followed and listened to by more people.
Through their metrics such as the number of friends/followers, social networks give some ways of "measuring" the potential charisma of some people.
With all of that in mind, one can then ask:
What makes a great user profile?
How to make the best first impression in order to get more matches (and ultimately find love, or new friendships)?
What makes a person charismatic?
How do charismatic people present themselves?
In order to try and understand those different social questions, I decided to create a dataset of user profile information using the social network Lovoo when it came out. By using different methodologies, I was able to gather user profile data, as well as some usually unavailable metrics (such as the number of profile visits).
# Content
The dataset contains user profile infos of users of the website Lovoo.
The dataset was gathered during spring 2015 (April, May). At that time, Lovoo was expanding in European countries (among others), while Tinder was trending both in America and in Europe.
At that time the iOS version of the Lovoo app was in version 3.
Accessory image data
The dataset references pictures (field pictureId) of user profiles. These pictures are also available for a fraction of users but have not been uploaded and should be asked separately.
The idea when gathering the profile pictures was to determine whether some correlations could be identified between a profile picture and the reputation or success of a given profile. Since first impression matters, a sound hypothesis to make is that the profile picture might have a great influence on the number of profile visits, matches and so on. Do not forget that only a fraction of a user's profile is seen when browsing through a list of users.
https://s1.dmcdn.net/v/BnWkG1M7WuJDq2PKP/x480
Details about collection methodology
In order to gather the data, I developed a set of tools that would save the data while browsing through profiles and doing searches. Because of this approach (and the constraints that forced me to develop this approach) I could only gather user profiles that were recommended by Lovoo's algorithm for 2 profiles I created for this purpose (male, open to friends & chats & dates). That is why there are only female users in the dataset.
Further work could be done to fetch similar data for both genders or other age ranges.
Regarding the number of user profiles
It turned out that the recommendation algorithm always seemed to output the same set of user profiles. This meant Lovoo's algorithm was probably heavily relying on settings like location (to recommend more people nearby than people in different places or countries) and maybe cookies. This diminished the number of different user profiles that would be presented and included in the dataset.
Inspiration
As mentioned in the introduction, there are a lot of questions we can answer using a dataset such as this one. Some questions are related to
popularity, charisma
census and demographic studies.
Statistics about the interest of people joining dating apps (making friends, finding someone to date, finding true love, ...).
Detecting influencers / potential influencers and studying them
Previously mentioned:
What makes a great user profile?
How to make the best first impression in order to get more matches (and ultimately find love, or new friendships)?
What makes a person charismatic?
How do charismatic people present themselves?
Other works:
Kaggle dataset identifier: lovoo-dating-app-dataset
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
from matplotlib import pyplot as plt
import seaborn as sns
from autoviz.classify_method import data_cleaning_suggestions
from autoviz.AutoViz_Class import AutoViz_Class
AV = AutoViz_Class()
df1 = pd.read_csv(
"/kaggle/input/lovoo-dating-app-dataset/interests-of-users-by-age.csv"
)
df2 = pd.read_csv(
"/kaggle/input/lovoo-dating-app-dataset/lovoo_v3_users_api-results.csv"
)
df3 = pd.read_csv("/kaggle/input/lovoo-dating-app-dataset/lovoo_v3_users_instances.csv")
df1 = df1.rename(
columns={
"age": "Age",
"nbusers": "Number of Users",
"percentwantchats": "Percent Want Chats",
"percentwantfriends": "Percent Want Friends",
"percentwantdates": "Percent Want Dates",
"meankissesreceived": "Mean Kisses Received",
"meanvisitsreceived": "Mean Visits Received",
"meanfollowers": "Mean Followers",
"meanlanguagesknown": "Mean Languages Known",
"totalwantchants": "Total Want Chats",
"totalwantfriends": "Total Want Friends",
"totalwantdates": "Total Want Dates",
"totalkissesreceive": "Total Kisses Received",
"totalvisitsreceived": "Total Visits Received",
"totalfollowers": "Total Followers",
"totallanguagesspoken": "Total Languages Spoken",
}
)
# # Data Overview
df1.head()
# # Correlation analysis
fig, ax = plt.subplots(figsize=(10, 6))
ax.scatter(
df1["Age"], df1["Percent Want Chats"], color="red", label="Percent Want Chats"
)
ax.scatter(
df1["Age"], df1["Percent Want Friends"], color="blue", label="Percent Want Friends"
)
ax.scatter(
df1["Age"], df1["Percent Want Dates"], color="green", label="Percent Want Dates"
)
ax.set_xlabel("Age")
ax.set_ylabel("Percentage")
ax.set_title("User Preferences by Age")
ax.legend()
plt.show()
fig, ax = plt.subplots(figsize=(10, 6))
ax.scatter(
df1["Number of Users"],
df1["Percent Want Chats"],
color="red",
label="Percent Want Chats",
)
ax.scatter(
df1["Number of Users"],
df1["Percent Want Friends"],
color="blue",
label="Percent Want Friends",
)
ax.scatter(
df1["Number of Users"],
df1["Percent Want Dates"],
color="green",
label="Percent Want Dates",
)
ax.set_xlabel("Number of Users")
ax.set_ylabel("Percentage")
ax.set_title("User Preferences by Number of Users")
ax.legend()
plt.show()
age_user_preferences_corr = df1["Age"].corr(df1["Percent Want Chats"])
kisses_visits_corr = df1["Mean Kisses Received"].corr(df1["Mean Visits Received"])
print("Correlation between age and user preferences: ", age_user_preferences_corr)
print(
"Correlation between mean kisses received and mean visits received: ",
kisses_visits_corr,
)
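# A broader view (a sketch): correlation matrix over all numeric columns of df1.
plt.figure(figsize=(12, 8))
sns.heatmap(
    df1.select_dtypes(include=np.number).corr(), annot=True, fmt=".2f", cmap="coolwarm"
)
plt.title("Correlation Matrix of Numeric Features")
plt.show()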
# # User behavior analysis
percent_chat = df1["Percent Want Chats"].mean()
percent_friends = df1["Percent Want Friends"].mean()
percent_dates = df1["Percent Want Dates"].mean()
print("Percentage of users interested in chats: {:.2f}%".format(percent_chat))
print("Percentage of users interested in friendships: {:.2f}%".format(percent_friends))
print("Percentage of users interested in dates: {:.2f}%".format(percent_dates))
percent_chat = df1["Percent Want Chats"].mean()
percent_friends = df1["Percent Want Friends"].mean()
percent_dates = df1["Percent Want Dates"].mean()
fig, ax = plt.subplots(figsize=(8, 6))
ax.bar(["Chats", "Friends", "Dates"], [percent_chat, percent_friends, percent_dates])
ax.set_ylabel("Percentage")
ax.set_title("User Preferences")
plt.show()
# # Influence and charisma study
corr_followers_kisses = df1["Mean Followers"].corr(df1["Mean Kisses Received"])
corr_followers_visits = df1["Mean Followers"].corr(df1["Mean Visits Received"])
corr_followers_languages = df1["Mean Followers"].corr(df1["Mean Languages Known"])
print(
"Correlation between average number of followers and mean kisses received: ",
corr_followers_kisses,
)
print(
"Correlation between average number of followers and mean visits received: ",
corr_followers_visits,
)
print(
"Correlation between average number of followers and mean languages known: ",
corr_followers_languages,
)
# # Demographic study
fig, ax = plt.subplots(figsize=(10, 6))
sns.kdeplot(data=df1, x="Age", shade=True, ax=ax)
ax.set_xlabel("Age")
ax.set_ylabel("Density")
ax.set_title("Age Distribution (KDE Plot)")
plt.show()
df1 = df1.dropna(subset=["Mean Languages Known"])
df1["Mean Languages Known"] = pd.to_numeric(
df1["Mean Languages Known"], errors="coerce"
)
language_counts = df1["Mean Languages Known"].value_counts().sort_index()
plt.figure(figsize=(10, 6))
plt.bar(language_counts.index.astype(str), language_counts.values)
plt.xlabel("Number of Languages Known")
plt.ylabel("Count")
plt.title("Language Proficiency")
plt.show()
# # User engagement analysis
total_chats = df1["Total Want Chats"].sum()
total_friends = df1["Total Want Friends"].sum()
total_dates = df1["Total Want Dates"].sum()
print("Total users interested in chats: ", total_chats)
print("Total users interested in friendships: ", total_friends)
print("Total users interested in dates: ", total_dates)
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/814/129814140.ipynb
|
lovoo-dating-app-dataset
|
utkarshx27
|
[{"Id": 129814140, "ScriptId": 38604134, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13364933, "CreationDate": "05/16/2023 16:45:15", "VersionNumber": 1.0, "Title": "Dating App Users Data Analysis", "EvaluationDate": "05/16/2023", "IsChange": true, "TotalLines": 137.0, "LinesInsertedFromPrevious": 137.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 1}]
|
[{"Id": 186189344, "KernelVersionId": 129814140, "SourceDatasetVersionId": 5682645}]
|
[{"Id": 5682645, "DatasetId": 3266924, "DatasourceVersionId": 5758213, "CreatorUserId": 13364933, "LicenseName": "CC0: Public Domain", "CreationDate": "05/14/2023 12:26:00", "VersionNumber": 1.0, "Title": "Dating App Fame & Behavior", "Slug": "lovoo-dating-app-dataset", "Subtitle": "Understand people's fame and behavior's on a dating app platform", "Description": "\n```\n\u27a1\ufe0f There are total 3 datasets containing valuable information.\n\u27a1\ufe0f Understand people's fame and behavior's on a dating app platform.\n```\n| Column Name | Description |\n|---------------------|------------------------------|\n| Age | The age of the user. |\n| Number of Users | The total number of users. |\n| Percent Want Chats | Percentage of users who want chats. |\n| Percent Want Friends| Percentage of users who want friendships. |\n| Percent Want Dates | Percentage of users who want romantic dates. |\n| Mean Kisses Received| Average number of kisses received by users. |\n| Mean Visits Received| Average number of profile visits received by users. |\n| Mean Followers | Average number of followers for each user. |\n| Mean Languages Known| Average number of languages known by users. |\n| Total Want Chats | Total count of users interested in chats. |\n| Total Want Friends | Total count of users looking for friendships. |\n| Total Want Dates | Total count of users seeking romantic dates. |\n| Total Kisses Received| Overall count of kisses received by users. |\n| Total Visits Received| Overall count of profile visits received by users. |\n| Total Followers | Overall count of followers for all users. |\n| Total Languages Spoken| Total count of languages spoken by all users. |\n\n\n# SUMMARY\nWhen Dating apps like Tinder were becoming viral, people wanted to have the best profile in order to get more matches and more potential encounters.\nUnlike other previous dating platforms, those new ones emphasized on the mutuality of attraction before allowing any two people to get in touch and chat. This made it all the more important to create the best profile in order to get the best first impression.\n\nParallel to that, we Humans have always been in awe before charismatic and inspiring people. The more charismatic people tend to be followed and listened to by more people.\nThrough their metrics such as the number of friends/followers, social networks give some ways of \"measuring\" the potential charisma of some people.\n\nIn regard to all that, one can then think:\n\nwhat makes a great user profile ?\nhow to make the best first impression in order to get more matches (and ultimately find love, or new friendships) ?\nwhat makes a person charismatic ?\nhow do charismatic people present themselves ?\nIn order to try and understand those different social questions, I decided to create a dataset of user profile informations using the social network Lovoo when it came out. By using different methodologies, I was able to gather user profile data, as well as some usually unavailable metrics (such as the number of profile visits).\n\n# Content\nThe dataset contains user profile infos of users of the website Lovoo.\n\nThe dataset was gathered during spring 2015 (april, may). At that time, Lovoo was expanding in european countries (among others), while Tinder was trending both in America and in Europe.\nAt that time the iOS version of the Lovoo app was in version 3.\n\nAccessory image data\nThe dataset references pictures (field pictureId) of user profiles. 
These pictures are also available for a fraction of users but have not been uploaded and should be asked separately.\n\nThe idea when gathering the profile pictures was to determine whether some correlations could be identified between a profile picture and the reputation or success of a given profile. Since first impression matters, a sound hypothesis to make is that the profile picture might have a great influence on the number of profile visits, matches and so on. Do not forget that only a fraction of a user's profile is seen when browsing through a list of users.\n\nhttps://s1.dmcdn.net/v/BnWkG1M7WuJDq2PKP/x480\n\nDetails about collection methodology\nIn order to gather the data, I developed a set of tools that would save the data while browsing through profiles and doing searches. Because of this approach (and the constraints that forced me to develop this approach) I could only gather user profiles that were recommended by Lovoo's algorithm for 2 profiles I created for this purpose occasion (male, open to friends & chats & dates). That is why there are only female users in the dataset.\nAnother work could be done to fetch similar data for both genders or other age ranges.\n\nRegarding the number of user profiles\nIt turned out that the recommendation algorithm always seemed to output the same set of user profiles. This meant Lovoo's algorithm was probably heavily relying on settings like location (to recommend more people nearby than people in different places or countries) and maybe cookies. This diminished the number of different user profiles that would be presented and included in the dataset.\n\nInspiration\nAs mentioned in the introduction, there are a lot of questions we can answer using a dataset such as this one. Some questions are related to\n\npopularity, charisma\ncensus and demographic studies.\nStatistics about the interest of people joining dating apps (making friends, finding someone to date, finding true love, ...).\nDetecting influencers / potential influencers and studying them\nPreviously mentioned:\n\nwhat makes a great user profile ?\nhow to make the best first impression in order to get more matches (and ultimately find love, or new friendships) ?\nwhat makes a person charismatic ?\nhow do charismatic people present themselves ?\nOther works:", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3266924, "CreatorUserId": 13364933, "OwnerUserId": 13364933.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5682645.0, "CurrentDatasourceVersionId": 5758213.0, "ForumId": 3332540, "Type": 2, "CreationDate": "05/14/2023 12:26:00", "LastActivityDate": "05/14/2023", "TotalViews": 7814, "TotalDownloads": 1010, "TotalVotes": 32, "TotalKernels": 4}]
|
[{"Id": 13364933, "UserName": "utkarshx27", "DisplayName": "Utkarsh Singh", "RegisterDate": "01/21/2023", "PerformanceTier": 2}]
|
| false | 3 | 1,761 | 1 | 3,152 | 1,761 |
||
129863873
|
<jupyter_start><jupyter_text>USA Company Insights: Glassdoor Scraped Data 2023
The "**Glassdoor Company Insights: Scraped Data Collection**" dataset is a comprehensive compilation of information gathered from **Glassdoor**, a leading platform for employee reviews and company ratings. This dataset includes a wide range of valuable data points, including company reviews, employee salaries, ratings, and more. With this dataset, researchers, analysts, and businesses can delve into the wealth of insights to gain a deeper understanding of company culture, employee satisfaction, and industry trends. Whether studying market competitiveness, benchmarking salaries, or conducting sentiment analysis, this dataset offers a valuable resource for exploring the experiences and perceptions of employees across various companies.
Kaggle dataset identifier: glassdoor-company-insightsscraped-data-collection
<jupyter_script># The "Glassdoor Company Insights: Scraped Data Collection" dataset is a comprehensive compilation of information gathered from Glassdoor, a leading platform for employee reviews and company ratings. This dataset includes a wide range of valuable data points, including company reviews, employee salaries, ratings, and more. With this dataset, researchers, analysts, and businesses can delve into the wealth of insights to gain a deeper understanding of company culture, employee satisfaction, and industry trends. Whether studying market competitiveness, benchmarking salaries, or conducting sentiment analysis, this dataset offers a valuable resource for exploring the experiences and perceptions of employees across various companies.
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Import Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
pd.set_option("display.max_columns", 500)
pd.set_option("display.max_rows", 500)
# Extracting Data
#
# importing required modules
# from zipfile import ZipFile
# specifying the zip file name
# file_name = "glassdoor_comany.csv.zip"
# opening the zip file in READ mode
# with ZipFile(file_name, 'r') as zip:
# printing all the contents of the zip file
# zip.printdir()
# extracting all the files
# print('Extracting all the files now...')
# zip.extractall()
# print('Done!')
# Read CSV File
df = pd.read_csv("glassdoor_comany.csv", encoding="cp1252")
# DataFrame Head
df.head()
# Checking Unique
for i in df.columns:
print(i, "---->", df[i].unique())
# Checking Null Values
for i in df.columns:
print(i, "---->", df[i].isnull().sum())
# Fill NA with "Unknown"
df.fillna("Unknown", inplace=True)
# Drop duplicates (assign the result back so the change actually takes effect)
df = df.drop_duplicates()
df["Industry Type"]
# Shape
df.shape
df.columns
df.info()
# Exploratory data analysis (EDA)
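# The review/salary counts are stored as strings such as "3.4K"; the loop below
# strips the "K" and any commas, then scales the values by 1000.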
x = ["Company reviews", "Company salaries"]
for i in x:
df[i] = df[i].apply(lambda x: x.strip("K").replace(",", "")).astype(float) * 1000
plt.figure(figsize=(20, 5))
sns.histplot(data=df)
plt.show()
df.rename(columns={"Industry Type": "Industry_Type"}, inplace=True)
plt.figure(figsize=(20, 10))
sns.countplot(
data=df, x=df["Industry_Type"], order=df.Industry_Type.value_counts().index
)
plt.xticks(rotation=90)
plt.show()
df.rename(columns={"Number of Employees": "Number_of_Employees"}, inplace=True)
plt.figure(figsize=(20, 10))
sns.countplot(
data=df,
x=df["Number_of_Employees"],
order=df.Number_of_Employees.value_counts().index,
)
plt.xticks(rotation=90)
plt.show()
df.rename(columns={"Company Jobs": "Company_Jobs"}, inplace=True)
plt.figure(figsize=(20, 10))
sns.countplot(
data=df, x=df["Company_Jobs"], order=df.Company_Jobs.value_counts().iloc[:20].index
)
plt.xticks(rotation=90)
plt.show()
df["Company salaries"].value_counts()
df.rename(columns={"Company reviews": "Company_reviews"}, inplace=True)
plt.figure(figsize=(20, 10))
sns.countplot(
data=df,
x=df["Company_reviews"],
order=df.Company_reviews.value_counts().iloc[:20].index,
)
plt.xticks(rotation=90)
plt.show()
# df.drop(columns='Company salaries',inplace=True)
df.head()
df.rename(columns={"Company Name": "Company_Name"}, inplace=True)
plt.figure(figsize=(20, 10))
sns.countplot(
data=df, x=df["Company_Name"], order=df.Company_Name.value_counts().iloc[:30].index
)
plt.xticks(rotation=70)
plt.show()
Company__reviews = df.sort_values("Company_reviews", ascending=False).head(10)
Company__reviews
plt.figure(figsize=(10, 5))
plt.bar(Company__reviews["Company_Name"], Company__reviews["Company_reviews"])
plt.title("Bottom Top 10 Company")
plt.xticks(rotation=60)
plt.show()
df.rename(columns={"Company salaries": "Company_salaries"}, inplace=True)
Company__salaries = df.sort_values("Company_salaries", ascending=False).head(10)
Company__salaries
plt.figure(figsize=(10, 10))
sns.pairplot(data=df)
plt.show()
plt.figure(figsize=(10, 5))
plt.bar(Company__salaries["Company_Name"], Company__salaries["Company_salaries"])
plt.title("Top 10 company vote against company salaries")
plt.xticks(rotation=60)
plt.show()
# Model Building
from sklearn.preprocessing import LabelEncoder
Model__ = df.copy()  # work on a copy so the original dataframe is left unchanged
labelencoder = LabelEncoder()
output1 = labelencoder.fit_transform(Model__["Number_of_Employees"])
output2 = labelencoder.fit_transform(Model__["Industry_Type"])
Model__.drop("Number_of_Employees", axis=1, inplace=True)
Model__.drop("Industry_Type", axis=1, inplace=True)
Model__
Model__["Number_of_Employees"] = output1
Model__["Industry_Type"] = output2
Model__.head()
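# The notebook stops before actually fitting a model, so here is a minimal
# hypothetical sketch (assumption: "Company_salaries" as target, the numeric and
# encoded columns as features) of how a model could be built from Model__:
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import r2_score
features = Model__[["Company_reviews", "Number_of_Employees", "Industry_Type"]]
target = Model__["Company_salaries"]
X_train, X_test, y_train, y_test = train_test_split(
    features, target, test_size=0.2, random_state=42
)
rf = RandomForestRegressor(n_estimators=100, random_state=42)
rf.fit(X_train, y_train)
print("R^2 on the hold-out set:", r2_score(y_test, rf.predict(X_test)))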
plt.figure(figsize=(10, 5))
sns.heatmap(Model__.select_dtypes(include=np.number).corr())
plt.title("Correlation Matrix")
plt.xticks(rotation=20, fontsize=10)
plt.yticks(rotation=20, fontsize=10)
plt.show()
##END
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/863/129863873.ipynb
|
glassdoor-company-insightsscraped-data-collection
|
joyshil0599
|
[{"Id": 129863873, "ScriptId": 38625261, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10864913, "CreationDate": "05/17/2023 03:50:17", "VersionNumber": 1.0, "Title": "Glassdoor Company Insights:Scraped Data Collection", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 187.0, "LinesInsertedFromPrevious": 187.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 186261612, "KernelVersionId": 129863873, "SourceDatasetVersionId": 5664904}]
|
[{"Id": 5664904, "DatasetId": 3256232, "DatasourceVersionId": 5740372, "CreatorUserId": 13861238, "LicenseName": "CC0: Public Domain", "CreationDate": "05/11/2023 18:03:49", "VersionNumber": 1.0, "Title": "USA Company Insights: Glassdoor Scraped Data 2023", "Slug": "glassdoor-company-insightsscraped-data-collection", "Subtitle": "Unveiling Valuable Insights: Scraped Data from Glassdoor on USA's Top Companies", "Description": "The \"**Glassdoor Company Insights: Scraped Data Collection**\" dataset is a comprehensive compilation of information gathered from **Glassdoor**, a leading platform for employee reviews and company ratings. This dataset includes a wide range of valuable data points, including company reviews, employee salaries, ratings, and more. With this dataset, researchers, analysts, and businesses can delve into the wealth of insights to gain a deeper understanding of company culture, employee satisfaction, and industry trends. Whether studying market competitiveness, benchmarking salaries, or conducting sentiment analysis, this dataset offers a valuable resource for exploring the experiences and perceptions of employees across various companies.", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3256232, "CreatorUserId": 13861238, "OwnerUserId": 13861238.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5664904.0, "CurrentDatasourceVersionId": 5740372.0, "ForumId": 3321716, "Type": 2, "CreationDate": "05/11/2023 18:03:49", "LastActivityDate": "05/11/2023", "TotalViews": 2698, "TotalDownloads": 397, "TotalVotes": 27, "TotalKernels": 4}]
|
[{"Id": 13861238, "UserName": "joyshil0599", "DisplayName": "Joy Shil", "RegisterDate": "02/24/2023", "PerformanceTier": 2}]
|
# The "Glassdoor Company Insights: Scraped Data Collection" dataset is a comprehensive compilation of information gathered from Glassdoor, a leading platform for employee reviews and company ratings. This dataset includes a wide range of valuable data points, including company reviews, employee salaries, ratings, and more. With this dataset, researchers, analysts, and businesses can delve into the wealth of insights to gain a deeper understanding of company culture, employee satisfaction, and industry trends. Whether studying market competitiveness, benchmarking salaries, or conducting sentiment analysis, this dataset offers a valuable resource for exploring the experiences and perceptions of employees across various companies.
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
# Import Libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import warnings
warnings.filterwarnings("ignore")
pd.set_option("display.max_columns", 500)
pd.set_option("display.max_rows", 500)
# Extracting Data
#
# importing required modules
# from zipfile import ZipFile
# specifying the zip file name
# file_name = "glassdoor_comany.csv.zip"
# opening the zip file in READ mode
# with ZipFile(file_name, 'r') as zip:
# printing all the contents of the zip file
# zip.printdir()
# extracting all the files
# print('Extracting all the files now...')
# zip.extractall()
# print('Done!')
# Read CSV File
df = pd.read_csv("glassdoor_comany.csv", encoding="cp1252")
# DataFrame Head
df.head()
# Checking Unique
for i in df.columns:
print(i, "---->", df[i].unique())
# Checking Null Values
for i in df.columns:
print(i, "---->", df[i].isnull().sum())
# Fill NA with "Unknown"
df.fillna("Unknown", inplace=True)
# Drop Dublicates
df.drop_duplicates()
df["Industry Type"]
# Shape
df.shape
df.columns
df.info()
# Exploratory data analysis (EDA)
x = ["Company reviews", "Company salaries"]
for i in x:
df[i] = df[i].apply(lambda x: x.strip("K").replace(",", "")).astype(float) * 1000
plt.figure(figsize=(20, 5))
sns.histplot(data=df)
plt.show()
df.rename(columns={"Industry Type": "Industry_Type"}, inplace=True)
plt.figure(figsize=(20, 10))
sns.countplot(
data=df, x=df["Industry_Type"], order=df.Industry_Type.value_counts().index
)
plt.xticks(rotation=90)
plt.show()
df.rename(columns={"Number of Employees": "Number_of_Employees"}, inplace=True)
plt.figure(figsize=(20, 10))
sns.countplot(
data=df,
x=df["Number_of_Employees"],
order=df.Number_of_Employees.value_counts().index,
)
plt.xticks(rotation=90)
plt.show()
df.rename(columns={"Company Jobs": "Company_Jobs"}, inplace=True)
plt.figure(figsize=(20, 10))
sns.countplot(
data=df, x=df["Company_Jobs"], order=df.Company_Jobs.value_counts().iloc[:20].index
)
plt.xticks(rotation=90)
plt.show()
df["Company salaries"].value_counts()
df.rename(columns={"Company reviews": "Company_reviews"}, inplace=True)
plt.figure(figsize=(20, 10))
sns.countplot(
data=df,
x=df["Company_reviews"],
order=df.Company_reviews.value_counts().iloc[:20].index,
)
plt.xticks(rotation=90)
plt.show()
# df.drop(columns='Company salaries',inplace=True)
df.head()
df.rename(columns={"Company Name": "Company_Name"}, inplace=True)
plt.figure(figsize=(20, 10))
sns.countplot(
data=df, x=df["Company_Name"], order=df.Company_Name.value_counts().iloc[:30].index
)
plt.xticks(rotation=70)
plt.show()
Company__reviews = df.sort_values("Company_reviews", ascending=False).head(10)
Company__reviews
plt.figure(figsize=(10, 5))
plt.bar(Company__reviews["Company_Name"], Company__reviews["Company_reviews"])
plt.title("Bottom Top 10 Company")
plt.xticks(rotation=60)
plt.show()
df.rename(columns={"Company salaries": "Company_salaries"}, inplace=True)
Company__salaries = df.sort_values("Company_salaries", ascending=False).head(10)
Company__salaries
plt.figure(figsize=(10, 10))
sns.pairplot(data=df)
plt.show()
plt.figure(figsize=(10, 5))
plt.bar(Company__salaries["Company_Name"], Company__salaries["Company_salaries"])
plt.title("Top 10 company vote against company salaries")
plt.xticks(rotation=60)
plt.show()
# Model Building
from sklearn.preprocessing import LabelEncoder
Model__ = df
labelencoder = LabelEncoder()
output1 = labelencoder.fit_transform(Model__["Number_of_Employees"])
output2 = labelencoder.fit_transform(Model__["Industry_Type"])
Model__.drop("Number_of_Employees", axis=1, inplace=True)
Model__.drop("Industry_Type", axis=1, inplace=True)
Model__
Model__["Number_of_Employees"] = output1
Model__["Industry_Type"] = output2
Model__.head()
plt.figure(figsize=(10, 5))
sns.heatmap(df.corr())
plt.title("Matrix")
plt.xticks(rotation=20, fontsize=10)
plt.yticks(rotation=20, fontsize=10)
plt.show()
##END
| false | 0 | 1,688 | 0 | 1,878 | 1,688 |
||
129543473
|
# Importing all the libraries that will be used throughout the code
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.preprocessing import OrdinalEncoder
from sklearn.impute import SimpleImputer
from xgboost import XGBRegressor
from sklearn.model_selection import train_test_split
from sklearn.linear_model import Lasso
from sklearn.metrics import mean_squared_log_error
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# Unzipping the files
# Checking how many columns of each data type the dataset has.
train = pd.read_csv("./train.csv", parse_dates=["timestamp"])
df_dtype = train.dtypes.reset_index()
df_dtype.columns = ["qtd_atributos", "tipo"]
df_dtype.groupby("tipo").aggregate("count").reset_index()
# Encode categorical variables as ordinal numbers.
encoder = OrdinalEncoder()
# Storing the "object" columns in a list.
cat = [a for a in train.columns if train[a].dtype == "object"]
# Converting the variables in the cat list to ordinal numbers and assigning them back to 'train'.
train[cat] = encoder.fit_transform(train[cat])
# Checking the number of columns per type again.
df_dtype = train.dtypes.reset_index()
df_dtype.columns = ["qtd_atributos", "tipo"]
df_dtype.groupby("tipo").aggregate("count").reset_index()
# Checking the 15 columns with the most NA values in the dataframe, in %.
NA_values = train.isnull().sum() * 100 / len(train)
df_NA = pd.DataFrame({"col": train.columns, "missing_values": NA_values})
df_NA = df_NA[df_NA.missing_values != 0]
df_NA.sort_values("missing_values", inplace=True, ascending=False)
df_NA.head(15)
# Removing all columns with more than 40% of NA values
NA_values = df_NA[df_NA.missing_values > 40]
column = NA_values.col.to_list()
train.drop(column, inplace=True, axis=1)
train.shape
# Removing the datetime column
train.drop("timestamp", inplace=True, axis=1)
imputer = SimpleImputer(strategy="most_frequent")
Ptrain = pd.DataFrame(imputer.fit_transform(train))
Ptrain.columns = train.columns
Ptrain.shape
# Using XGBoost to identify the 20 most relevant columns for the property price.
X = Ptrain.drop("price_doc", axis=1)
y = Ptrain["price_doc"]
model = XGBRegressor(n_estimators=200, max_depth=13, random_state=23, eta=0.01)
model.fit(X, y)
important_columns = pd.DataFrame(
{"col": X.columns, "importance": model.feature_importances_}
)
important_columns.sort_values("importance", inplace=True, ascending=False)
important_columns.head(20)
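# Optional visualization (a sketch) of the top-20 feature importances computed above.
import matplotlib.pyplot as plt
top20 = important_columns.head(20)
plt.figure(figsize=(10, 6))
plt.barh(top20.col, top20.importance)
plt.gca().invert_yaxis()
plt.xlabel("Importance")
plt.title("Top 20 XGBoost Feature Importances")
plt.show()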
# Saving all features with importance greater than 0.015 in a variable.
cols = important_columns[important_columns.importance > 0.015]
# Converting the values to a list.
train = Ptrain[cols.col.to_list()]
# Adding the "price_doc" column.
train["price_doc"] = Ptrain.price_doc.values
train.head()
# Removing columns
remove = [
"ecology",
"ID_big_road2",
"cafe_count_3000_price_2500",
"cafe_count_5000_price_2500",
"cafe_count_2000",
"office_sqm_5000",
"cafe_count_1500_price_high",
]
train = Ptrain.drop(remove, axis=1)
train.shape
# Selecting the columns that will be used for training and testing.
cols = [
"id",
"full_sq",
"culture_objects_top_25",
"female_f",
"build_count_monolith",
"cafe_count_3000",
"sport_count_3000",
"price_doc",
]
df_train = pd.read_csv("./train.csv", usecols=cols)
cols.remove("price_doc")
df_test = pd.read_csv("./test.csv", usecols=cols)
# Checking the information about the selected data.
df_test.info()
# Checking how many times each value repeats in the 'build_count_monolith' column
df_train["build_count_monolith"].value_counts()
# Replacing NA values with 1, since the most frequent values are '0' and '2'.
df_train["build_count_monolith"].replace(np.NaN, 1.0, inplace=True)
df_test["build_count_monolith"].replace(np.NaN, 1.0, inplace=True)
# Checking the unique values of the 'culture_objects_top_25' column.
df_train["culture_objects_top_25"].unique()
# Converting the values of the 'culture_objects_top_25' column in the "train" and "test" sets
# into a new column 'culture_objects_top', mapping the values 'yes' and 'no' to '1' and '0'.
df_train["culture_objects_top"] = np.where(
df_train["culture_objects_top_25"] == "yes", 1, 0
)
df_train.drop("culture_objects_top_25", axis=1, inplace=True)
df_test["culture_objects_top"] = np.where(
df_test["culture_objects_top_25"] == "yes", 1, 0
)
df_test.drop("culture_objects_top_25", axis=1, inplace=True)
# Preparing the variables for training.
X = df_train.drop(["id", "price_doc"], axis=1)
y = np.log(df_train.price_doc)
# Setting up the train/test split.
x_train, x_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=23
)
# Using the Lasso regression method.
lasso = Lasso()
lasso.fit(x_train, y_train)
y_pred = lasso.predict(x_test)
# Computing the prediction error as RMSLE.
print("RMSLE: ", mean_squared_log_error(y_test, y_pred, squared=False))
# Building the model to be used for the Test dataset.
X = df_train.drop(["id", "price_doc"], axis=1)
y = np.log(df_train.price_doc)
x_train, x_test, y_train, y_test = train_test_split(
X, y, test_size=0.3, random_state=23
)
xgb = XGBRegressor(n_estimators=286, max_depth=13, random_state=23, eta=0.03)
xgb.fit(x_train, y_train)
y_pred = xgb.predict(x_test)
print("RMSLE: ", mean_squared_log_error(y_test, y_pred, squared=False))
# Preparing the data for submission.
test_ids = df_test.id.values
test_data = df_test.drop("id", axis=1)
predictions = np.exp(xgb.predict(test_data))
sub_preview = pd.DataFrame({"id": test_ids, "price_doc": predictions})
sub_preview.head()
# Saving the results to the submission file.
submission = pd.read_csv("sample_submission.csv")
submission["price_doc"] = predictions
submission.to_csv("submission.csv", index=False)
submission.head()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/543/129543473.ipynb
| null | null |
[{"Id": 129543473, "ScriptId": 38519789, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14612440, "CreationDate": "05/14/2023 17:20:37", "VersionNumber": 1.0, "Title": "AC2 - Regress\u00e3o", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 150.0, "LinesInsertedFromPrevious": 150.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
| false | 0 | 2,115 | 0 | 2,115 | 2,115 |
||
129543098
|
# ## Importing the libraries
import pandas as pd
import json
import numpy as np
import docx
import requests
from bs4 import BeautifulSoup as bs
import glob
import codecs
import pyLDAvis.sklearn
import pyLDAvis
import pyLDAvis.lda_model
from tqdm import tqdm
# pip install python-docx
# # 1.1 Data parsing
# ## 1.1.1 Parsing companies from a docx file
name_company = []
doc = docx.Document("Condidates.docx")
all_paragraphs = doc.paragraphs
for paragraph in all_paragraphs:
if paragraph.text != "":
name_company.append(paragraph.text.strip().lower())
name_company
# ## 1.1.2 Parsing data from the json files
company_list = {
"name_company": [],
"description": [],
"company_field": [],
"rating": [],
"date": [],
"text": [],
}
# Создан словарь для дальнейшего заполнения его набором данных
# Получение всех файлов json
list_json = glob.glob("Data/*.json")
list_json
# Метод считывания файла json
def read_in_utf8(filename):
with codecs.open(filename, "r", encoding="utf_8_sig") as f: # Открытие json файла
return json.load(f)
# Получаем данные из json файла
Companys = []
for file in list_json:
load_file = read_in_utf8(file)
name = file[5:-5]
Companys.append(name.strip())
for j in load_file["refs"]:
if load_file["info"] != None:
company_list["description"].append(load_file["info"]["about"])
company_list["rating"].append(load_file["info"]["rate"])
sss = []
for c in load_file["info"]["industries"]:
sss.append(c + " ")
company_list["company_field"].append(sss)
else:
company_list["description"].append(None)
company_list["rating"].append(None)
company_list["company_field"].append(None)
if j != None:
company_list["text"].append(j[0])
company_list["date"].append(j[1]["day"] + " " + j[1]["month"])
company_list["name_company"].append(name)
Companys
# Вывод 1.1.2: Получен набор данных из json файлов. Во многих записях отсутствует некоторая информация о компании, а именно рейтинг компании, отрасль и описание.
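# Небольшой набросок-дополнение (в исходном ноутбуке его нет): быстро оценим
# количество пропусков (None) по каждому полю собранного словаря company_list.
for key, values in company_list.items():
    missing = sum(v is None for v in values)
    print(f"{key}: {missing} пропусков из {len(values)}")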
# ## 1.1.3 Парсинг информации о компании с Habr'а
# Метод для приведения даты к более удобному формату для дальнейшей работы с ней
def FormatDate(date):
    if date == "nan":
        return "NaN"
    try:
        datee = date.split()
    except:
        return "NaN"
    # Приводим сокращённые названия месяцев к полной форме через словарь
    month_map = {
        "янв": "января", "фев": "февраля", "мар": "марта", "апр": "апреля",
        "май": "мая", "июн": "июня", "июл": "июля", "авг": "августа",
        "сен": "сентября", "окт": "октября", "ноя": "ноября", "дек": "декабря",
    }
    datee[1] = month_map.get(datee[1], datee[1])
    if len(datee) == 3:
        new_date = datee[0] + " " + datee[1] + " " + datee[2]
    else:
        new_date = datee[0] + " " + datee[1]
    return new_date
# Импортируем библиотеки для работы с запросами на сайт
import requests
from bs4 import BeautifulSoup as bs
def GetInfoFinal(text):
names = text
if " " in text:
text = text.replace(" ", "%20")
link = (
"https://habr.com/ru/search/?q="
+ text
+ "&target_type=companies&order=relevance"
)
req = requests.get(link)
soup = bs(req.text, "html.parser")
company_names = soup.find_all("a", class_="tm-company-snippet__title")
if len(company_names) == 0:
return
aboutt = ""
ratingg = ""
Jojo = ""
if len(company_names) != 0:
if company_names[0].text.lower() == names.lower() and (len(company_names) != 0):
linkk = "https://habr.com" + company_names[0].get("href")
reqs = requests.get(linkk)
soupp = bs(reqs.text, "html.parser")
ratingg = soupp.find(
"span",
class_="tm-votes-lever__score-counter tm-votes-lever__score-counter tm-votes-lever__score-counter_rating",
)
aboutt = soupp.find("span", class_="tm-company-profile__content")
indastries = soupp.find_all(
"span", class_="tm-company-profile__categories-wrapper"
)
aboutt = aboutt.text.strip()
ratingg = ratingg.text.strip()
Jojo = ""
indastry = []
for ind in indastries:
indastry.append(ind.text.strip())
Jojo = ", ".join(indastry)
linkkk = (
"https://habr.com/ru/search/?q=" + names + "&target_type=posts&order=relevance"
)
reqqq = requests.get(linkkk)
souppp = bs(reqqq.text, "html.parser")
pages = souppp.find_all("a", class_="tm-pagination__page")
print(pages)
try:
max_page = pages[len(pages) - 1].text.strip()
except:
max_page = 1
max_page = int(max_page)
silki = []
for i in range(0, max_page + 1):
linkkkk = (
"https://habr.com/ru/search/page"
+ str(i)
+ "/?q="
+ text
+ "&target_type=posts&order=relevance"
)
reqqqq = requests.get(linkkkk)
soupppp = bs(reqqqq.text, "html.parser")
linkss = soupppp.find_all("a", class_="tm-title__link")
for j in linkss:
silki.append(j.get("href"))
for k in silki:
link_new = "https://habr.com" + k
req_new = requests.get(link_new)
soup_new = bs(req_new.text, "html.parser")
datee = soup_new.find("span", class_="tm-article-datetime-published")
textt = soup_new.find("div", class_="tm-article-body")
company_list["name_company"].append(names)
if len(aboutt) != 0:
company_list["description"].append(aboutt)
else:
company_list["description"].append(None)
if len(ratingg) != 0:
company_list["rating"].append(ratingg)
else:
company_list["rating"].append(None)
if len(Jojo) != 0:
company_list["company_field"].append(Jojo)
else:
company_list["company_field"].append(None)
if datee is not None:
company_list["date"].append(FormatDate(datee.text))
else:
company_list["date"].append(None)
if textt is not None:
company_list["text"].append(textt.text)
else:
company_list["text"].append(None)
# Используем особенности множеств, чтобы найти несовпадающие записи
need_company = set.difference(set(name_company), set(Companys))
need_company = list(need_company)
print(Companys)
print("------------------")
print(name_company)
print("------------------")
print(need_company)
print(len(need_company))
# Замечаем, что одно название отличается всего одним словом, поэтому удаляем его.
# Индекс 1 подобран вручную: порядок элементов, полученных из множества, не гарантирован.
need_company.pop(1)
print(need_company)
print(len(need_company))
for i in tqdm(need_company):
GetInfoFinal(i)
df = pd.DataFrame.from_dict(data=company_list, orient="index")
df = df.transpose()
df.head(10)
# Сохраняем промежуточные данные в csv
FILE_NAME = "HabrPars.csv"
df.to_csv(FILE_NAME)
len(company_list["name_company"])
# Проверяем набор данных
df.head(10)
df = pd.read_csv("HabrPars.csv")
# Вывод 1.1.3: был получен набор данных из сайта habr и из json файла, данные были записаны в один датафрейм, поэтому объединять их не нужно.
# В результате был получен набор данных состоящий из 3908 записей, также в данном наборе содержатся записи с пустым значением рейтинга компании и отрасли компании, этих данных не было в json файлах.
# # 1.2 Формирование структуры набора данных
# Выделение значимых атрибутов предполагает не полное удаление данных, а только их отбор. Важные признаки можно определить путём нахождения наиболее коррелирующих друг с другом.
# Обоснование удаления атрибутов: в наборе данных есть признак, который не несёт важной информации, - 'description'. Эти данные никак не будут использоваться для определения победителей.
# Удаление ненужных данных
ddf = df
ddf.drop(["description", "Unnamed: 0"], axis=1, inplace=True)
ddf = ddf[ddf["text"].notnull()]
ddf.head(10)
ddf.isnull().sum()
Dates = df["date"]
New_Dates = []
for i in Dates:
New_Dates.append(FormatDate(i))
df["date"] = New_Dates
# Мы оставляем следующие атрибуты:
# __name_company__ - название компании, этот атрибут важен для нас, чтобы мы понимали к какой компании относятся данные;
# __company_field__ - отрасль компании, этот атрибут важен, так как мы смотрим в какой отрасли работает компания и куда делает вклад;
# __rating__ - Рейтинг компании требуется для графика зависимости
# __date__ - дата публикации статьи
# __text__ - текст статьи требуется для обучения и прогнозирования номинации
# __Обоснование не добавления дополнительных полей.__ Было решено, что дополнительные поля не обязательны, так как получить необходимую информацию можно, через анализ текста статьи.
# # 1.3 Предварительная обработка данных
# Импортируем необходимые библиотеки
import string
import re
import pymorphy2
morph = pymorphy2.MorphAnalyzer()
string.punctuation
ddf["rating"] = ddf["rating"].fillna(ddf["rating"].mean())
ddf["company_field"] = ddf["company_field"].fillna(ddf["name_company"])
# Заменил пустые рейтинги средними значениями, а также пустые сферы деятельности компаний заполнил названиями компаний.
ddf.isnull().sum()
# В итоге получил набор данных без пропусков данных и пустых значений
# Создаем методы, удаляющие из текста ненужные символы (знаки препинания, цифры, латинские буквы, лишние пробелы и всё, что не является буквой)
# Метод для удаления всего, что не является буквой
def remove_notalpha(text):
return "".join([i if i.isalpha() else " " for i in text])
# Метод для удаления латинских букв
def remove_latin(text):
return re.sub("[a-z]", "", text, flags=re.I)
# Метод для удаления лишних пробелов
def remove_space(text):
return re.sub(r"\s+", " ", text.strip(), flags=re.I)
# Получаем список стоп слов
import nltk
from nltk.corpus import stopwords
nltk.download("stopwords")
# Пополняем список стоп слов:
stopword = nltk.corpus.stopwords.words("russian")
stopword.extend(
[
"ооо",
"ано",
"ао",
"пао",
"быть",
"а",
"мы",
"с",
"для",
"ещё",
"его",
"также",
"к",
"тем",
"кто",
"чтобы",
"но",
"они",
"будут",
"так",
"где",
"один",
"он ",
"и",
"на",
"но",
"или",
"либо",
"это",
"мб",
"далее",
"дв",
"свой",
"ваш",
"всё",
"очень",
"её",
"ещё",
"вообще",
"наш",
"который",
]
)
# Метод для удаления стоп слов
def remove_stopwords(text):
return [word for word in text if word not in stopword]
from nltk.tokenize import sent_tokenize, word_tokenize
# Создаем метод для токенизации слов
def tokenize(text):
t = word_tokenize(text)
return [token for token in t if token not in stopword]
nltk.download("punkt")
# #### Лемматизация
# Присваиваем словам начальную форму
def lemmatize(text):
res = list()
for word in text:
p = morph.parse(word)[0]
res.append(p.normal_form)
return res
# Для начала приводим текст к нижнему регистру, удаляем символы, которые не являются буквами, удаляем латинские буквы и лишние пробелы, далее проводим токенизацию и удаляем стоп-слова
prep_text1 = [
remove_stopwords(
tokenize(remove_space(remove_latin(remove_notalpha(text.lower()))))
)
for text in ddf["text"]
]
# prep_text1[1]
# После удаления всего лишнего проводится лемматизация текста
prep_text2 = [lemmatize(text) for text in tqdm(prep_text1)]
for i in range(len(prep_text2)):
prep_text2[i] = " ".join(prep_text2[i])
ddf["lemmatize_text"] = prep_text2
Leming_text = prep_text2
# Сохраняем набор данных в csv
FILE_NAME = "HabrLem.csv"
ddf.to_csv(FILE_NAME)
ddf = pd.read_csv("HabrLem.csv")
ddf.head(10)
# Вывод 1.3: была проведена предварительная обработка текста, слова были приведены в начальную форму. Конечные данные были включены в набор данных
# # 1.4 Поиск ключевых слов/n-грамм. Векторизация текстов
# ## 1.4.1 Выбор алгоритмов
# __Мешок слов__ - решает проблему размерности по одной оси. Количество строк определяется количеством документов. Однако, этот метод не учитывает важность того или иного токена, ведь одно слово может повторятся по несколько раз.
# __TF-IDF__ - это способ векторизации текста, отражающий важность слова в документе, а не только частоту его появления.
# __Word Embeddings__ - векторное представление слов. Векторы можно складывать, вычитать, сравнивать.
# ## 1.4.2 Векторизация мешок слов
# В пакете scikit-learn есть модуль CountVectorizer, который преобразовывает входной текст в матрицу, значениями которой являются количества вхождения данного ключа(слова) в текст. Таким образом, мы получим матрицу, размерность которой будет равна количеству всех слов, умноженных на количество документов. И элементами матрицы будут числа, которые означают, сколько раз всего слово встретилось в тексте.
from sklearn.feature_extraction.text import CountVectorizer
text = ddf["text"]
import copy
df2 = copy.deepcopy(ddf)
Lem_Text = df2["lemmatize_text"].to_numpy()
# print(Lem_Text)
vectorizer = CountVectorizer(
analyzer="word", stop_words=stopword, ngram_range=(1, 3), min_df=2
)
count_matrix = vectorizer.fit_transform(Lem_Text)
vectorizer.get_feature_names_out()[:50]
# ## 1.4.3 Векторизация TF-IDF
# В тексте большого объема некоторые слова могут присутствовать очень часто, но при этом не нести никакой значимой информации о фактическом содержании текста (документа). Если такие данные передавать непосредственно классификатору, то такие частые термины могут затенять частоты более редких, но при этом более интересных терминов. Для того, чтобы этого избежать, достаточно разделить количество употреблений каждого слова в документе на общее количество слов в документе, это есть TF — частота термина. Термин IDF (inverse document frequency) обозначает обратную частоту термина (инверсия частоты) с которой некоторое слово встречается в документах. IDF позволяет измерить непосредственную важность термина.
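# Небольшой иллюстративный пример (дополнение, корпус условный): на игрушечных
# "документах" видно, как TF-IDF повышает вес редких и понижает вес частых слов.
from sklearn.feature_extraction.text import TfidfVectorizer

_toy_docs = ["кот сидит дома", "кот ест рыбу", "собака ест кость"]
_toy_tfidf = TfidfVectorizer()
_toy_matrix = _toy_tfidf.fit_transform(_toy_docs)
print(dict(zip(_toy_tfidf.get_feature_names_out(), _toy_matrix.toarray()[0].round(2))))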
from sklearn.feature_extraction.text import TfidfVectorizer
ddf.info(memory_usage="deep")
Vector = []
tfidfv = TfidfVectorizer(
max_df=0.8, min_df=0.01, stop_words=stopword, ngram_range=(1, 3)
)
tfidf_ngram_features = tfidfv.fit_transform(Lem_Text)
tfidf_ngram_features
tfidf_words = tfidfv.inverse_transform(tfidf_ngram_features)
# print(tfidf_words[:50])
ddf["Vector"] = tfidf_words
# Добавление ключевых слов
tfidfv = TfidfVectorizer(
max_df=0.8, min_df=0.01, stop_words=stopword, ngram_range=(1, 1)
)
tfidf_ngram_features = tfidfv.fit_transform(Lem_Text)
tfidf_words = tfidfv.inverse_transform(tfidf_ngram_features)
mass = tfidf_ngram_features.toarray()
len(tfidf_words)
len(mass)
# Добавление ключевых слов для каждой статьи: предварительно убираем слишком короткие тексты
for i in range(len(ddf["text"])):
    if len(ddf["text"][i]) < 45:
        ddf = ddf.drop(index=i)
ddf = ddf.drop(index=3485)
ddf = ddf.drop(columns="keyword", errors="ignore")  # столбец мог остаться от предыдущих запусков
ddf.reset_index(drop=True, inplace=True)
for i in range(len(ddf)):
tfidf_vectorizer = TfidfVectorizer(stop_words=stopword, ngram_range=(1, 1))
tfidf_matrix = tfidf_vectorizer.fit_transform(
[ddf.loc[i, "lemmatize_text"]]
).toarray()
ddf.loc[i, "keyword"] = str(
dict(zip(tfidf_vectorizer.get_feature_names_out(), tfidf_matrix[0]))
)
ddf.head(3892)
tfidf_ngram_features[0:100].todense()
# Размер нулевой строки
len(tfidf_ngram_features[0].todense()[0].getA1())
# Непустые значения нулевой строки
# [i for i in tfidf_ngram_features[0].todense()[0].getA1() if i>0]
# ## 1.4.4 Векторизация Word Embeddings
# Еще один популярный способ векторизации word2vec – это группа связанных моделей, которые используются для создания так называемых встраиваний слов. Эти модели представляют собой неглубокие двухслойные нейронные сети, которые обучены восстанавливать лингвистические контексты слов. После обучения модели word2vec можно использовать для сопоставления каждого слова с вектором, обычно состоящим из нескольких сотен элементов, которые представляют отношение этого слова к другим словам, так и получается векторизация.
import nltk
from gensim.models import Word2Vec
Bigger_list = []
for i in Lem_Text:
li = list(i.split(" "))
Bigger_list.append(li)
Model = Word2Vec(Bigger_list, min_count=1, vector_size=300, workers=4)
# Model.wv.index_to_key
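# Набросок использования обученной модели (дополнение; конкретное слово берём
# прямо из словаря модели, поэтому пример не зависит от содержимого корпуса).
_sample_word = Model.wv.index_to_key[0]
print(_sample_word, Model.wv[_sample_word].shape)  # вектор размерности 300
print(Model.wv.most_similar(_sample_word, topn=5))  # ближайшие по косинусной близости слова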
# Вывод: было решено использовать векторизацию, полученную с помощью TfidfVectorizer.
# Причина в том, что при просмотре полученных слов видно, что данный метод учитывает важность слов, в отличие от других использованных методов, и это помогает уловить основной смысл статьи.
# Другая причина выбора - эффективность этого подхода при обработке текстовых данных.
# # 1.5 Разведочный анализ
# ## 1.5.1 Дополнение набора данных целевой переменной
# Создадим словарь для хранения данных из json
NominateCompany = {"Name": [], "Nominates": []}
# Открываем Target.json и получаем данные оттуда
with open("Target.json", "r", encoding="utf-8") as f:
text = json.load(f)
for i in text["list"]:
NominateCompany["Name"].append(i["Сompany"].strip().lower())
NominateCompany["Nominates"].append(i["Nominations"])
NominateCompany
# Вывод: мы получили данные из json-файла с номинантами.
# Создадим массив номинаций
Nomi = []
check = False
for i in ddf["name_company"]:
check = False
for j in range(len(NominateCompany["Name"])):
if i.strip().lower() == NominateCompany["Name"][j].strip().lower():
Nomi.append(NominateCompany["Nominates"][j])
check = True
break
print(check)
if check == False:
Nomi.append("NaN")
# true
# print(Nomi)
ddf["Target"] = Nomi
ddf.head(5)
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
le.fit(ddf["Target"])
ddf["Target2"] = le.transform(ddf["Target"])
ddf.head(600)
FILE_NAME = "result_data.csv"
ddf.to_csv(FILE_NAME)
import seaborn as sns
sns.distplot(ddf["Target2"])
import pylab
import scipy.stats as stats
stats.probplot(ddf["Target2"], dist="norm", plot=pylab)
pylab.show()
sns.boxplot(x=ddf["Target2"])
import matplotlib.pyplot as plt
ddf["rating"].unique()
plt.figure(figsize=(15, 5))
sns.boxplot(x=ddf["Target"], y=ddf["rating"], palette="hls")
plt.title("Таргет Vs Рейтинг", fontsize=15)
plt.xlabel("Таргет", fontsize=15)
plt.ylabel("Рейтинг", fontsize=15)
plt.show()
plt.figure(figsize=(20, 15))
sns.histplot(data=ddf, x="Target", y="name_company")
plt.show()
plt.figure(figsize=(20, 10))
sns.barplot(x=ddf["rating"], y=ddf["Target2"])
months = [] # список месяцев публикации статей
days = []  # список дней публикации статьи
for i in range(len(ddf)):
if len(ddf["date"][i].split()) > 1:
months.append(ddf["date"][i].split()[1])
days.append(ddf["date"][i].split()[0])
else:
months.append("Month")
days.append("Day")
plt.figure(figsize=(20, 10))
sns.barplot(x=months, y=ddf["Target2"])
sns.set(rc={"figure.figsize": (20, 10)})
sns.countplot(x=months)
sns.set(rc={"figure.figsize": (20, 10)})
sns.countplot(x=days)
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split
import pandas as pd
import numpy as np
from sklearn.pipeline import Pipeline
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.naive_bayes import MultinomialNB
# # 2.1 Построение модели классификации
df = pd.read_pickle("habr.pickle")
df.head()
# Склеиваем списки лемм обратно в строки через пробел
df["text_lemtize"] = [" ".join(tokens) for tokens in df["text_lemtize"]]
df = df.drop(["rating"], axis=1)
df = df.drop("Vector", axis=1)
df.head()
# ### Разделим выборку на обучающую и тестовую
df["name"] = pd.factorize(df["name"])[0]
X = df["text_lemtize"]
y = df["nominations"]
# На тестовые значения отведём только 1/5 от всего набора.
# В переменную X помещаем лемматизированный текст статей, в y - целевую переменную (номинацию).
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.20, random_state=27
)
# ### Обучим нашу модель на классификации Naive Bayes
# Наивный байесовский классификатор (Naive Bayes) — это алгоритм машинного обучения, предназначенный для многоклассовой классификации данных с независимыми признаками. За один проход вычисляется условная вероятность каждого признака
nb = Pipeline(
[
("vect", CountVectorizer()),
("tfidf", TfidfTransformer()),
("clf", MultinomialNB()),
]
)
nb.fit(X_train, y_train)
y_pred = nb.predict(X_test)
# ### Реализуем метрику. Посмотрим точность модели на классификации Naive Bayes
# Accuracy - это метрика, показывающая отношение числа правильно угаданных классов к общему количеству примеров. Данная метрика вполне реализует и показывает нам точность нашей модели, учитывая описанный способ её оценки.
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
print("accuracy %s" % accuracy_score(y_pred, y_test))
# ### Обучим модель на линейной классификации SGDClassifier
# Этот оценщик реализует регуляризованные линейные модели со стохастическим градиентным спуском (SGD): градиент потерь оценивается по одному примеру за раз, и модель обновляется по ходу обучения с постепенно убывающим шагом (скоростью обучения).
from sklearn.linear_model import SGDClassifier
sgd = Pipeline(
[
("vect", CountVectorizer()),
("tfidf", TfidfTransformer()),
("clf", SGDClassifier()),
]
)
sgd.fit(X_train, y_train)
# ### Посмотрим точность модели на классификации SGDClassifier
y_pred = sgd.predict(X_test)
print("accuracy %s" % accuracy_score(y_pred, y_test))
# ### Обучим модель на классификации логистической регресии LogisticRegression
# Логистическая регрессия - это процесс моделирования вероятности дискретного результата с учетом входной переменной
from sklearn.linear_model import LogisticRegression
logreg = Pipeline(
[
("vect", CountVectorizer()),
("tfidf", TfidfTransformer()),
("clf", LogisticRegression(n_jobs=1, C=1000)),
]
)
logreg.fit(X_train, y_train)
# ### Посмотрим точность модели на классификации LogisticRegression
y_pred = logreg.predict(X_test)
print("accuracy %s" % accuracy_score(y_pred, y_test))
# ## Выберем наилучшую модель
# Обучив все модели и посмотрев на их точность по выбранной метрике, мы делаем выбор в пользу модели, которая показала наилучший результат. Наилучшей оказалась модель на основе SGDClassifier: метрика показала результат 0.725, что является наивысшим среди всех обученных моделей. Выбираем SGDClassifier. Небольшой сводный набросок сравнения приведён ниже.
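# Иллюстративный набросок (дополнение, в исходном ноутбуке его нет): собираем
# точности всех трёх обученных пайплайнов в одном месте для наглядного сравнения.
models = {"NaiveBayes": nb, "SGD": sgd, "LogReg": logreg}
for model_name, model in models.items():
    print(model_name, accuracy_score(y_test, model.predict(X_test)))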
# # 2.2 Оптимизация модели
# ### Настроим нашу модель
# ### Оптимизируем выбранную модель путём настройки гиперпараметров
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
sgd = Pipeline(
[
("vect", CountVectorizer()),
("tfidf", TfidfTransformer()),
("clf", SGDClassifier(alpha=0.001, max_iter=1500)),
]
)
sgd.fit(X_train, y_train)
y_pred = sgd.predict(X_test)
print("accuracy %s" % accuracy_score(y_pred, y_test))
# Как мы можем видеть увеличение значений alpha и max_iter по сравнению со стандартными дали увеличение значений метрики. Было - 0.725, Стало - 0.746
# Мы улучшили нашу модель
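# Набросок-дополнение (GridSearchCV импортирован выше, но в ноутбуке не использовался):
# так подбор alpha и max_iter можно автоматизировать перебором по сетке; значения сетки условные.
from sklearn.model_selection import GridSearchCV

param_grid = {"clf__alpha": [1e-4, 1e-3, 1e-2], "clf__max_iter": [1000, 1500]}
search = GridSearchCV(sgd, param_grid, cv=3, scoring="accuracy", n_jobs=-1)
search.fit(X_train, y_train)
print(search.best_params_, search.best_score_)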
# ### Построим кривую обучения
from sklearn.model_selection import learning_curve
from sklearn.model_selection import validation_curve
import matplotlib.pyplot as plt
train_sizes, train_scores, test_scores = learning_curve(sgd, X_train, y_train, cv=5)
train_scores_mean = np.mean(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
plt.plot(train_sizes, train_scores_mean, label="Train")
plt.plot(train_sizes, test_scores_mean, label="Test")
plt.legend()
plt.xlabel("Number of samples")
plt.ylabel("Accuracy score")
plt.title("Learning curve")
# Недообучения модели явно не видно: при достаточном количестве образцов (как мы и указали для обучения) тестовые данные показывают хоть и ожидаемо меньший результат по сравнению с тренировочными, но всё же достаточно высокий по уже рассмотренной метрике.
# Переобучения также не происходит: модель на тестовых данных не показывает таких же хороших результатов, как на тренировочном наборе.
# ## Сохраним нашу модель
from joblib import dump, load
dump(sgd, "filename.joblib")
# # Модуль 1. Парсинг и предобработка данных
# # 1.1 Парсинг данных
# ## Подключение необходимых библиотек
import pandas as pd
import numpy as np
import docx
import json
import glob
from bs4 import BeautifulSoup as bs
import requests
import os
# ## Получаем список кандидатов из данного docx-файла
doc = docx.Document("Condidates.docx")
condidates = []
for paragraph in doc.paragraphs:
if paragraph.text != "":
txt = paragraph.text.strip()
condidates.append(txt)
print(condidates)
# ## Получаем данные с json-файлов, данных в папке
result_list = {
"name": [],
"description": [],
"rating": [],
"field": [],
"date": [],
"text": [],
}
path = "Data"
filelist = []
json_list = []
for root, dirs, files in os.walk(path):
for file in files:
filelist.append(os.path.join(root, file))
for name1 in filelist:
with open(name1, encoding="utf8") as json_data:
data = json.load(json_data)
search2 = os.path.relpath(name1, "Data")
search2 = search2.replace(".json", "")
json_list.append(search2)
if data["info"] == None:
search = os.path.relpath(name1, "Data")
search = search.replace(".json", "")
search = search.replace(" ", "%20")
url = "https://habr.com/ru/companies/?q=" + search
page = requests.get(url)
soup = bs(page.text, "html.parser")
name = soup.find("em", class_="searched-item")
description = soup.find("div", class_="tm-company-snippet__description")
rating = soup.find(
"span",
class_="tm-companies__score-counter tm-companies__score-counter_rating",
)
field = soup.find("div", class_="tm-companies__company-hubs")
if name is not None:
rating1 = rating.text.replace("Рейтинг", "")
rating1 = rating1.replace(" ", "")
rating1 = rating1.replace("\n", "")
result_list["name"].append(name.text)
result_list["description"].append(description.text)
result_list["rating"].append(rating1)
for i in data["refs"]:
if (i[0]) is not None:
result_list["date"].append(
i[1].get("day") + " " + i[1].get("month")
)
result_list["text"].append(i[0])
else:
for i in data["refs"]:
if i is not None:
                        search1 = os.path.relpath(name1, "Data")
                        search1 = search1.replace(".json", "")
                        search1 = search1.replace("%20", " ")
result_list["description"].append(None)
result_list["rating"].append(None)
result_list["field"].append(None)
result_list["name"].append(search1)
result_list["date"].append(
i[1].get("day") + " " + i[1].get("month")
)
result_list["text"].append(i[0])
else:
for i in data["refs"]:
search = os.path.relpath(name1, "Data")
search = search.replace(".json", "")
industries = data["info"].get("industries")
industries = str(industries)
industries = industries.replace("[", "")
industries = industries.replace("]", "")
if (i[0]) is not None:
result_list["date"].append(
i[1].get("day") + " " + i[1].get("month")
)
result_list["text"].append(i[0])
result_list["name"].append(search)
result_list["description"].append(data["info"].get("about"))
result_list["rating"].append(data["info"].get("rate"))
result_list["field"].append(industries)
# Результат парсинга папки с json-файлами:
FILE_NAME = "habr_json.csv"
df = pd.DataFrame.from_dict(result_list, orient="index")
df.to_csv(FILE_NAME)
df = df.transpose()
pd.set_option("display.max_rows", 2000)
df.head(3000)
# В данном случае, если мы не нашли информацию о компании в json-файле мы ищем эту информацию на сайте habr.com, если компании с данным именем уже не существует - заполняем пустыми значениями.
# ### Те компании, которые отсутсвуют в данной папке с json-файлами парсим с сайта habr.com
for cnd in condidates:
if cnd not in json_list:
search = cnd
search = search.replace(" ", "%20")
url = "https://habr.com/ru/companies/?q=" + search
page = requests.get(url)
soup = bs(page.text, "html.parser")
name = soup.find("em", class_="searched-item")
description = soup.find("div", class_="tm-company-snippet__description")
rating = soup.find(
"span",
class_="tm-companies__score-counter tm-companies__score-counter_rating",
)
field = soup.find("div", class_="tm-companies__company-hubs")
if name is not None:
rating1 = rating.text.replace("Рейтинг", "")
rating1 = rating1.replace(" ", "")
rating1 = rating1.replace("\n", "")
href = soup.find("a", class_="tm-company-snippet__title")
url = "https://habr.com" + href.get("href")
page = requests.get(url)
soup = bs(page.text, "html.parser")
field_company = soup.find("div", class_="tm-company-profile__categories")
field = field_company.text.replace("\n", "")
url3 = (
"https://habr.com/ru/search/?q="
+ search
+ "&target_type=posts&order=relevance"
)
page3 = requests.get(url3)
soup3 = bs(page3.text, "html.parser")
a = 25
g = 0
for i in range(a):
url4 = (
"https://habr.com/ru/search/page"
+ str(i)
+ "/?q="
+ search
+ "&target_type=posts&order=relevance"
)
page4 = requests.get(url4)
soup4 = bs(page4.text, "html.parser")
statie = soup4.find_all("a", class_="tm-title__link")
for j in statie:
href_ = soup.find("a", class_="tm-title__link")
url2 = "https://habr.com" + str(j.get("href"))
page2 = requests.get(url2)
soup2 = bs(page2.text, "html.parser")
txt2 = soup2.find("div", class_="tm-article-body")
date2 = soup2.find("span", class_="tm-article-datetime-published")
if txt2 is not None:
result_list["name"].append(name.text)
result_list["description"].append(description.text)
result_list["rating"].append(rating1)
result_list["field"].append(field)
result_list["text"].append(txt2.text)
result_list["date"].append(date2.text)
FILE_NAME = "habr_json.csv"
df = pd.DataFrame.from_dict(result_list, orient="index")
df.to_csv(FILE_NAME)
df = df.transpose()
pd.set_option("display.max_rows", 2000)
df.head(3000)
# В итоге мы получили csv-файл с статьями с данных в папке json-файлов с данными о компаниях и их статьями, и статьями с данными о компаниях с сайта habr.com. Все компании были взяты из списка компаний в данном docx файле.
# # 1.2 Формирование структуры набора данных
df.info()
# Изменим наш получившийся csv-файл.
# Сразу оставим самые основные атрибуты для дальнейшей работы: name, text.
# name - ключевой атрибут для определения компании.
# text - заключает в себе текст для обработки текстовых данных.
# Атрибут field содержит информацию о
# направлении деятельности компании, но обрабатывать и векторизовать его для наших нужд нецелесообразно, поэтому удалим его.
# Описание компании (description) имеет слишком много пропусков и мало данных для дополнительной обработки текста, удалим его.
# Атрибуты rating и date оставим для дальнейшей визуализации зависимостей и оценки важности переменной для обучения.
# Этих данных нам вполне хватит для проведения всех дальнейших манипуляций и обучения
df = df.drop(["description"], axis=1)
df = df.drop(["field"], axis=1)
# # 1.3 Предварительная обработка текстовых данных
import string
import nltk
from nltk.corpus import stopwords
stopwords = nltk.corpus.stopwords.words("russian")
nltk.download("stopwords")
nltk.download("averaged_perceptron_tagger")
from nltk.tokenize import word_tokenize
from nltk.tokenize import sent_tokenize, word_tokenize
from sklearn.feature_extraction.text import CountVectorizer
import re
from pandas_profiling import ProfileReport
# ### Выполним разведочный анализ:
pr = ProfileReport(df)
pr
# Приводим дату к названию месяца через словарь сокращений
month_map = {
    "янв": "январь", "фев": "февраль", "мар": "март", "апр": "апрель",
    "май": "май", "мая": "май", "июн": "июнь", "июл": "июль",
    "авг": "август", "сен": "сентябрь", "окт": "октябрь",
    "ноя": "ноябрь", "дек": "декабрь",
}
date_list = []
for i in df["date"]:
    for key, month in month_map.items():
        if key in i:
            date_list.append(month)
            break
    else:
        date_list.append("месяц")
df["date"] = date_list
df.head(1000)
# ### Выполним текстовую обработку:
def remove_punctuation(text):
return "".join(
[ch for ch in text if ch not in string.punctuation]
) # метод для удаления пунктуации
def remove_numbers(text):
return "".join(
[i if not i.isdigit() else " " for i in text]
) # метод для удаления чисел
def remove_notalpha(text):
return "".join(
[i if i.isalpha() else " " for i in text]
) # метод для проверки на только алфавитные символы
import re
def remove_space(text):
return re.sub(r"\s+", " ", text, flags=re.I) # метод для удаления пробелов
def remove_latin(text):
return re.sub(
"[a-z]", "", text, flags=re.I
) # метод для удаления символов латинского алфавит
st = "❯\0"
def remove_othersymbol(text):
return "".join(
[ch if ch not in st else " " for ch in text]
) # удаление остальных не нужных символов
def remove_stopwords(text):
return [word for word in text if word not in stopwords] # удаление стоп-слов
def tokenize(text):
t = word_tokenize(text)
return [token for token in t if token not in stopwords] # метод для токенизации
def part_speech(text):
# t = word_tokenize(text)
return nltk.pos_tag(text) # метод для проверки частей речи
from nltk.stem import WordNetLemmatizer
import nltk
nltk.download("wordnet")
import pymorphy2
nltk.download("punkt")
morph = pymorphy2.MorphAnalyzer()
wn = nltk.WordNetLemmatizer()
def lemmatize(text):
res = list()
for word in text:
p = morph.parse(word)[0]
res.append(p.normal_form)
return res # метод для леммитизации
# Для проведения всех вышеперечисленных операций обработки текста мы использовали специализированные библиотеки, например пакет nltk и, в частности, его возможности для токенизации вместо стандартного метода split(), который хуже отвечает главной цели токенизации. То же касается лемматизации при помощи nltk/pymorphy2 и встроенной библиотеки re для удаления лишних символов. Специализированные библиотеки лучше справляются с задачей, а иногда и вовсе остаются единственным способом её решения. Конечно, это не единственные библиотеки: для лемматизации существуют также TextBlob, Gensim и др., однако при одинаково хорошем результате выбор может быть любым. Небольшая иллюстрация к сказанному приведена ниже.
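# Иллюстрация к сказанному (условный пример, моё дополнение): word_tokenize корректно
# отделяет пунктуацию, тогда как str.split() оставляет её приклеенной к словам.
# (В новых версиях nltk может дополнительно понадобиться ресурс punkt_tab.)
_demo = "Привет, мир! Это тест."
print(_demo.split())
print(word_tokenize(_demo))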
text_list = []
# #### Обработам наши текстовые данные: удалим пунктуацию, числа, проверим на только алфавитные символы, удалим пробелы, символы латинского алфавита, а так же другие не нужные нам символы. Проведём токенизацию и лимматизацию, удалим стоп-слова
g = 0
for i in df["text"]:
if i is not None:
i = remove_punctuation(i)
i = remove_numbers(i)
i = remove_notalpha(i)
i = remove_space(i)
i = remove_latin(i)
i = remove_othersymbol(i)
i = tokenize(i)
i = remove_stopwords(i)
i = lemmatize(i)
text_list.append(i)
# Посмотрим на пример результата обработки текста:
print(text_list[0])
df["text_lemtize"] = text_list
part_speech_list = []
# ### Проанализируем значимые части речи
# Посмотрим на пример результата:
def part(text):
res = list()
for word in text:
p = morph.parse(word)[0]
a = (word, p.tag.cyr_repr)
res.append(tuple(a))
return res # метод для выделения значимых частей речи
prep_txt_list = []
for txt in text_list:
prep_txt = part(txt)
prep_txt_list.append(prep_txt)
# Посмотрим на примере результат работы анализа значимых частей речи:
print(prep_txt_list[2])
# Алгоритм выделил значимые части речи для каждого нашего слова
# # 1.4 Поиск ключевых слов/n-грамм. Векторизация текстов
# ### Выполним поиск ключевых слов/биграмм/триграмм в тексте
#
import nltk
from nltk.tokenize import word_tokenize
from collections import Counter
from nltk.util import ngrams
import statistics
from statistics import mode
from nltk.collocations import BigramAssocMeasures, BigramCollocationFinder
from nltk.collocations import TrigramAssocMeasures, TrigramCollocationFinder
new_list = []
for i in text_list:
txt = " ".join(i)
new_list.append(txt)
new_list = np.array(new_list)
import yake
kw_extractor1 = yake.KeywordExtractor(lan="ru", top=20, n=1, dedupLim=0.9)
keywords1 = kw_extractor1.extract_keywords(new_list[0])
kw_extractor2 = yake.KeywordExtractor(lan="ru", top=20, n=2, dedupLim=0.9)
keywords2 = kw_extractor2.extract_keywords(new_list[0])
kw_extractor3 = yake.KeywordExtractor(lan="ru", top=20, n=3, dedupLim=0.9)
keywords3 = kw_extractor3.extract_keywords(new_list[0])
# To extract unigrams, bigrams and trigrams we will use the yake module; it needs the fewest lines of code for the steps above, which the nltk package, for example, cannot match.
for kw in keywords1:
print(kw)
for kw in keywords2:
print(kw)
for kw in keywords3:
print(kw)
# ### Представим наш документ в виде униграмм/биграмм и триграмм и преобразуем его в векторные представления различными способами
# #### Сделаем при помощи CountVectorizer
# #### В результате выполнения c CountVectorizer мы получим матрицы с количеством вхождений слов в каждом тексте
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(analyzer="word", ngram_range=(1, 3), min_df=2)
# ngram_range=(1,3) - Здесь (и в последующих использованиях) означает, что для построения объектов будут учитываться как униграммы, биграммы и триграммы
cv_matrix = cv.fit_transform(new_list)
cv_matrix = cv_matrix.toarray()
# #### Полученная матрица:
cv_matrix
for i in cv_matrix[99]:
print(i)
# #### Сделаем при помощи TfidfVectorizer
# #### с TfidfVectorizer мы получим матрицу весов
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer(analyzer="word", ngram_range=(1, 3), min_df=2)
tfidf_matrix = tfidf.fit_transform(new_list)
tfidf_matrix = tfidf_matrix.toarray()
# Посмотрим на наши униграммы/биграммы/триграммы в виде слов:
tfidf.get_feature_names_out()
tfidf_matrix
for i in tfidf_matrix[99]:
print(i)
# Получили числовые тензоры методом TfidfVectorizer
# #### Сделаем при помощи HashingVectorizer
# #### В отличие от CountVectorizer и TfidfVectorizer HashingVectorizer использует хэш слов, что решает проблему огромного словарного запаса.
from sklearn.feature_extraction.text import HashingVectorizer
vectorizer = HashingVectorizer(n_features=20)
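# NOTE (added remark, not from the original notebook): with only 20 hash buckets many
# distinct n-grams collide into the same feature; a much larger n_features (e.g. 2**10
# or more) is usual when the hashed vectors are meant to stay informative.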
vector = vectorizer.transform(new_list)
vector = vector.toarray()
for i in vector:
print(i)
hashing_list = []
for i in vector:
hashing_list.append(i)
# #### Добавим наши униграммы/биграммы/триграммы в наш набор данных в векторизированном виде при помощи библиотеки HashingVectorizer
df["Vector"] = hashing_list
df.head()
# # 1.5 Разведочный анализ
# #### Дополним наш набор данных целевой переменной
nominations_list = []
with open("Target.json", "r", encoding="utf8") as json_data:
data = json.load(json_data)
g = 0
for i in df["name"]:
for j in data["name"]:
if i == j["Сompany"]:
nominations_list.append(j["Nominations"])
df["nominations"] = nominations_list
df.head(2000)
# #### преобразуем категориальный признак в числовой
df["nominations"] = pd.factorize(df["nominations"])[0]
df.head(1000)
# #### Проведём анализ нормальности распределения целевой переменной
import seaborn as sns
import numpy as np
import pylab
import scipy.stats as stats
from scipy.stats import shapiro
normal_df = df["nominations"]
stats.probplot(normal_df, dist="norm", plot=pylab)
pylab.show()
# Переменная не имеет нормальной плотности распределения, далеко не все точки расположены на прямой линии
graph2 = sns.boxplot(x=normal_df)
# Переменная не имеет нормальной плотности распределения, медиана находится не строго по центру
stat, p = shapiro(normal_df)
print("stat=%.3f, p=%.3f" % (stat, p))
if p > 0.05:
print("Гауссовское распределение")
else:
print("Не гауссовское распределение")
# В результате анализа распределения целевой переменной (графического и статистического) мы сделали вывод о не нормальной
# плотности распределения целевой переменной
# #### Посмотрим на зависимость целевой переменной от времени:
sns.countplot(x=df["nominations"], hue=df["date"])
# На графике видно количество публикаций в том или ином месяце в разных номинациях, где цвет это какой-либо месяц.
# #### Посмотрим на статистику публикаций:
sns.countplot(x=df["nominations"])
# На графике видно количество публикаций с той или иной номинацией
list_keyWords = tfidf.inverse_transform(vector)
df["keyWords"] = list_keyWords
# #### Посмотрим на зависимость целевой переменной от рейтинга компании:
sns.countplot(x=df["nominations"], hue=df["rating"])
# На графике видно количество статей по определённым номинациям с разбиением с учётом рейтинга компании, где рейтинг зависит от цвета.
# #### Посмотрим на зависимость целевой переменной от ключевых слов
df.plot(x="keyWords", y="nominations")
# #### Удалим уже не нужные нам столбцы text
df["date"] = pd.factorize(df["date"])[0]
df["name"] = pd.factorize(df["name"])[0]
df = df.drop(["text"], axis=1)
df = df.drop(["keyWords"], axis=1)
df.head()
df.to_csv(FILE_NAME)
df.to_pickle("habr.pickle")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/543/129543413.ipynb
| null | null |
[{"Id": 129543413, "ScriptId": 38519719, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 13417746, "CreationDate": "05/14/2023 17:19:56", "VersionNumber": 1.0, "Title": "Report1-BA-djostit", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 599.0, "LinesInsertedFromPrevious": 599.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
# # Модуль 1. Парсинг и предобработка данных
# # 1.1 Парсинг данных
# ## Подключение необходимых библиотек
import pandas as pd
import numpy as np
import docx
import json
import glob
from bs4 import BeautifulSoup as bs
import requests
import os
# ## Получаем список кандидатов из данного docx-файла
doc = docx.Document("Condidates.docx")
condidates = []
for paragraph in doc.paragraphs:
if paragraph.text != "":
txt = paragraph.text.strip()
condidates.append(txt)
print(condidates)
# ## Получаем данные с json-файлов, данных в папке
result_list = {
"name": [],
"description": [],
"rating": [],
"field": [],
"date": [],
"text": [],
}
path = "Data"
filelist = []
json_list = []
for root, dirs, files in os.walk(path):
for file in files:
filelist.append(os.path.join(root, file))
for name1 in filelist:
with open(name1, encoding="utf8") as json_data:
data = json.load(json_data)
search2 = os.path.relpath(name1, "Data")
search2 = search2.replace(".json", "")
json_list.append(search2)
if data["info"] == None:
search = os.path.relpath(name1, "Data")
search = search.replace(".json", "")
search = search.replace(" ", "%20")
url = "https://habr.com/ru/companies/?q=" + search
page = requests.get(url)
soup = bs(page.text, "html.parser")
name = soup.find("em", class_="searched-item")
description = soup.find("div", class_="tm-company-snippet__description")
rating = soup.find(
"span",
class_="tm-companies__score-counter tm-companies__score-counter_rating",
)
field = soup.find("div", class_="tm-companies__company-hubs")
if name is not None:
rating1 = rating.text.replace("Рейтинг", "")
rating1 = rating1.replace(" ", "")
rating1 = rating1.replace("\n", "")
result_list["name"].append(name.text)
result_list["description"].append(description.text)
result_list["rating"].append(rating1)
# also append field here so this column stays aligned with name/description/rating
result_list["field"].append(field.text if field is not None else None)
for i in data["refs"]:
if (i[0]) is not None:
result_list["date"].append(
i[1].get("day") + " " + i[1].get("month")
)
result_list["text"].append(i[0])
else:
for i in data["refs"]:
if i is not None:
search1 = os.path.relpath(name1, "Data")
search1 = search.replace(".json", "")
search1 = search.replace("%20", " ")
result_list["description"].append(None)
result_list["rating"].append(None)
result_list["field"].append(None)
result_list["name"].append(search1)
result_list["date"].append(
i[1].get("day") + " " + i[1].get("month")
)
result_list["text"].append(i[0])
else:
for i in data["refs"]:
search = os.path.relpath(name1, "Data")
search = search.replace(".json", "")
industries = data["info"].get("industries")
industries = str(industries)
industries = industries.replace("[", "")
industries = industries.replace("]", "")
if (i[0]) is not None:
result_list["date"].append(
i[1].get("day") + " " + i[1].get("month")
)
result_list["text"].append(i[0])
result_list["name"].append(search)
result_list["description"].append(data["info"].get("about"))
result_list["rating"].append(data["info"].get("rate"))
result_list["field"].append(industries)
# Результат прасинга с папки с json-файлами:
FILE_NAME = "habr_json.csv"
df = pd.DataFrame.from_dict(result_list, orient="index")
df.to_csv(FILE_NAME)
df = df.transpose()
pd.set_option("display.max_rows", 2000)
df.head(3000)
# Here, if we did not find the company information in the json file, we look it up on habr.com; if a company with that name no longer exists, the fields are filled with empty values.
# ### Те компании, которые отсутсвуют в данной папке с json-файлами парсим с сайта habr.com
for cnd in condidates:
if cnd not in json_list:
search = cnd
search = search.replace(" ", "%20")
url = "https://habr.com/ru/companies/?q=" + search
page = requests.get(url)
soup = bs(page.text, "html.parser")
name = soup.find("em", class_="searched-item")
description = soup.find("div", class_="tm-company-snippet__description")
rating = soup.find(
"span",
class_="tm-companies__score-counter tm-companies__score-counter_rating",
)
field = soup.find("div", class_="tm-companies__company-hubs")
if name is not None:
rating1 = rating.text.replace("Рейтинг", "")
rating1 = rating1.replace(" ", "")
rating1 = rating1.replace("\n", "")
href = soup.find("a", class_="tm-company-snippet__title")
url = "https://habr.com" + href.get("href")
page = requests.get(url)
soup = bs(page.text, "html.parser")
field_company = soup.find("div", class_="tm-company-profile__categories")
field = field_company.text.replace("\n", "")
url3 = (
"https://habr.com/ru/search/?q="
+ search
+ "&target_type=posts&order=relevance"
)
page3 = requests.get(url3)
soup3 = bs(page3.text, "html.parser")
a = 25
g = 0
for i in range(a):
url4 = (
"https://habr.com/ru/search/page"
+ str(i)
+ "/?q="
+ search
+ "&target_type=posts&order=relevance"
)
page4 = requests.get(url4)
soup4 = bs(page4.text, "html.parser")
statie = soup4.find_all("a", class_="tm-title__link")
for j in statie:
href_ = soup.find("a", class_="tm-title__link")
url2 = "https://habr.com" + str(j.get("href"))
page2 = requests.get(url2)
soup2 = bs(page2.text, "html.parser")
txt2 = soup2.find("div", class_="tm-article-body")
date2 = soup2.find("span", class_="tm-article-datetime-published")
if txt2 is not None:
result_list["name"].append(name.text)
result_list["description"].append(description.text)
result_list["rating"].append(rating1)
result_list["field"].append(field)
result_list["text"].append(txt2.text)
result_list["date"].append(date2.text)
FILE_NAME = "habr_json.csv"
df = pd.DataFrame.from_dict(result_list, orient="index")
df.to_csv(FILE_NAME)
df = df.transpose()
pd.set_option("display.max_rows", 2000)
df.head(3000)
# As a result we obtained a csv file with articles taken both from the json files in the given folder (company data and their articles) and from habr.com (company data and articles). All companies were taken from the list in the provided docx file.
# # 1.2 Формирование структуры набора данных
df.info()
# Let's reshape the resulting csv file.
# We keep only the core attributes for further work: name and text.
# name is the key attribute for identifying the company.
# text holds the article text used for the text processing.
# The field attribute describes the company's line of business, but processing and vectorising that text is not worthwhile for our purposes, so we drop it.
# The company description (description) has too many missing values and too little content for additional text processing, so we drop it as well.
# The rating and date attributes are kept for visualising dependencies later and assessing their importance for training.
# This data is quite sufficient for all further manipulations and for training.
df = df.drop(["description"], axis=1)
df = df.drop(["field"], axis=1)
# # 1.3 Предварительная обработка текстовых данных
import string
import nltk
from nltk.corpus import stopwords
stopwords = nltk.corpus.stopwords.words("russian")
nltk.download("stopwords")
nltk.download("averaged_perceptron_tagger")
from nltk.tokenize import word_tokenize
from nltk.tokenize import sent_tokenize, word_tokenize
from sklearn.feature_extraction.text import CountVectorizer
import re
from pandas_profiling import ProfileReport
# ### Выполним разведочный анализ:
pr = ProfileReport(df)
pr
date_list = []
for i in df["date"]:
check = True
if "янв" in i:
date_list.append("январь")
check = False
if "фев" in i:
date_list.append("февраль")
check = False
if "мар" in i:
date_list.append("март")
check = False
if "апр" in i:
date_list.append("апрель")
check = False
if "май" in i:
date_list.append("май")
check = False
if "мая" in i:
date_list.append("май")
check = False
if "июн" in i:
date_list.append("июнь")
check = False
if "июл" in i:
date_list.append("июль")
check = False
if "авг" in i:
date_list.append("август")
check = False
if "сен" in i:
date_list.append("сентябрь")
check = False
if "окт" in i:
date_list.append("октябрь")
check = False
if "ноя" in i:
date_list.append("ноябрь")
check = False
if "дек" in i:
date_list.append("декабрь")
check = False
if check == True:
date_list.append("месяц")
df["date"] = date_list
df.head(1000)
# ### Выполним текстовую обработку:
def remove_punctuation(text):
return "".join(
[ch for ch in text if ch not in string.punctuation]
) # метод для удаления пунктуации
def remove_numbers(text):
return "".join(
[i if not i.isdigit() else " " for i in text]
) # метод для удаления чисел
def remove_notalpha(text):
return "".join(
[i if i.isalpha() else " " for i in text]
) # метод для проверки на только алфавитные символы
import re
def remove_space(text):
return re.sub(r"\s+", " ", text, flags=re.I) # метод для удаления пробелов
def remove_latin(text):
return re.sub(
"[a-z]", "", text, flags=re.I
) # метод для удаления символов латинского алфавит
st = "❯\0"
def remove_othersymbol(text):
return "".join(
[ch if ch not in st else " " for ch in text]
) # удаление остальных не нужных символов
def remove_stopwords(text):
return [word for word in text if word not in stopwords] # удаление стоп-слов
def tokenize(text):
t = word_tokenize(text)
return [token for token in t if token not in stopwords] # метод для токенизации
def part_speech(text):
# t = word_tokenize(text)
return nltk.pos_tag(text) # метод для проверки частей речи
from nltk.stem import WordNetLemmatizer
import nltk
nltk.download("wordnet")
import pymorphy2
nltk.download("punkt")
morph = pymorphy2.MorphAnalyzer()
wn = nltk.WordNetLemmatizer()
def lemmatize(text):
res = list()
for word in text:
p = morph.parse(word)[0]
res.append(p.normal_form)
return res # метод для леммитизации
# For all of the text-processing steps above we used specialised libraries: the nltk package (in particular its tokenization tools) instead of, for example, the standard split() method, which serves the real goal of tokenization much less well; likewise pymorphy2/nltk for lemmatization and the built-in re module for removing unwanted characters. Specialised libraries simply handle these tasks better, and sometimes they are the only workable option. They are not the only choices, of course: for lemmatization there are also TextBlob, the Gensim lemmatizer and others, but since they all give good results, any of them would do.
text_list = []
# #### Обработам наши текстовые данные: удалим пунктуацию, числа, проверим на только алфавитные символы, удалим пробелы, символы латинского алфавита, а так же другие не нужные нам символы. Проведём токенизацию и лимматизацию, удалим стоп-слова
g = 0
for i in df["text"]:
if i is not None:
i = remove_punctuation(i)
i = remove_numbers(i)
i = remove_notalpha(i)
i = remove_space(i)
i = remove_latin(i)
i = remove_othersymbol(i)
i = tokenize(i)
i = remove_stopwords(i)
i = lemmatize(i)
text_list.append(i)
# Посмотрим на пример результата обработки текста:
print(text_list[0])
df["text_lemtize"] = text_list
part_speech_list = []
# ### Проанализируем значимые части речи
# Посмотрим на пример результата:
def part(text):
res = list()
for word in text:
p = morph.parse(word)[0]
a = (word, p.tag.cyr_repr)
res.append(tuple(a))
return res # метод для выделения значимых частей речи
prep_txt_list = []
for txt in text_list:
prep_txt = part(txt)
prep_txt_list.append(prep_txt)
# Посмотрим на примере результат работы анализа значимых частей речи:
print(prep_txt_list[2])
# Алгоритм выделил значимые части речи для каждого нашего слова
# # 1.4 Поиск ключевых слов/n-грамм. Векторизация текстов
# ### Выполним поиск ключевых слов/биграмм/триграмм в тексте
#
import nltk
from nltk.tokenize import word_tokenize
from collections import Counter
from nltk.util import ngrams
import statistics
from statistics import mode
from nltk.collocations import BigramAssocMeasures, BigramCollocationFinder
from nltk.collocations import TrigramAssocMeasures, TrigramCollocationFinder
new_list = []
for i in text_list:
txt = " ".join(i)
new_list.append(txt)
new_list = np.array(new_list)
import yake
kw_extractor1 = yake.KeywordExtractor(lan="ru", top=20, n=1, dedupLim=0.9)
keywords1 = kw_extractor1.extract_keywords(new_list[0])
kw_extractor2 = yake.KeywordExtractor(lan="ru", top=20, n=2, dedupLim=0.9)
keywords2 = kw_extractor2.extract_keywords(new_list[0])
kw_extractor3 = yake.KeywordExtractor(lan="ru", top=20, n=3, dedupLim=0.9)
keywords3 = kw_extractor3.extract_keywords(new_list[0])
# To extract unigrams, bigrams and trigrams we will use the yake module; it needs the fewest lines of code for the steps above, which the nltk package, for example, cannot match.
for kw in keywords1:
print(kw)
for kw in keywords2:
print(kw)
for kw in keywords3:
print(kw)
# ### Представим наш документ в виде униграмм/биграмм и триграмм и преобразуем его в векторные представления различными способами
# #### Сделаем при помощи CountVectorizer
# #### В результате выполнения c CountVectorizer мы получим матрицы с количеством вхождений слов в каждом тексте
from sklearn.feature_extraction.text import CountVectorizer
cv = CountVectorizer(analyzer="word", ngram_range=(1, 3), min_df=2)
# ngram_range=(1,3) - Здесь (и в последующих использованиях) означает, что для построения объектов будут учитываться как униграммы, биграммы и триграммы
cv_matrix = cv.fit_transform(new_list)
cv_matrix = cv_matrix.toarray()
# #### Полученная матрица:
cv_matrix
for i in cv_matrix[99]:
print(i)
# #### Сделаем при помощи TfidfVectorizer
# #### с TfidfVectorizer мы получим матрицу весов
from sklearn.feature_extraction.text import TfidfVectorizer
tfidf = TfidfVectorizer(analyzer="word", ngram_range=(1, 3), min_df=2)
tfidf_matrix = tfidf.fit_transform(new_list)
tfidf_matrix = tfidf_matrix.toarray()
# Посмотрим на наши униграммы/биграммы/триграммы в виде слов:
tfidf.get_feature_names_out()
tfidf_matrix
for i in tfidf_matrix[99]:
print(i)
# Получили числовые тензоры методом TfidfVectorizer
# #### Сделаем при помощи HashingVectorizer
# #### В отличие от CountVectorizer и TfidfVectorizer HashingVectorizer использует хэш слов, что решает проблему огромного словарного запаса.
from sklearn.feature_extraction.text import HashingVectorizer
vectorizer = HashingVectorizer(n_features=20)
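# NOTE (added remark, not from the original notebook): with only 20 hash buckets many
# distinct n-grams collide into the same feature; a much larger n_features (e.g. 2**10
# or more) is usual when the hashed vectors are meant to stay informative.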
vector = vectorizer.transform(new_list)
vector = vector.toarray()
for i in vector:
print(i)
hashing_list = []
for i in vector:
hashing_list.append(i)
# #### Добавим наши униграммы/биграммы/триграммы в наш набор данных в векторизированном виде при помощи библиотеки HashingVectorizer
df["Vector"] = hashing_list
df.head()
# # 1.5 Разведочный анализ
# #### Дополним наш набор данных целевой переменной
nominations_list = []
with open("Target.json", "r", encoding="utf8") as json_data:
data = json.load(json_data)
g = 0
for i in df["name"]:
for j in data["name"]:
if i == j["Сompany"]:
nominations_list.append(j["Nominations"])
df["nominations"] = nominations_list
df.head(2000)
# #### преобразуем категориальный признак в числовой
df["nominations"] = pd.factorize(df["nominations"])[0]
df.head(1000)
# #### Проведём анализ нормальности распределения целевой переменной
import seaborn as sns
import numpy as np
import pylab
import scipy.stats as stats
from scipy.stats import shapiro
normal_df = df["nominations"]
stats.probplot(normal_df, dist="norm", plot=pylab)
pylab.show()
# Переменная не имеет нормальной плотности распределения, далеко не все точки расположены на прямой линии
graph2 = sns.boxplot(x=normal_df)
# Переменная не имеет нормальной плотности распределения, медиана находится не строго по центру
stat, p = shapiro(normal_df)
print("stat=%.3f, p=%.3f" % (stat, p))
if p > 0.05:
print("Гауссовское распределение")
else:
print("Не гауссовское распределение")
# В результате анализа распределения целевой переменной (графического и статистического) мы сделали вывод о не нормальной
# плотности распределения целевой переменной
# #### Посмотрим на зависимость целевой переменной от времени:
sns.countplot(x=df["nominations"], hue=df["date"])
# На графике видно количество публикаций в том или ином месяце в разных номинациях, где цвет это какой-либо месяц.
# #### Посмотрим на статистику публикаций:
sns.countplot(x=df["nominations"])
# На графике видно количество публикаций с той или иной номинацией
list_keyWords = tfidf.inverse_transform(vector)
df["keyWords"] = list_keyWords
# #### Посмотрим на зависимость целевой переменной от рейтинга компании:
sns.countplot(x=df["nominations"], hue=df["rating"])
# На графике видно количество статей по определённым номинациям с разбиением с учётом рейтинга компании, где рейтинг зависит от цвета.
# #### Посмотрим на зависимость целевой переменной от ключевых слов
df.plot(x="keyWords", y="nominations")
# #### Удалим уже не нужные нам столбцы text
df["date"] = pd.factorize(df["date"])[0]
df["name"] = pd.factorize(df["name"])[0]
df = df.drop(["text"], axis=1)
df = df.drop(["keyWords"], axis=1)
df.head()
df.to_csv(FILE_NAME)
df.to_pickle("habr.pickle")
| false | 0 | 6,627 | 0 | 6,627 | 6,627 |
||
129543562
|
<jupyter_start><jupyter_text>Pakistan Data Talent
This comprehensive dataset features a collection of LinkedIn profiles belonging to talented data scientists hailing from Pakistan. It presents a valuable resource for researchers, recruiters, and data enthusiasts seeking insights into the diverse and growing field of data science within the Pakistani professional landscape.
The dataset includes the following key information for each profile: URL, full name, headline, and location. The profile URLs provide direct access to each individual's LinkedIn page, allowing users to explore their professional background, experiences, and expertise in more detail.
Whether you are a recruiter looking to identify potential candidates, a researcher investigating trends and skills in the Pakistani data science community, or simply an enthusiast curious about the professionals driving data-driven innovation in Pakistan, this dataset will prove invaluable.
By making this dataset available on Kaggle, we aim to foster collaboration, knowledge sharing, and networking opportunities within the Pakistani data science community. We encourage users to leverage this dataset for various analytical and research purposes, such as demographic analysis, skillset mapping, or creating tailored outreach strategies.
Note: The dataset contains publicly available information from LinkedIn profiles. We kindly request that users respect privacy and professional boundaries when utilizing this dataset, refraining from any unauthorized use or misuse of the provided information.
Start exploring the wealth of talent within the Pakistani data science domain by downloading this dataset today!
Kaggle dataset identifier: pakistan-data-talent
<jupyter_script>import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv("/kaggle/input/pakistan-data-talent/Pakistan Data Talent.csv")
df.info()
df.head()
df.isna().sum()
# Let's start with getting some idea about the dataset
df.columns = df.columns.str.lower()
df.isna().sum()
df["headline"] = df["headline"].fillna("Unknown")
df["location"] = df["location"].fillna("Unknown")
df.isna().sum()
# now we have no null values whatsoever, and we can choose to drop these later if we want
# let's find out how many fellow Data Scientists we got in this dataset
countDS = (df["headline"].str.contains("Data Scientist")).sum()
# print the count
print(countDS)
# Awesome! But where are they from?
# Let's try a crosstab first
import matplotlib.pyplot as plt
import seaborn as sns
# create a horizontal bar plot of 'Data Scientist' observations by location
plt.figure(figsize=(10, 4))
sns.barplot(
x=df["headline"].str.contains("Data Scientist"),
y=df["location"],
estimator=sum,
ci=None,
palette="magma",
)
plt.xlabel("Number of Data Scientist Observations", fontsize=12)
plt.show()
import matplotlib.pyplot as plt
import seaborn as sns
# create a count plot of 'Data Scientist' observations by location
plt.figure(figsize=(10, 4))
sns.countplot(
x=df["location"], hue=df["headline"].str.contains("Data Scientist"), palette="magma"
)
plt.xlabel("Location", fontsize=12)
plt.ylabel("Count", fontsize=12)
plt.legend(title="Data Scientist", loc="upper right")
plt.show()
# create lists of cities for each province
import csv
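# NOTE: the code below references `punjab_cities`, but the original notebook never
# defines it, so the region mapping would raise a NameError. The list here is a hedged
# reconstruction with major Punjab cities; the exact set the author intended is unknown.
punjab_cities = [
"Lahore",
"Faisalabad",
"Rawalpindi",
"Multan",
"Gujranwala",
"Sialkot",
"Bahawalpur",
"Sargodha",
"Sheikhupura",
"Gujrat",
"Okara",
"Sahiwal",
"Jhelum",
]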
kp_cities = [
"Peshawar",
"Mardan",
"Abbottabad",
"Mingora",
"Kohat",
"Bannu",
"Swabi",
"Dera Ismail Khan",
"Nowshera",
"Charsadda",
"Mansehra",
"Chitral",
"Karak",
"Hangu",
"Batkhela",
"Kohistan",
"Shangla",
"Upper Dir",
"Lower Dir",
"Malakand",
"Lakki Marwat",
"Tank",
"Swat",
"Haripur",
"Buner",
"Torghar",
"Dera Ismail Khan",
]
sindh_cities = [
"Karachi",
"Hyderabad",
"Sukkur",
"Larkana",
"Mirpur Khas",
"Nawabshah",
"Jacobabad",
"Shikarpur",
"Dadu",
"Tando Adam",
"Khairpur",
"Tando Muhammad Khan",
"Badin",
"Thatta",
"Sanghar",
"Matiari",
"Umerkot",
"Ghotki",
"Jamshoro",
"Tharparkar",
"Shaheed Benazirabad",
"Kashmore",
"Naushahro Feroze",
"Sujawal",
"Qambar Shahdadkot",
"Mirpur Bathoro",
"Matli",
"Tando Allahyar",
"Mithi",
"Kunri",
"Kandhkot",
"Jati",
"Rohri",
"Diplo",
"Ghotki",
"Sita Road",
"Shikarpur",
]
balochistan_cities = [
"Quetta",
"Khuzdar",
"Chaman",
"Turbat",
"Gwadar",
"Sibi",
"Nushki",
"Kalat",
"Mastung",
"Panjgur",
"Khar",
]
Pakistan = ["Pakistan"]
other_country = ["United Arab Emirates"]
capital = {"Islamabad"}
# create a DataFrame with a 'region' column
df = pd.DataFrame(df)
df["region"] = None
# add 'region' for cities in each province
df.loc[df["location"].isin(punjab_cities), "region"] = "Punjab"
df.loc[df["location"].isin(sindh_cities), "region"] = "Sindh"
df.loc[df["location"].isin(kp_cities), "region"] = "KP"
df.loc[df["location"].isin(balochistan_cities), "region"] = "Balochistan"
df.loc[df["location"].isin(capital), "region"] = "ICT"
df.loc[df["location"].isin(Pakistan), "region"] = "Pakistan"
df.loc[df["location"].isin(other_country), "region"] = "Other Country"
df.head()
df["region"].value_counts()
df["region"].info()
# create a count plot of 'Data Scientist' observations by region
plt.figure(figsize=(10, 4))
sns.countplot(
x=df["region"], hue=df["headline"].str.contains("Data Scientist"), palette="magma"
)
plt.xlabel("Location", fontsize=12)
plt.ylabel("Count", fontsize=12)
plt.legend(title="Data Scientist", loc="upper right")
plt.show()
other_country_locations = df[df["region"] == "Other Country"]
# Create a scatter plot of the locations
plt.scatter(other_country_locations["location"], other_country_locations["region"])
# Set the title of the plot
plt.title("Locations with Region = 'Other Country'")
# Show the plot
plt.show()
# only other country is UAE where we have two observations
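# NOTE: `punjab_locations` is used below but never defined in the original notebook;
# assuming it is simply the subset of profiles mapped to the Punjab region.
punjab_locations = df[df["region"] == "Punjab"]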
location_counts = punjab_locations["location"].value_counts()
sizes = [location_counts[loc] for loc in punjab_locations["location"]]
plt.xticks(rotation=90)
plt.scatter(
punjab_locations["location"],
np.ones(len(punjab_locations)),
s=sizes,
c=sizes,
cmap="magma",
)
# Set the style of the plot
sns.set_style("darkgrid")
# Create a count plot of the locations
sns.countplot(x=punjab_locations["location"], palette="summer")
# Set the title of the plot
plt.title("Locations with Region = 'Punjab'")
plt.ylabel("Number of People")
plt.xlabel("Locations within Punjab")
plt.xticks(rotation=90)
# Show the plot
plt.show()
# Lahore has the highest number of locations in the Punjab Region
# Set the style of the plot
sns.set_style("darkgrid")
# Create a count plot of the locations
# sns.countplot(x=df['location'], hue=df['headline'].str.contains('Data Scientist'), palette='magma')
sns.countplot(
x=punjab_locations["location"],
hue=df["headline"].str.contains("Data Scientist"),
palette="cool",
)
# Set the title of the plot
plt.title("Location of Data Scientists within Punjab")
plt.ylabel("Number of Data Scientists")
plt.xlabel("Locations within Punjab")
plt.xticks(rotation=90)
# Show the plot
plt.show()
# Lahore has the highest number of Data Scientists in the Punjab Region
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/543/129543562.ipynb
|
pakistan-data-talent
|
hskhawaja
|
[{"Id": 129543562, "ScriptId": 38443968, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 10602010, "CreationDate": "05/14/2023 17:21:23", "VersionNumber": 1.0, "Title": "Data Talent EDA", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 171.0, "LinesInsertedFromPrevious": 171.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 185712289, "KernelVersionId": 129543562, "SourceDatasetVersionId": 5670125}]
|
[{"Id": 5670125, "DatasetId": 3259472, "DatasourceVersionId": 5745628, "CreatorUserId": 938987, "LicenseName": "CC0: Public Domain", "CreationDate": "05/12/2023 12:59:27", "VersionNumber": 1.0, "Title": "Pakistan Data Talent", "Slug": "pakistan-data-talent", "Subtitle": "Tap into the Data Talent of Pakistan - Data Scientists, ML Engineers, BI Experts", "Description": "This comprehensive dataset features a collection of LinkedIn profiles belonging to talented data scientists hailing from Pakistan. It presents a valuable resource for researchers, recruiters, and data enthusiasts seeking insights into the diverse and growing field of data science within the Pakistani professional landscape.\n\nThe dataset includes the following key information for each profile: URL, full name, headline, and location. The profile URLs provide direct access to each individual's LinkedIn page, allowing users to explore their professional background, experiences, and expertise in more detail.\n\nWhether you are a recruiter looking to identify potential candidates, a researcher investigating trends and skills in the Pakistani data science community, or simply an enthusiast curious about the professionals driving data-driven innovation in Pakistan, this dataset will prove invaluable.\n\nBy making this dataset available on Kaggle, we aim to foster collaboration, knowledge sharing, and networking opportunities within the Pakistani data science community. We encourage users to leverage this dataset for various analytical and research purposes, such as demographic analysis, skillset mapping, or creating tailored outreach strategies.\n\nNote: The dataset contains publicly available information from LinkedIn profiles. We kindly request that users respect privacy and professional boundaries when utilizing this dataset, refraining from any unauthorized use or misuse of the provided information.\n\nStart exploring the wealth of talent within the Pakistani data science domain by downloading this dataset today!", "VersionNotes": "Initial release", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 3259472, "CreatorUserId": 938987, "OwnerUserId": 938987.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 5689263.0, "CurrentDatasourceVersionId": 5764863.0, "ForumId": 3325009, "Type": 2, "CreationDate": "05/12/2023 12:59:27", "LastActivityDate": "05/12/2023", "TotalViews": 2838, "TotalDownloads": 204, "TotalVotes": 25, "TotalKernels": 2}]
|
[{"Id": 938987, "UserName": "hskhawaja", "DisplayName": "Hussain Shahbaz Khawaja", "RegisterDate": "03/02/2017", "PerformanceTier": 2}]
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
df = pd.read_csv("/kaggle/input/pakistan-data-talent/Pakistan Data Talent.csv")
df.info()
df.head()
df.isna().sum()
# Let's start with getting some idea about the dataset
df.columns = df.columns.str.lower()
df.isna().sum()
df["headline"] = df["headline"].fillna("Unknown")
df["location"] = df["location"].fillna("Unknown")
df.isna().sum()
# now we have no null values whatsoever, and we can choose to drop these later if we want
# let's find out how many fellow Data Scientists we got in this dataset
countDS = (df["headline"].str.contains("Data Scientist")).sum()
# print the count
print(countDS)
# Awesome! But where are they from?
# Let's try a crosstab first
import matplotlib.pyplot as plt
import seaborn as sns
# create a horizontal bar plot of 'Data Scientist' observations by location
plt.figure(figsize=(10, 4))
sns.barplot(
x=df["headline"].str.contains("Data Scientist"),
y=df["location"],
estimator=sum,
ci=None,
palette="magma",
)
plt.xlabel("Number of Data Scientist Observations", fontsize=12)
plt.show()
import matplotlib.pyplot as plt
import seaborn as sns
# create a count plot of 'Data Scientist' observations by location
plt.figure(figsize=(10, 4))
sns.countplot(
x=df["location"], hue=df["headline"].str.contains("Data Scientist"), palette="magma"
)
plt.xlabel("Location", fontsize=12)
plt.ylabel("Count", fontsize=12)
plt.legend(title="Data Scientist", loc="upper right")
plt.show()
# create lists of cities for each province
import csv
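# NOTE: the code below references `punjab_cities`, but the original notebook never
# defines it, so the region mapping would raise a NameError. The list here is a hedged
# reconstruction with major Punjab cities; the exact set the author intended is unknown.
punjab_cities = [
"Lahore",
"Faisalabad",
"Rawalpindi",
"Multan",
"Gujranwala",
"Sialkot",
"Bahawalpur",
"Sargodha",
"Sheikhupura",
"Gujrat",
"Okara",
"Sahiwal",
"Jhelum",
]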
kp_cities = [
"Peshawar",
"Mardan",
"Abbottabad",
"Mingora",
"Kohat",
"Bannu",
"Swabi",
"Dera Ismail Khan",
"Nowshera",
"Charsadda",
"Mansehra",
"Chitral",
"Karak",
"Hangu",
"Batkhela",
"Kohistan",
"Shangla",
"Upper Dir",
"Lower Dir",
"Malakand",
"Lakki Marwat",
"Tank",
"Swat",
"Haripur",
"Buner",
"Torghar",
"Dera Ismail Khan",
]
sindh_cities = [
"Karachi",
"Hyderabad",
"Sukkur",
"Larkana",
"Mirpur Khas",
"Nawabshah",
"Jacobabad",
"Shikarpur",
"Dadu",
"Tando Adam",
"Khairpur",
"Tando Muhammad Khan",
"Badin",
"Thatta",
"Sanghar",
"Matiari",
"Umerkot",
"Ghotki",
"Jamshoro",
"Tharparkar",
"Shaheed Benazirabad",
"Kashmore",
"Naushahro Feroze",
"Sujawal",
"Qambar Shahdadkot",
"Mirpur Bathoro",
"Matli",
"Tando Allahyar",
"Mithi",
"Kunri",
"Kandhkot",
"Jati",
"Rohri",
"Diplo",
"Ghotki",
"Sita Road",
"Shikarpur",
]
balochistan_cities = [
"Quetta",
"Khuzdar",
"Chaman",
"Turbat",
"Gwadar",
"Sibi",
"Nushki",
"Kalat",
"Mastung",
"Panjgur",
"Khar",
]
Pakistan = ["Pakistan"]
other_country = ["United Arab Emirates"]
capital = {"Islamabad"}
# create a DataFrame with a 'region' column
df = pd.DataFrame(df)
df["region"] = None
# add 'region' for cities in each province
df.loc[df["location"].isin(punjab_cities), "region"] = "Punjab"
df.loc[df["location"].isin(sindh_cities), "region"] = "Sindh"
df.loc[df["location"].isin(kp_cities), "region"] = "KP"
df.loc[df["location"].isin(balochistan_cities), "region"] = "Balochistan"
df.loc[df["location"].isin(capital), "region"] = "ICT"
df.loc[df["location"].isin(Pakistan), "region"] = "Pakistan"
df.loc[df["location"].isin(other_country), "region"] = "Other Country"
df.head()
df["region"].value_counts()
df["region"].info()
# create a count plot of 'Data Scientist' observations by region
plt.figure(figsize=(10, 4))
sns.countplot(
x=df["region"], hue=df["headline"].str.contains("Data Scientist"), palette="magma"
)
plt.xlabel("Location", fontsize=12)
plt.ylabel("Count", fontsize=12)
plt.legend(title="Data Scientist", loc="upper right")
plt.show()
other_country_locations = df[df["region"] == "Other Country"]
# Create a scatter plot of the locations
plt.scatter(other_country_locations["location"], other_country_locations["region"])
# Set the title of the plot
plt.title("Locations with Region = 'Other Country'")
# Show the plot
plt.show()
# only other country is UAE where we have two observations
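# NOTE: `punjab_locations` is used below but never defined in the original notebook;
# assuming it is simply the subset of profiles mapped to the Punjab region.
punjab_locations = df[df["region"] == "Punjab"]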
location_counts = punjab_locations["location"].value_counts()
sizes = [location_counts[loc] for loc in punjab_locations["location"]]
plt.xticks(rotation=90)
plt.scatter(
punjab_locations["location"],
np.ones(len(punjab_locations)),
s=sizes,
c=sizes,
cmap="magma",
)
# Set the style of the plot
sns.set_style("darkgrid")
# Create a count plot of the locations
sns.countplot(x=punjab_locations["location"], palette="summer")
# Set the title of the plot
plt.title("Locations with Region = 'Punjab'")
plt.ylabel("Number of People")
plt.xlabel("Locations within Punjab")
plt.xticks(rotation=90)
# Show the plot
plt.show()
# Lahore has the highest number of locations in the Punjab Region
# Set the style of the plot
sns.set_style("darkgrid")
# Create a count plot of the locations
# sns.countplot(x=df['location'], hue=df['headline'].str.contains('Data Scientist'), palette='magma')
sns.countplot(
x=punjab_locations["location"],
hue=df["headline"].str.contains("Data Scientist"),
palette="cool",
)
# Set the title of the plot
plt.title("Location of Data Scientists within Punjab")
plt.ylabel("Number of Data Scientists")
plt.xlabel("Locations within Punjab")
plt.xticks(rotation=90)
# Show the plot
plt.show()
# Lahore has the highest number of Data Scientists in the Punjab Region
| false | 1 | 2,050 | 0 | 2,413 | 2,050 |
||
129919227
|
<jupyter_start><jupyter_text>Tutorial2_data
Kaggle dataset identifier: tutorial2-data
<jupyter_script>import geopandas as gpd
import matplotlib.pyplot as plt
# Importing and plotting the cities shapefile
cities = gpd.read_file("../input/tutorial2-data/belgian_cities.shp")
cities.plot()
cities.plot(cmap="jet")
# Importing and plotting AOI shapefile
AOI = gpd.read_file("../input/tutorial2-data/area_of_interest_.shp")
AOI.plot()
# Display both shapefiles together
fig, ax = plt.subplots(1)
# choose your color map on https://matplotlib.org/stable/tutorials/colors/colormaps.html
cities.plot(ax=ax, cmap="rainbow", column="NAME_4")
AOI.plot(ax=ax)
# you may want other color
# AOI.plot(ax=ax, facecolor = 'yellow')
# Difference overlay: keep the parts of the cities that fall outside the AOI
cities_out_AOI = gpd.overlay(cities, AOI, how="difference")
cities_out_AOI.plot(figsize=(10, 10), cmap="winter", column="NAME_4")
# Assigning a new column - Area
cities_out_AOI["Area(km2)"] = cities_in_AOI.area / 1000000
cities_out_AOI.head(5)
cities_out_AOI.describe()
cities_centroid = cities.centroid
cities_centroid.plot()
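# NOTE (assumption): GeoPandas buffer distances are expressed in the layer's CRS units;
# the Belgian shapefile is presumably in a projected CRS in metres, so 3000 is roughly a 3 km buffer.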
cities_centroid_buffer = cities_centroid.buffer(distance=3000)
cities_centroid_buffer.plot()
cities_centroid_buffer.to_file("./centroid_buffer.shp")
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/919/129919227.ipynb
|
tutorial2-data
|
kyrenchen
|
[{"Id": 129919227, "ScriptId": 38642774, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 15147234, "CreationDate": "05/17/2023 12:33:51", "VersionNumber": 1.0, "Title": "test2", "EvaluationDate": "05/17/2023", "IsChange": true, "TotalLines": 39.0, "LinesInsertedFromPrevious": 39.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 0.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
|
[{"Id": 186338880, "KernelVersionId": 129919227, "SourceDatasetVersionId": 3529467}]
|
[{"Id": 3529467, "DatasetId": 2123013, "DatasourceVersionId": 3582279, "CreatorUserId": 3948686, "LicenseName": "Unknown", "CreationDate": "04/26/2022 04:38:00", "VersionNumber": 2.0, "Title": "Tutorial2_data", "Slug": "tutorial2-data", "Subtitle": NaN, "Description": NaN, "VersionNotes": "Data Update 2022/04/26", "TotalCompressedBytes": 0.0, "TotalUncompressedBytes": 0.0}]
|
[{"Id": 2123013, "CreatorUserId": 3948686, "OwnerUserId": 3948686.0, "OwnerOrganizationId": NaN, "CurrentDatasetVersionId": 3529467.0, "CurrentDatasourceVersionId": 3582279.0, "ForumId": 2148571, "Type": 2, "CreationDate": "04/26/2022 03:37:14", "LastActivityDate": "04/26/2022", "TotalViews": 184, "TotalDownloads": 35, "TotalVotes": 0, "TotalKernels": 19}]
|
[{"Id": 3948686, "UserName": "kyrenchen", "DisplayName": "Kyren Chen", "RegisterDate": "10/30/2019", "PerformanceTier": 0}]
|
import geopandas as gpd
import matplotlib.pyplot as plt
# Importing and plotting the cities shapefile
cities = gpd.read_file("../input/tutorial2-data/belgian_cities.shp")
cities.plot()
cities.plot(cmap="jet")
# Importing and plotting AOI shapefile
AOI = gpd.read_file("../input/tutorial2-data/area_of_interest_.shp")
AOI.plot()
# Display both shapefiles together
fig, ax = plt.subplots(1)
# choose your color map on https://matplotlib.org/stable/tutorials/colors/colormaps.html
cities.plot(ax=ax, cmap="rainbow", column="NAME_4")
AOI.plot(ax=ax)
# you may want other color
# AOI.plot(ax=ax, facecolor = 'yellow')
# Difference overlay: keep the parts of the cities that fall outside the AOI
cities_out_AOI = gpd.overlay(cities, AOI, how="difference")
cities_out_AOI.plot(figsize=(10, 10), cmap="winter", column="NAME_4")
# Assigning a new column - Area
cities_out_AOI["Area(km2)"] = cities_in_AOI.area / 1000000
cities_out_AOI.head(5)
cities_out_AOI.describe()
cities_centroid = cities.centroid
cities_centroid.plot()
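# NOTE (assumption): GeoPandas buffer distances are expressed in the layer's CRS units;
# the Belgian shapefile is presumably in a projected CRS in metres, so 3000 is roughly a 3 km buffer.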
cities_centroid_buffer = cities_centroid.buffer(distance=3000)
cities_centroid_buffer.plot()
cities_centroid_buffer.to_file("./centroid_buffer.shp")
| false | 0 | 384 | 0 | 406 | 384 |
||
129534879
|
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import seaborn as sns
import matplotlib.pyplot as plt
df = pd.read_csv("/kaggle/input/spaceship-titanic/train.csv")
df.head()
df.info()
df.describe()
sns.countplot(data=df, x="Transported")
plt.figure(figsize=(20, 7))
sns.countplot(data=df, x="Age", hue="Transported")
sns.countplot(data=df, x="HomePlanet", hue="Transported")
sns.heatmap(df.corr(), annot=True)
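# NOTE: df.corr() only considers numeric columns; on recent pandas versions you may need
# df.corr(numeric_only=True) to avoid an error caused by the object/bool columns.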
sns.countplot(data=df, x="VIP", hue="Transported")
df["Age"].mean()
df.select_dtypes(exclude="number").columns
# We will drop PassengerId and Name since they are not useful here. VIP will be converted to dummies. Then we need to analyze CryoSleep, Cabin and Destination to see whether they are also worth converting to dummies.
df.isnull().sum()
df = df.drop(["PassengerId", "Name"], axis=1)
df.isnull().sum()
df["CryoSleep"].unique()
# Easy to turn into dummies.
df["Cabin"].unique()
df["Cabin"].nunique()
# Since there are too many unique values to turn into dummies, we would drop this column as-is, but it deserves some more analysis first.
df["Destination"].unique()
|
/fsx/loubna/kaggle_data/kaggle-code-data/data/0129/534/129534879.ipynb
| null | null |
[{"Id": 129534879, "ScriptId": 38487462, "ParentScriptVersionId": NaN, "ScriptLanguageId": 9, "AuthorUserId": 14324296, "CreationDate": "05/14/2023 15:55:11", "VersionNumber": 2.0, "Title": "Spaceship Titanic Competition", "EvaluationDate": "05/14/2023", "IsChange": true, "TotalLines": 77.0, "LinesInsertedFromPrevious": 60.0, "LinesChangedFromPrevious": 0.0, "LinesUnchangedFromPrevious": 17.0, "LinesInsertedFromFork": NaN, "LinesDeletedFromFork": NaN, "LinesChangedFromFork": NaN, "LinesUnchangedFromFork": NaN, "TotalVotes": 0}]
| null | null | null | null |
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
# Input data files are available in the read-only "../input/" directory
# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory
import os
for dirname, _, filenames in os.walk("/kaggle/input"):
for filename in filenames:
print(os.path.join(dirname, filename))
# You can write up to 20GB to the current directory (/kaggle/working/) that gets preserved as output when you create a version using "Save & Run All"
# You can also write temporary files to /kaggle/temp/, but they won't be saved outside of the current session
import seaborn as sns
import matplotlib.pyplot as plt
df = pd.read_csv("/kaggle/input/spaceship-titanic/train.csv")
df.head()
df.info()
df.describe()
sns.countplot(data=df, x="Transported")
plt.figure(figsize=(20, 7))
sns.countplot(data=df, x="Age", hue="Transported")
sns.countplot(data=df, x="HomePlanet", hue="Transported")
sns.heatmap(df.corr(), annot=True)
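# NOTE: df.corr() only considers numeric columns; on recent pandas versions you may need
# df.corr(numeric_only=True) to avoid an error caused by the object/bool columns.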
sns.countplot(data=df, x="VIP", hue="Transported")
df["Age"].mean()
df.select_dtypes(exclude="number").columns
# We will drop PassengerId and Name since they are not useful here. VIP will be converted to dummies. Then we need to analyze CryoSleep, Cabin and Destination to see whether they are also worth converting to dummies.
df.isnull().sum()
df = df.drop(["PassengerId", "Name"], axis=1)
df.isnull().sum()
df["CryoSleep"].unique()
# Easy to turn into dummies.
df["Cabin"].unique()
df["Cabin"].nunique()
# Since there are too many unique values to turn into dummies, we would drop this column as-is, but it deserves some more analysis first.
df["Destination"].unique()
| false | 0 | 532 | 0 | 532 | 532 |